// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/addrconf.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"
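
/* Helpers for the ID bitmaps that back the RDMA resource pools
 * (PDs, XRCDs, DPIs, CQs, TIDs, CIDs, SRQs).
 */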
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}
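
/* Caller must hold the relevant lock (e.g. p_rdma_info->lock); the
 * find-first-zero + set pair below is not atomic on its own.
 */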
int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bitmap_empty(bmap->bitmap, bmap->max_count);
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info;

	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return -ENOMEM;

	spin_lock_init(&p_rdma_info->lock);

	p_hwfn->p_rdma_info = p_rdma_info;
	return 0;
}

void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = NULL;
}
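
/* Allocate all per-function RDMA resource pools. The error unwind at the
 * bottom releases them in strict reverse order of allocation.
 */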
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		return rc;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate bit map for XRC Domains */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				 QED_RDMA_MAX_XRCDS, "XRCD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrcd_map, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_xrcd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
	 * the number of connections we support. (num_qps in iWARP or
	 * num_qps/2 in RoCE).
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * Size needs to equal the size of the cq bmap.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start from an offset equals to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn,
				 &p_rdma_info->xrc_srq_map,
				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_xrc_srq_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_srq_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_xrc_srq_map:
	kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_xrcd_map:
	kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);

	return rc;
}
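
/* When @check is set, any IDs still allocated are reported (and the stale
 * bitmap contents dumped, 512 bits per line) before the bitmap is freed.
 */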
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	unsigned int bit, weight, nbits;
	unsigned long *b;

	if (!check)
		goto end;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	if (!weight)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	for (bit = 0; bit < bmap->max_count; bit += 512) {
		b = bmap->bitmap + BITS_TO_LONGS(bit);
		nbits = min(bmap->max_count - bit, 512U);

		if (!bitmap_empty(b, nbits))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: %*pb\n", bit / 512, nbits, b);
	}

end:
	bitmap_free(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_free_reserved_lkey(p_hwfn);
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = cdev->chip_rev;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	addrconf_addr_eui48((u8 *)&dev->sys_image_guid,
			    p_hwfn->hw_info.hw_mac_addr);

	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->cdev->rdma_max_srq_sge) {
		dev->max_srq_sge = min_t(u32,
					 p_hwfn->cdev->rdma_max_srq_sge,
					 dev->max_srq_sge);
	}
	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs uses the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
		dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
				   &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}
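
/* Build and post the FUNC_INIT ramrod that starts the FW RDMA engine and
 * binds each CNQ to its status block and PBL.
 */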
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
	}

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;
	p_params_header->first_reg_srq_id =
	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
	p_params_header->reg_srq_base_addr =
	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
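
/* TIDs back memory regions; allocating one may require a dynamic ILT page
 * allocation for the task context.
 */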
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;
	p_hwfn->p_rdma_info->active = 0;
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}
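
/* A DPI is a per-user slice of the doorbell BAR, dpi_size bytes wide,
 * starting at dpi_start_offset + dpi * dpi_size; it is returned in both
 * CPU-mapped and bus address forms.
 */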
static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
			       out_params->dpi * p_hwfn->dpi_size;

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
	struct qed_mcp_link_state *p_link_output;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* The link state is saved only for the leading hwfn */
	p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
	    : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_link_output->speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				USTORM_COMMON_QUEUE_CONS, qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->xrcd_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
		return rc;
	}

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
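
/* Every create or resize of a CQ on a given icid flips that icid's toggle
 * bit; the new value is carried in the create/resize ramrod so consecutive
 * cycles on a reused icid can be told apart.
 */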
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggle the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;
	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
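
/* The FW consumes the MAC as three little-endian 16-bit words, each holding
 * a byte-swapped pair of the canonical address bytes.
 */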
void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params ||
	    !p_hwfn->p_rdma_info->active) {
		pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
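		/* RoCE qp ids carry a fixed 0xFF marker above the icid */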
1330 | qp->qpid = ((0xFF << 16) | qp->icid); |
1331 | } |
1332 | |
1333 | if (rc) { |
1334 | kfree(objp: qp); |
1335 | return NULL; |
1336 | } |
1337 | |
1338 | out_params->icid = qp->icid; |
1339 | out_params->qp_id = qp->qpid; |
1340 | |
1341 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n" , rc); |
1342 | return qp; |
1343 | } |
1344 | |
1345 | static int qed_rdma_modify_qp(void *rdma_cxt, |
1346 | struct qed_rdma_qp *qp, |
1347 | struct qed_rdma_modify_qp_in_params *params) |
1348 | { |
1349 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1350 | enum qed_roce_qp_state prev_state; |
1351 | int rc = 0; |
1352 | |
1353 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n" , |
1354 | qp->icid, params->new_state); |
1355 | |
1356 | if (rc) { |
1357 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n" , rc); |
1358 | return rc; |
1359 | } |
1360 | |
1361 | if (GET_FIELD(params->modify_flags, |
1362 | QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { |
1363 | qp->incoming_rdma_read_en = params->incoming_rdma_read_en; |
1364 | qp->incoming_rdma_write_en = params->incoming_rdma_write_en; |
1365 | qp->incoming_atomic_en = params->incoming_atomic_en; |
1366 | } |
1367 | |
1368 | /* Update QP structure with the updated values */ |
1369 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) |
1370 | qp->roce_mode = params->roce_mode; |
1371 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) |
1372 | qp->pkey = params->pkey; |
1373 | if (GET_FIELD(params->modify_flags, |
1374 | QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) |
1375 | qp->e2e_flow_control_en = params->e2e_flow_control_en; |
1376 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) |
1377 | qp->dest_qp = params->dest_qp; |
1378 | if (GET_FIELD(params->modify_flags, |
1379 | QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { |
1380 | /* Indicates that the following parameters have changed: |
1381 | * Traffic class, flow label, hop limit, source GID, |
1382 | * destination GID, loopback indicator |
1383 | */ |
1384 | qp->traffic_class_tos = params->traffic_class_tos; |
1385 | qp->flow_label = params->flow_label; |
1386 | qp->hop_limit_ttl = params->hop_limit_ttl; |
1387 | |
1388 | qp->sgid = params->sgid; |
1389 | qp->dgid = params->dgid; |
1390 | qp->udp_src_port = 0; |
1391 | qp->vlan_id = params->vlan_id; |
1392 | qp->mtu = params->mtu; |
1393 | qp->lb_indication = params->lb_indication; |
1394 | memcpy((u8 *)&qp->remote_mac_addr[0], |
1395 | (u8 *)¶ms->remote_mac_addr[0], ETH_ALEN); |
1396 | if (params->use_local_mac) { |
1397 | memcpy((u8 *)&qp->local_mac_addr[0], |
1398 | (u8 *)¶ms->local_mac_addr[0], ETH_ALEN); |
1399 | } else { |
1400 | memcpy((u8 *)&qp->local_mac_addr[0], |
1401 | (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); |
1402 | } |
1403 | } |
1404 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) |
1405 | qp->rq_psn = params->rq_psn; |
1406 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) |
1407 | qp->sq_psn = params->sq_psn; |
1408 | if (GET_FIELD(params->modify_flags, |
1409 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) |
1410 | qp->max_rd_atomic_req = params->max_rd_atomic_req; |
1411 | if (GET_FIELD(params->modify_flags, |
1412 | QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) |
1413 | qp->max_rd_atomic_resp = params->max_rd_atomic_resp; |
1414 | if (GET_FIELD(params->modify_flags, |
1415 | QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) |
1416 | qp->ack_timeout = params->ack_timeout; |
1417 | if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) |
1418 | qp->retry_cnt = params->retry_cnt; |
1419 | if (GET_FIELD(params->modify_flags, |
1420 | QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) |
1421 | qp->rnr_retry_cnt = params->rnr_retry_cnt; |
1422 | if (GET_FIELD(params->modify_flags, |
1423 | QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) |
1424 | qp->min_rnr_nak_timer = params->min_rnr_nak_timer; |
1425 | |
1426 | qp->sqd_async = params->sqd_async; |
1427 | |
1428 | prev_state = qp->cur_state; |
1429 | if (GET_FIELD(params->modify_flags, |
1430 | QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { |
1431 | qp->cur_state = params->new_state; |
1432 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n" , |
1433 | qp->cur_state); |
1434 | } |
1435 | |
1436 | switch (qp->qp_type) { |
1437 | case QED_RDMA_QP_TYPE_XRC_INI: |
1438 | qp->has_req = true; |
1439 | break; |
1440 | case QED_RDMA_QP_TYPE_XRC_TGT: |
1441 | qp->has_resp = true; |
1442 | break; |
1443 | default: |
1444 | qp->has_req = true; |
1445 | qp->has_resp = true; |
1446 | } |
1447 | |
1448 | if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { |
1449 | enum qed_iwarp_qp_state new_state = |
1450 | qed_roce2iwarp_state(state: qp->cur_state); |
1451 | |
1452 | rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, internal: 0); |
1453 | } else { |
1454 | rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); |
1455 | } |
1456 | |
1457 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n" , rc); |
1458 | return rc; |
1459 | } |
1460 | |
1461 | static int |
1462 | qed_rdma_register_tid(void *rdma_cxt, |
1463 | struct qed_rdma_register_tid_in_params *params) |
1464 | { |
1465 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1466 | struct rdma_register_tid_ramrod_data *p_ramrod; |
1467 | struct qed_sp_init_data init_data; |
1468 | struct qed_spq_entry *p_ent; |
1469 | enum rdma_tid_type tid_type; |
1470 | u8 fw_return_code; |
1471 | u16 flags = 0; |
1472 | int rc; |
1473 | |
1474 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n" , params->itid); |
1475 | |
1476 | /* Get SPQ entry */ |
1477 | memset(&init_data, 0, sizeof(init_data)); |
1478 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1479 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1480 | |
1481 | rc = qed_sp_init_request(p_hwfn, pp_ent: &p_ent, cmd: RDMA_RAMROD_REGISTER_MR, |
1482 | protocol: p_hwfn->p_rdma_info->proto, p_data: &init_data); |
1483 | if (rc) { |
1484 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n" , rc); |
1485 | return rc; |
1486 | } |
1487 | |
1488 | if (p_hwfn->p_rdma_info->last_tid < params->itid) |
1489 | p_hwfn->p_rdma_info->last_tid = params->itid; |
1490 | |
1491 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, |
1492 | params->pbl_two_level); |
1493 | |
1494 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, |
1495 | false); |
1496 | |
1497 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); |
1498 | |
1499 | /* Don't initialize D/C field, as it may override other bits. */ |
1500 | if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) |
1501 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, |
1502 | params->page_size_log - 12); |
1503 | |
1504 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, |
1505 | params->remote_read); |
1506 | |
1507 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, |
1508 | params->remote_write); |
1509 | |
1510 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, |
1511 | params->remote_atomic); |
1512 | |
1513 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, |
1514 | params->local_write); |
1515 | |
1516 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, |
1517 | params->local_read); |
1518 | |
1519 | SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, |
1520 | params->mw_bind); |
1521 | |
1522 | p_ramrod = &p_ent->ramrod.rdma_register_tid; |
1523 | p_ramrod->flags = cpu_to_le16(flags); |
1524 | |
1525 | SET_FIELD(p_ramrod->flags1, |
1526 | RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, |
1527 | params->pbl_page_size_log - 12); |
1528 | |
1529 | SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, |
1530 | params->dma_mr); |
1531 | |
1532 | switch (params->tid_type) { |
1533 | case QED_RDMA_TID_REGISTERED_MR: |
1534 | tid_type = RDMA_TID_REGISTERED_MR; |
1535 | break; |
1536 | case QED_RDMA_TID_FMR: |
1537 | tid_type = RDMA_TID_FMR; |
1538 | break; |
1539 | case QED_RDMA_TID_MW: |
1540 | tid_type = RDMA_TID_MW; |
1541 | break; |
1542 | default: |
1543 | rc = -EINVAL; |
1544 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n" , rc); |
1545 | qed_sp_destroy_request(p_hwfn, p_ent); |
1546 | return rc; |
1547 | } |
1548 | |
1549 | SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, |
1550 | tid_type); |
1551 | |
1552 | p_ramrod->itid = cpu_to_le32(params->itid); |
1553 | p_ramrod->key = params->key; |
1554 | p_ramrod->pd = cpu_to_le16(params->pd); |
1555 | p_ramrod->length_hi = (u8)(params->length >> 32); |
1556 | p_ramrod->length_lo = DMA_LO_LE(params->length); |
1557 | DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); |
1558 | DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); |
1559 | |
	/* DIF (T10 protection information) */
1561 | if (params->dif_enabled) { |
1562 | SET_FIELD(p_ramrod->flags2, |
1563 | RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); |
1564 | DMA_REGPAIR_LE(p_ramrod->dif_error_addr, |
1565 | params->dif_error_addr); |
1566 | } |
1567 | |
	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1569 | if (rc) |
1570 | return rc; |
1571 | |
1572 | if (fw_return_code != RDMA_RETURN_OK) { |
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1574 | return -EINVAL; |
1575 | } |
1576 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
1578 | return rc; |
1579 | } |
1580 | |
1581 | static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) |
1582 | { |
1583 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1584 | struct rdma_deregister_tid_ramrod_data *p_ramrod; |
1585 | struct qed_sp_init_data init_data; |
1586 | struct qed_spq_entry *p_ent; |
1587 | struct qed_ptt *p_ptt; |
1588 | u8 fw_return_code; |
1589 | int rc; |
1590 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
1592 | |
1593 | /* Get SPQ entry */ |
1594 | memset(&init_data, 0, sizeof(init_data)); |
1595 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1596 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1597 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
1604 | |
1605 | p_ramrod = &p_ent->ramrod.rdma_deregister_tid; |
1606 | p_ramrod->itid = cpu_to_le32(itid); |
1607 | |
	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
1613 | |
1614 | if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { |
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1616 | return -EINVAL; |
1617 | } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { |
		/* The firmware reports that the TID is still in use and a
		 * NIG drain is required before the ramrod can be resent.
		 */
1621 | p_ptt = qed_ptt_acquire(p_hwfn); |
1622 | if (!p_ptt) { |
1623 | rc = -EBUSY; |
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
1626 | return rc; |
1627 | } |
1628 | |
1629 | rc = qed_mcp_drain(p_hwfn, p_ptt); |
1630 | if (rc) { |
1631 | qed_ptt_release(p_hwfn, p_ptt); |
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
1634 | return rc; |
1635 | } |
1636 | |
1637 | qed_ptt_release(p_hwfn, p_ptt); |
1638 | |
1639 | /* Resend the ramrod */ |
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}
1649 | |
		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}
1656 | |
1657 | if (fw_return_code != RDMA_RETURN_OK) { |
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
1660 | return rc; |
1661 | } |
1662 | } |
1663 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
1665 | return rc; |
1666 | } |
1667 | |
1668 | static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) |
1669 | { |
1670 | return QED_AFFIN_HWFN(cdev); |
1671 | } |
1672 | |
1673 | static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, |
1674 | bool is_xrc) |
1675 | { |
1676 | if (is_xrc) |
1677 | return &p_hwfn->p_rdma_info->xrc_srq_map; |
1678 | |
1679 | return &p_hwfn->p_rdma_info->srq_map; |
1680 | } |
1681 | |
1682 | static int qed_rdma_modify_srq(void *rdma_cxt, |
1683 | struct qed_rdma_modify_srq_in_params *in_params) |
1684 | { |
1685 | struct rdma_srq_modify_ramrod_data *p_ramrod; |
1686 | struct qed_sp_init_data init_data = {}; |
1687 | struct qed_hwfn *p_hwfn = rdma_cxt; |
1688 | struct qed_spq_entry *p_ent; |
1689 | u16 opaque_fid; |
1690 | int rc; |
1691 | |
1692 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; |
1693 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1694 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_MODIFY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
1698 | if (rc) |
1699 | return rc; |
1700 | |
1701 | p_ramrod = &p_ent->ramrod.rdma_modify_srq; |
1702 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); |
1703 | opaque_fid = p_hwfn->hw_info.opaque_fid; |
1704 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); |
1705 | p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); |
1706 | |
1707 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
1708 | if (rc) |
1709 | return rc; |
1710 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
1712 | in_params->srq_id, in_params->is_xrc); |
1713 | |
1714 | return rc; |
1715 | } |
1716 | |
1717 | static int |
1718 | qed_rdma_destroy_srq(void *rdma_cxt, |
1719 | struct qed_rdma_destroy_srq_in_params *in_params) |
1720 | { |
1721 | struct rdma_srq_destroy_ramrod_data *p_ramrod; |
1722 | struct qed_sp_init_data init_data = {}; |
1723 | struct qed_hwfn *p_hwfn = rdma_cxt; |
1724 | struct qed_spq_entry *p_ent; |
1725 | struct qed_bmap *bmap; |
1726 | u16 opaque_fid; |
1727 | u16 offset; |
1728 | int rc; |
1729 | |
1730 | opaque_fid = p_hwfn->hw_info.opaque_fid; |
1731 | |
1732 | init_data.opaque_fid = opaque_fid; |
1733 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1734 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
1738 | if (rc) |
1739 | return rc; |
1740 | |
1741 | p_ramrod = &p_ent->ramrod.rdma_destroy_srq; |
1742 | p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); |
1743 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); |
1744 | |
1745 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
1746 | if (rc) |
1747 | return rc; |
1748 | |
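	/* Release the bitmap id; non-XRC SRQ ids were biased by
	 * srq_id_offset when allocated, so undo that bias here.
	 */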
	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1755 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);
1759 | |
1760 | return rc; |
1761 | } |
1762 | |
1763 | static int |
1764 | qed_rdma_create_srq(void *rdma_cxt, |
1765 | struct qed_rdma_create_srq_in_params *in_params, |
1766 | struct qed_rdma_create_srq_out_params *out_params) |
1767 | { |
1768 | struct rdma_srq_create_ramrod_data *p_ramrod; |
1769 | struct qed_sp_init_data init_data = {}; |
1770 | struct qed_hwfn *p_hwfn = rdma_cxt; |
1771 | enum qed_cxt_elem_type elem_type; |
1772 | struct qed_spq_entry *p_ent; |
1773 | u16 opaque_fid, srq_id; |
1774 | struct qed_bmap *bmap; |
1775 | u32 returned_id; |
1776 | u16 offset; |
1777 | int rc; |
1778 | |
	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1783 | |
1784 | if (rc) { |
		DP_NOTICE(p_hwfn,
			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
			  in_params->is_xrc);
1788 | return rc; |
1789 | } |
1790 | |
1791 | elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); |
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
1793 | if (rc) |
1794 | goto err; |
1795 | |
1796 | opaque_fid = p_hwfn->hw_info.opaque_fid; |
1797 | init_data.opaque_fid = opaque_fid; |
1798 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
1799 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
1803 | if (rc) |
1804 | goto err; |
1805 | |
1806 | p_ramrod = &p_ent->ramrod.rdma_create_srq; |
1807 | DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); |
1808 | p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); |
1809 | p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); |
1810 | p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); |
1811 | p_ramrod->page_size = cpu_to_le16(in_params->page_size); |
1812 | DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); |
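	/* The SRQ id handed to the caller is the bitmap id plus an offset:
	 * XRC SRQs use the raw bitmap id, while regular SRQs are biased by
	 * srq_id_offset so the two id spaces do not collide.
	 */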
1813 | offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; |
1814 | srq_id = (u16)returned_id + offset; |
1815 | p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); |
1816 | |
1817 | if (in_params->is_xrc) { |
1818 | SET_FIELD(p_ramrod->flags, |
1819 | RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); |
1820 | SET_FIELD(p_ramrod->flags, |
1821 | RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, |
1822 | in_params->reserved_key_en); |
1823 | p_ramrod->xrc_srq_cq_cid = |
1824 | cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | |
1825 | in_params->cq_cid); |
1826 | p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); |
1827 | } |
1828 | rc = qed_spq_post(p_hwfn, p_ent, NULL); |
1829 | if (rc) |
1830 | goto err; |
1831 | |
1832 | out_params->srq_id = srq_id; |
1833 | |
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
		   out_params->srq_id, in_params->is_xrc);
1838 | return rc; |
1839 | |
1840 | err: |
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1844 | |
1845 | return rc; |
1846 | } |
1847 | |
1848 | bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) |
1849 | { |
1850 | bool result; |
1851 | |
	/* If RDMA wasn't activated yet, there are naturally no QPs */
1853 | if (!p_hwfn->p_rdma_info->active) |
1854 | return false; |
1855 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	if (!p_hwfn->p_rdma_info->cid_map.bitmap)
		result = false;
	else
		result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1862 | return result; |
1863 | } |
1864 | |
1865 | void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
1866 | { |
1867 | u32 val; |
1868 | |
1869 | val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1; |
1870 | |
1871 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); |
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
1875 | } |
1876 | |
1877 | void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
1878 | { |
1879 | p_hwfn->db_bar_no_edpm = true; |
1880 | |
1881 | qed_rdma_dpm_conf(p_hwfn, p_ptt); |
1882 | } |
1883 | |
1884 | static int qed_rdma_start(void *rdma_cxt, |
1885 | struct qed_rdma_start_in_params *params) |
1886 | { |
1887 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1888 | struct qed_ptt *p_ptt; |
1889 | int rc = -EBUSY; |
1890 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);
1893 | |
1894 | p_ptt = qed_ptt_acquire(p_hwfn); |
1895 | if (!p_ptt) |
1896 | goto err; |
1897 | |
1898 | rc = qed_rdma_alloc(p_hwfn); |
1899 | if (rc) |
1900 | goto err1; |
1901 | |
1902 | rc = qed_rdma_setup(p_hwfn, p_ptt, params); |
1903 | if (rc) |
1904 | goto err2; |
1905 | |
1906 | qed_ptt_release(p_hwfn, p_ptt); |
1907 | p_hwfn->p_rdma_info->active = 1; |
1908 | |
1909 | return rc; |
1910 | |
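/* Error unwind: release resources in the reverse order of acquisition. */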
1911 | err2: |
1912 | qed_rdma_free(p_hwfn); |
1913 | err1: |
1914 | qed_ptt_release(p_hwfn, p_ptt); |
1915 | err: |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
1917 | return rc; |
1918 | } |
1919 | |
1920 | static int qed_rdma_init(struct qed_dev *cdev, |
1921 | struct qed_rdma_start_in_params *params) |
1922 | { |
1923 | return qed_rdma_start(QED_AFFIN_HWFN(cdev), params); |
1924 | } |
1925 | |
1926 | static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) |
1927 | { |
1928 | struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; |
1929 | |
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
1931 | |
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1935 | } |
1936 | |
1937 | static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, |
1938 | u8 *old_mac_address, |
1939 | const u8 *new_mac_address) |
1940 | { |
1941 | int rc = 0; |
1942 | |
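	/* A failure to remove the old filter is not propagated to the
	 * caller; only the attempt to add the new filter can set rc.
	 */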
	if (old_mac_address)
		qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
1947 | |
	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add MAC filter\n");
1951 | |
1952 | return rc; |
1953 | } |
1954 | |
1955 | static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset) |
1956 | { |
1957 | enum qed_eng eng; |
1958 | u8 ppfid = 0; |
1959 | int rc; |
1960 | |
1961 | /* Make sure iwarp cmt mode is enabled before setting affinity */ |
1962 | if (!cdev->iwarp_cmt) |
1963 | return -EINVAL; |
1964 | |
1965 | if (b_reset) |
1966 | eng = QED_BOTH_ENG; |
1967 | else |
1968 | eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0; |
1969 | |
1970 | rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); |
1971 | if (rc) { |
		DP_NOTICE(cdev,
			  "Failed to set the engine affinity of ppfid %d\n",
			  ppfid);
1975 | return rc; |
1976 | } |
1977 | |
	DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);
1981 | |
1982 | return 0; |
1983 | } |
1984 | |
1985 | static const struct qed_rdma_ops qed_rdma_ops_pass = { |
1986 | .common = &qed_common_ops_pass, |
1987 | .fill_dev_info = &qed_fill_rdma_dev_info, |
1988 | .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, |
1989 | .rdma_init = &qed_rdma_init, |
1990 | .rdma_add_user = &qed_rdma_add_user, |
1991 | .rdma_remove_user = &qed_rdma_remove_user, |
1992 | .rdma_stop = &qed_rdma_stop, |
1993 | .rdma_query_port = &qed_rdma_query_port, |
1994 | .rdma_query_device = &qed_rdma_query_device, |
1995 | .rdma_get_start_sb = &qed_rdma_get_sb_start, |
1996 | .rdma_get_rdma_int = &qed_rdma_get_int, |
1997 | .rdma_set_rdma_int = &qed_rdma_set_int, |
1998 | .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, |
1999 | .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, |
2000 | .rdma_alloc_pd = &qed_rdma_alloc_pd, |
2001 | .rdma_dealloc_pd = &qed_rdma_free_pd, |
2002 | .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, |
2003 | .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, |
2004 | .rdma_create_cq = &qed_rdma_create_cq, |
2005 | .rdma_destroy_cq = &qed_rdma_destroy_cq, |
2006 | .rdma_create_qp = &qed_rdma_create_qp, |
2007 | .rdma_modify_qp = &qed_rdma_modify_qp, |
2008 | .rdma_query_qp = &qed_rdma_query_qp, |
2009 | .rdma_destroy_qp = &qed_rdma_destroy_qp, |
2010 | .rdma_alloc_tid = &qed_rdma_alloc_tid, |
2011 | .rdma_free_tid = &qed_rdma_free_tid, |
2012 | .rdma_register_tid = &qed_rdma_register_tid, |
2013 | .rdma_deregister_tid = &qed_rdma_deregister_tid, |
2014 | .rdma_create_srq = &qed_rdma_create_srq, |
2015 | .rdma_modify_srq = &qed_rdma_modify_srq, |
2016 | .rdma_destroy_srq = &qed_rdma_destroy_srq, |
2017 | .ll2_acquire_connection = &qed_ll2_acquire_connection, |
2018 | .ll2_establish_connection = &qed_ll2_establish_connection, |
2019 | .ll2_terminate_connection = &qed_ll2_terminate_connection, |
2020 | .ll2_release_connection = &qed_ll2_release_connection, |
2021 | .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, |
2022 | .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, |
2023 | .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, |
2024 | .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, |
2025 | .ll2_get_stats = &qed_ll2_get_stats, |
2026 | .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin, |
2027 | .iwarp_connect = &qed_iwarp_connect, |
2028 | .iwarp_create_listen = &qed_iwarp_create_listen, |
2029 | .iwarp_destroy_listen = &qed_iwarp_destroy_listen, |
2030 | .iwarp_accept = &qed_iwarp_accept, |
2031 | .iwarp_reject = &qed_iwarp_reject, |
2032 | .iwarp_send_rtr = &qed_iwarp_send_rtr, |
2033 | }; |
2034 | |
2035 | const struct qed_rdma_ops *qed_get_rdma_ops(void) |
2036 | { |
2037 | return &qed_rdma_ops_pass; |
2038 | } |
2039 | EXPORT_SYMBOL(qed_get_rdma_ops); |
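
/* Usage sketch (hypothetical consumer, not part of this file): an
 * upper-layer RDMA driver such as qedr would fetch this ops table at
 * probe time and drive the qed core through it, roughly:
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *
 *	rc = ops->rdma_init(cdev, &start_params);
 */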
2040 | |