/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>

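/* Translate IB verbs access flags to the qplib (HW) flag set and back.
 * The two mappings below must be kept in sync with each other.
 */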
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

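/* Copy an array of ib_sge entries into the qplib SGE layout used by the
 * HW queues. Returns the total length of the scatter list in bytes.
 */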
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver),
		   sizeof(ib_attr->fw_ver)));
	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
			    rdev->netdev->dev_addr);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
			      &port_attr->active_width);

	return rc;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return, as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

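/* Fence-MR infrastructure, used only on pre-P5 chips (the helpers below
 * return early on Gen P5/P7, which presumably fence in HW). The driver
 * pre-builds a type-1 memory-window bind WQE, flagged with UC_FENCE,
 * against a dummy fence MR owned by the PD; bnxt_re_bind_fence_mw()
 * posts a copy of it with a fresh rkey whenever a fence is needed.
 */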
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
	struct bnxt_re_dev *rdev = pd->rdev;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	int rc;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return 0;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

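/* Allocate and insert an rdma_user_mmap entry for one of the driver's
 * mmap key types. The shared page is pinned at pgoff 0 (userspace
 * presumably mmaps offset 0 for it), hence the _insert_exact variant;
 * the other types take any free offset, which is handed back through
 * @offset so it can be relayed to userspace.
 */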
static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;
	entry->uctx = uctx;

	switch (mmap_flag) {
	case BNXT_RE_MMAP_SH_PAGE:
		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
							&entry->rdma_entry, PAGE_SIZE, 0);
		break;
	case BNXT_RE_MMAP_UC_DB:
	case BNXT_RE_MMAP_WC_DB:
	case BNXT_RE_MMAP_DBR_BAR:
	case BNXT_RE_MMAP_DBR_PAGE:
	case BNXT_RE_MMAP_TOGGLE_PAGE:
		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
						  &entry->rdma_entry, PAGE_SIZE);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return entry;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	if (udata) {
		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
		pd->pd_db_mmap = NULL;
	}

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd))
			atomic_dec(&rdev->stats.res.pd_count);
	}
	return 0;
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_user_mmap_entry *entry = NULL;
	u32 active_pds;
	int rc = 0;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp = {};

		if (!ucntx->dpi.dbr) {
			/* Allocate the DPI here, in alloc_pd, so that
			 * ibv_devinfo and similar applications do not
			 * fail once DPIs are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;

		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
						  BNXT_RE_MMAP_UC_DB, &resp.dbr);

		if (!entry) {
			rc = -ENOMEM;
			goto dbfail;
		}

		pd->pd_db_mmap = &entry->rdma_entry;

		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (rc) {
			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
			rc = -EFAULT;
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
	if (active_pds > rdev->stats.res.pd_watermark)
		rdev->stats.res.pd_watermark = active_pds;

	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	bool block = true;
	int rc;

	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (BNXT_RE_CHECK_RC(rc)) {
		if (rc == -ETIMEDOUT)
			rc = 0;
		else
			goto fail;
	}
	atomic_dec(&rdev->stats.res.ah_count);
fail:
	return rc;
}

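/* Map the rdma-core network type of the GID to the AH type used by the
 * FW command interface: RoCEv2 over IPv4/IPv6 become the V2 types and
 * anything else falls back to RoCE v1.
 */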
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u32 active_ahs;
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}
	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
	if (active_ahs > rdev->stats.res.ah_watermark)
		rdev->stats.res.ah_watermark = active_ahs;

	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

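/* Take both CQ locks of a QP in a fixed order (send CQ first, then the
 * receive CQ if it is distinct) so concurrent callers cannot deadlock.
 * The __acquire()/__release() annotations keep sparse's lock-balance
 * checking consistent when one CQ serves both directions.
 */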
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	atomic_dec(&rdev->stats.res.ah_count);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_nq *scq_nq = NULL;
	struct bnxt_qplib_nq *rcq_nq = NULL;
	unsigned int flags;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			return rc;
	}

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);
	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
		atomic_dec(&rdev->stats.res.rc_qp_count);
	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
		atomic_dec(&rdev->stats.res.ud_qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	/* Flush all entries of the notification queue(s) associated with
	 * this QP.
	 */
	scq_nq = qplib_qp->scq->nq;
	rcq_nq = qplib_qp->rcq->nq;
	bnxt_re_synchronize_nq(scq_nq);
	if (scq_nq != rcq_nq)
		bnxt_re_synchronize_nq(rcq_nq);

	return 0;
}

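/* Translate the IB QP type to the FW command value. IB_QPT_MAX serves
 * as a sentinel for unsupported types; callers convert it to
 * -EOPNOTSUPP.
 */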
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

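/* WQE sizing helpers. In static (fixed-size) WQE mode every RQ WQE is
 * sized for the device maximum SGE count; an SQ WQE must fit both the
 * requested SGEs and any inline data, rounded up to a whole number of
 * send-header sized slots.
 */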
static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;
	return bnxt_re_get_rwqe_size(rsge);
}

static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
		return -EINVAL;
	/* In Gen P4 and Gen P5 backward-compatibility (static WQE) mode,
	 * the WQE size is fixed at 128 bytes.
	 */
	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}

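/* Map the userspace SQ/RQ buffers described in the user's create-QP
 * request. For RC QPs the SQ buffer also carries the PSN search area,
 * so the size is padded accordingly before pinning with ib_umem_get().
 */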
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_qplib_qp *qplib_qp;
	struct bnxt_re_ucontext *cntx;
	struct bnxt_re_qp_req ureq;
	int bytes = 0, psn_sz;
	struct ib_umem *umem;
	int psn_nume;

	qplib_qp = &qp->qplib_qp;
	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
					 ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
						sizeof(struct sq_psn_search_ext) :
						sizeof(struct sq_psn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			    qplib_qp->sq.max_wqe :
			    ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
			     sizeof(struct bnxt_qplib_sge));
		bytes += (psn_nume * psn_sz);
	}

	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.umem = umem;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}

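/* Build the AH used by the shadow GSI QP. DGID is set to our own GID at
 * index 0 and the DMAC to our own MAC, presumably so that packets the
 * shadow QP sends are looped back to the local port.
 */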
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
			(struct bnxt_re_pd *pd,
			 struct bnxt_qplib_res *qp1_res,
			 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}
	atomic_inc(&rdev->stats.res.ah_count);

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
			(struct bnxt_re_pd *pd,
			 struct bnxt_qplib_res *qp1_res,
			 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;
	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;
	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->stats.res.qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *rq;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	rq = &qplqp->rq;
	dev_attr = &rdev->dev_attr;

	if (init_attr->srq) {
		struct bnxt_re_srq *srq;

		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
		qplqp->srq = &srq->qplib_srq;
		rq->max_wqe = 0;
	} else {
		rq->max_sge = init_attr->cap.max_recv_sge;
		if (rq->max_sge > dev_attr->max_qp_sges)
			rq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_recv_sge = rq->max_sge;
		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
						       dev_attr->max_qp_sges);
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
		rq->q_full_delta = 0;
		rq->sg_info.pgsize = PAGE_SIZE;
		rq->sg_info.pgshft = PAGE_SHIFT;
	}

	return 0;
}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
		qplqp->rq.max_sge = dev_attr->max_qp_sges;
		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
			qplqp->rq.max_sge = dev_attr->max_qp_sges;
		qplqp->rq.max_sge = 6;
	}
}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int entries;
	int diff;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	sq->max_sge = init_attr->cap.max_send_sge;
	if (sq->max_sge > dev_attr->max_qp_sges) {
		sq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_send_sge = sq->max_sge;
	}

	rc = bnxt_re_setup_swqe_size(qp, init_attr);
	if (rc)
		return rc;

	entries = init_attr->cap.max_send_wr;
	/* Allocate 128 + 1 more than what's provided */
	diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
		0 : BNXT_QPLIB_RESERVED_QP_WRS;
	entries = bnxt_re_init_depth(entries + diff + 1, uctx);
	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
	sq->q_full_delta = diff + 1;
	/* Reserve one slot for the phantom WQE. The application can then
	 * post one extra entry, but we allow that rather than risk an
	 * unexpected queue-full condition.
	 */
	qplqp->sq.q_full_delta -= 1;
	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;

	return 0;
}

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr,
				       struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
		qplqp->sq.max_wqe = min_t(u32, entries,
					  dev_attr->max_qp_wqes + 1);
		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
					 init_attr->cap.max_send_wr;
		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
			qplqp->sq.max_sge = dev_attr->max_qp_sges;
	}
}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	int qptype;

	chip_ctx = rdev->chip_ctx;

	qptype = __from_ib_qp_type(init_attr->qp_type);
	if (qptype == IB_QPT_MAX) {
		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
		qptype = -EOPNOTSUPP;
		goto out;
	}

	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
	    init_attr->qp_type == IB_QPT_GSI)
		qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
	return qptype;
}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
	/* Setup misc params */
	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->max_inline_data = init_attr->cap.max_inline_data;
	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;
	qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
	if (init_attr->create_flags) {
		ibdev_dbg(&rdev->ibdev,
			  "QP create flags 0x%x not supported",
			  init_attr->create_flags);
		return -EOPNOTSUPP;
	}

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}

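/* Set up the shadow-QP plumbing used for QP1 on pre-P5 chips: a table
 * for tracking in-flight QP1 packets, the shadow UD QP itself, and the
 * AH it transmits with.
 */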
static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{
	struct bnxt_re_sqp_entries *sqp_tbl;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *sqp;
	struct bnxt_re_ah *sah;
	int rc = 0;

	rdev = qp->rdev;
	/* Create a shadow QP to handle the QP1 traffic */
	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
			  GFP_KERNEL);
	if (!sqp_tbl)
		return -ENOMEM;
	rdev->gsi_ctx.sqp_tbl = sqp_tbl;

	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
	if (!sqp) {
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
		goto out;
	}
	rdev->gsi_ctx.gsi_sqp = sqp;

	sqp->rcq = qp->rcq;
	sqp->scq = qp->scq;
	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
					  &qp->qplib_qp);
	if (!sah) {
		bnxt_qplib_destroy_qp(&rdev->qplib_res,
				      &sqp->qplib_qp);
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev,
			  "Failed to create AH entry for ShadowQP");
		goto out;
	}
	rdev->gsi_ctx.gsi_sah = sah;

	return 0;
out:
	kfree(sqp_tbl);
	return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_qp *qplqp;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
		goto out;
	}

	rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
	return rc;
}

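/* Validate the requested QP capabilities against the device limits
 * reported by FW. Returns true if the request fits, false otherwise.
 */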
static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
				   struct ib_qp_init_attr *init_attr,
				   struct bnxt_qplib_dev_attr *dev_attr)
{
	bool rc = true;

	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
		ibdev_err(&rdev->ibdev,
			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_inline_data,
			  dev_attr->max_inline_data);
		rc = false;
	}
	return rc;
}

1519 | int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr, |
1520 | struct ib_udata *udata) |
1521 | { |
1522 | struct ib_pd *ib_pd = ib_qp->pd; |
1523 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
1524 | struct bnxt_re_dev *rdev = pd->rdev; |
1525 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
1526 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
1527 | u32 active_qps; |
1528 | int rc; |
1529 | |
1530 | rc = bnxt_re_test_qp_limits(rdev, init_attr: qp_init_attr, dev_attr); |
1531 | if (!rc) { |
1532 | rc = -EINVAL; |
1533 | goto fail; |
1534 | } |
1535 | |
1536 | qp->rdev = rdev; |
1537 | rc = bnxt_re_init_qp_attr(qp, pd, init_attr: qp_init_attr, udata); |
1538 | if (rc) |
1539 | goto fail; |
1540 | |
1541 | if (qp_init_attr->qp_type == IB_QPT_GSI && |
1542 | !(bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx))) { |
1543 | rc = bnxt_re_create_gsi_qp(qp, pd, init_attr: qp_init_attr); |
1544 | if (rc == -ENODEV) |
1545 | goto qp_destroy; |
1546 | if (rc) |
1547 | goto fail; |
1548 | } else { |
1549 | rc = bnxt_qplib_create_qp(res: &rdev->qplib_res, qp: &qp->qplib_qp); |
1550 | if (rc) { |
1551 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to create HW QP" ); |
1552 | goto free_umem; |
1553 | } |
1554 | if (udata) { |
1555 | struct bnxt_re_qp_resp resp; |
1556 | |
1557 | resp.qpid = qp->qplib_qp.id; |
1558 | resp.rsvd = 0; |
1559 | rc = ib_copy_to_udata(udata, src: &resp, len: sizeof(resp)); |
1560 | if (rc) { |
1561 | ibdev_err(ibdev: &rdev->ibdev, format: "Failed to copy QP udata" ); |
1562 | goto qp_destroy; |
1563 | } |
1564 | } |
1565 | } |
1566 | |
1567 | qp->ib_qp.qp_num = qp->qplib_qp.id; |
1568 | if (qp_init_attr->qp_type == IB_QPT_GSI) |
1569 | rdev->gsi_ctx.gsi_qp = qp; |
1570 | spin_lock_init(&qp->sq_lock); |
1571 | spin_lock_init(&qp->rq_lock); |
1572 | INIT_LIST_HEAD(list: &qp->list); |
1573 | mutex_lock(&rdev->qp_lock); |
1574 | list_add_tail(new: &qp->list, head: &rdev->qp_list); |
1575 | mutex_unlock(lock: &rdev->qp_lock); |
1576 | active_qps = atomic_inc_return(v: &rdev->stats.res.qp_count); |
1577 | if (active_qps > rdev->stats.res.qp_watermark) |
1578 | rdev->stats.res.qp_watermark = active_qps; |
1579 | if (qp_init_attr->qp_type == IB_QPT_RC) { |
1580 | active_qps = atomic_inc_return(v: &rdev->stats.res.rc_qp_count); |
1581 | if (active_qps > rdev->stats.res.rc_qp_watermark) |
1582 | rdev->stats.res.rc_qp_watermark = active_qps; |
1583 | } else if (qp_init_attr->qp_type == IB_QPT_UD) { |
1584 | active_qps = atomic_inc_return(v: &rdev->stats.res.ud_qp_count); |
1585 | if (active_qps > rdev->stats.res.ud_qp_watermark) |
1586 | rdev->stats.res.ud_qp_watermark = active_qps; |
1587 | } |
1588 | |
1589 | return 0; |
1590 | qp_destroy: |
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
1595 | fail: |
1596 | return rc; |
1597 | } |
1598 | |
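/* Helpers that map IB verbs QP states to the firmware CMDQ encoding and
 * back; unrecognized values fall back to the error state on both sides.
 */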
1599 | static u8 __from_ib_qp_state(enum ib_qp_state state) |
1600 | { |
1601 | switch (state) { |
1602 | case IB_QPS_RESET: |
1603 | return CMDQ_MODIFY_QP_NEW_STATE_RESET; |
1604 | case IB_QPS_INIT: |
1605 | return CMDQ_MODIFY_QP_NEW_STATE_INIT; |
1606 | case IB_QPS_RTR: |
1607 | return CMDQ_MODIFY_QP_NEW_STATE_RTR; |
1608 | case IB_QPS_RTS: |
1609 | return CMDQ_MODIFY_QP_NEW_STATE_RTS; |
1610 | case IB_QPS_SQD: |
1611 | return CMDQ_MODIFY_QP_NEW_STATE_SQD; |
1612 | case IB_QPS_SQE: |
1613 | return CMDQ_MODIFY_QP_NEW_STATE_SQE; |
1614 | case IB_QPS_ERR: |
1615 | default: |
1616 | return CMDQ_MODIFY_QP_NEW_STATE_ERR; |
1617 | } |
1618 | } |
1619 | |
1620 | static enum ib_qp_state __to_ib_qp_state(u8 state) |
1621 | { |
1622 | switch (state) { |
1623 | case CMDQ_MODIFY_QP_NEW_STATE_RESET: |
1624 | return IB_QPS_RESET; |
1625 | case CMDQ_MODIFY_QP_NEW_STATE_INIT: |
1626 | return IB_QPS_INIT; |
1627 | case CMDQ_MODIFY_QP_NEW_STATE_RTR: |
1628 | return IB_QPS_RTR; |
1629 | case CMDQ_MODIFY_QP_NEW_STATE_RTS: |
1630 | return IB_QPS_RTS; |
1631 | case CMDQ_MODIFY_QP_NEW_STATE_SQD: |
1632 | return IB_QPS_SQD; |
1633 | case CMDQ_MODIFY_QP_NEW_STATE_SQE: |
1634 | return IB_QPS_SQE; |
1635 | case CMDQ_MODIFY_QP_NEW_STATE_ERR: |
1636 | default: |
1637 | return IB_QPS_ERR; |
1638 | } |
1639 | } |
1640 | |
1641 | static u32 __from_ib_mtu(enum ib_mtu mtu) |
1642 | { |
1643 | switch (mtu) { |
1644 | case IB_MTU_256: |
1645 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_256; |
1646 | case IB_MTU_512: |
1647 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_512; |
1648 | case IB_MTU_1024: |
1649 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024; |
1650 | case IB_MTU_2048: |
1651 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; |
1652 | case IB_MTU_4096: |
1653 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096; |
1654 | default: |
1655 | return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; |
1656 | } |
1657 | } |
1658 | |
1659 | static enum ib_mtu __to_ib_mtu(u32 mtu) |
1660 | { |
1661 | switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) { |
1662 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_256: |
1663 | return IB_MTU_256; |
1664 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_512: |
1665 | return IB_MTU_512; |
1666 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024: |
1667 | return IB_MTU_1024; |
1668 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048: |
1669 | return IB_MTU_2048; |
1670 | case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096: |
1671 | return IB_MTU_4096; |
1672 | default: |
1673 | return IB_MTU_2048; |
1674 | } |
1675 | } |
1676 | |
1677 | /* Shared Receive Queues */ |
1678 | int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) |
1679 | { |
1680 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1681 | ib_srq); |
1682 | struct bnxt_re_dev *rdev = srq->rdev; |
1683 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
1684 | struct bnxt_qplib_nq *nq = NULL; |
1685 | |
1686 | if (qplib_srq->cq) |
1687 | nq = qplib_srq->cq->nq; |
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->stats.res.srq_count);
1691 | if (nq) |
1692 | nq->budget--; |
1693 | return 0; |
1694 | } |
1695 | |
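/* Pin the user-space SRQ buffer described by the ABI request and hand it
 * to qplib through sg_info so that HW can DMA directly to user memory.
 */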
1696 | static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, |
1697 | struct bnxt_re_pd *pd, |
1698 | struct bnxt_re_srq *srq, |
1699 | struct ib_udata *udata) |
1700 | { |
1701 | struct bnxt_re_srq_req ureq; |
1702 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
1703 | struct ib_umem *umem; |
1704 | int bytes = 0; |
1705 | struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( |
1706 | udata, struct bnxt_re_ucontext, ib_uctx); |
1707 | |
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);
1717 | |
1718 | srq->umem = umem; |
1719 | qplib_srq->sg_info.umem = umem; |
1720 | qplib_srq->sg_info.pgsize = PAGE_SIZE; |
1721 | qplib_srq->sg_info.pgshft = PAGE_SHIFT; |
1722 | qplib_srq->srq_handle = ureq.srq_handle; |
1723 | qplib_srq->dpi = &cntx->dpi; |
1724 | |
1725 | return 0; |
1726 | } |
1727 | |
1728 | int bnxt_re_create_srq(struct ib_srq *ib_srq, |
1729 | struct ib_srq_init_attr *srq_init_attr, |
1730 | struct ib_udata *udata) |
1731 | { |
1732 | struct bnxt_qplib_dev_attr *dev_attr; |
1733 | struct bnxt_qplib_nq *nq = NULL; |
1734 | struct bnxt_re_ucontext *uctx; |
1735 | struct bnxt_re_dev *rdev; |
1736 | struct bnxt_re_srq *srq; |
1737 | struct bnxt_re_pd *pd; |
1738 | struct ib_pd *ib_pd; |
1739 | u32 active_srqs; |
1740 | int rc, entries; |
1741 | |
1742 | ib_pd = ib_srq->pd; |
1743 | pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
1744 | rdev = pd->rdev; |
1745 | dev_attr = &rdev->dev_attr; |
1746 | srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); |
1747 | |
1748 | if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { |
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1750 | rc = -EINVAL; |
1751 | goto exit; |
1752 | } |
1753 | |
1754 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { |
1755 | rc = -EOPNOTSUPP; |
1756 | goto exit; |
1757 | } |
1758 | |
1759 | uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
1760 | srq->rdev = rdev; |
1761 | srq->qplib_srq.pd = &pd->qplib_pd; |
1762 | srq->qplib_srq.dpi = &rdev->dpi_privileged; |
1763 | /* Allocate 1 more than what's provided so posting max doesn't |
1764 | * mean empty |
1765 | */ |
	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1767 | if (entries > dev_attr->max_srq_wqes + 1) |
1768 | entries = dev_attr->max_srq_wqes + 1; |
1769 | srq->qplib_srq.max_wqe = entries; |
1770 | |
1771 | srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; |
	/* 128-byte wqe size for SRQ, so use the max SGEs */
	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1774 | srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; |
1775 | srq->srq_limit = srq_init_attr->attr.srq_limit; |
1776 | srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; |
1777 | nq = &rdev->nq[0]; |
1778 | |
1779 | if (udata) { |
1780 | rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); |
1781 | if (rc) |
1782 | goto fail; |
1783 | } |
1784 | |
	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1788 | goto fail; |
1789 | } |
1790 | |
1791 | if (udata) { |
1792 | struct bnxt_re_srq_resp resp; |
1793 | |
1794 | resp.srqid = srq->qplib_srq.id; |
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
1800 | goto fail; |
1801 | } |
1802 | } |
1803 | if (nq) |
1804 | nq->budget++; |
	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1806 | if (active_srqs > rdev->stats.res.srq_watermark) |
1807 | rdev->stats.res.srq_watermark = active_srqs; |
1808 | spin_lock_init(&srq->lock); |
1809 | |
1810 | return 0; |
1811 | |
1812 | fail: |
	ib_umem_release(srq->umem);
1814 | exit: |
1815 | return rc; |
1816 | } |
1817 | |
1818 | int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, |
1819 | enum ib_srq_attr_mask srq_attr_mask, |
1820 | struct ib_udata *udata) |
1821 | { |
1822 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1823 | ib_srq); |
1824 | struct bnxt_re_dev *rdev = srq->rdev; |
1825 | int rc; |
1826 | |
1827 | switch (srq_attr_mask) { |
1828 | case IB_SRQ_MAX_WR: |
1829 | /* SRQ resize is not supported */ |
1830 | return -EINVAL; |
1831 | case IB_SRQ_LIMIT: |
1832 | /* Change the SRQ threshold */ |
1833 | if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) |
1834 | return -EINVAL; |
1835 | |
1836 | srq->qplib_srq.threshold = srq_attr->srq_limit; |
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1840 | return rc; |
1841 | } |
1842 | /* On success, update the shadow */ |
1843 | srq->srq_limit = srq_attr->srq_limit; |
		/* No need to build and send a response back to udata */
1845 | return 0; |
1846 | default: |
		ibdev_err(&rdev->ibdev,
			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1849 | return -EINVAL; |
1850 | } |
1851 | } |
1852 | |
1853 | int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) |
1854 | { |
1855 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1856 | ib_srq); |
1857 | struct bnxt_re_srq tsrq; |
1858 | struct bnxt_re_dev *rdev = srq->rdev; |
1859 | int rc; |
1860 | |
1861 | /* Get live SRQ attr */ |
1862 | tsrq.qplib_srq.id = srq->qplib_srq.id; |
	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1866 | return rc; |
1867 | } |
1868 | srq_attr->max_wr = srq->qplib_srq.max_wqe; |
1869 | srq_attr->max_sge = srq->qplib_srq.max_sge; |
1870 | srq_attr->srq_limit = tsrq.qplib_srq.threshold; |
1871 | |
1872 | return 0; |
1873 | } |
1874 | |
1875 | int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr, |
1876 | const struct ib_recv_wr **bad_wr) |
1877 | { |
1878 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
1879 | ib_srq); |
1880 | struct bnxt_qplib_swqe wqe; |
1881 | unsigned long flags; |
1882 | int rc = 0; |
1883 | |
1884 | spin_lock_irqsave(&srq->lock, flags); |
1885 | while (wr) { |
1886 | /* Transcribe each ib_recv_wr to qplib_swqe */ |
1887 | wqe.num_sge = wr->num_sge; |
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
1900 | |
1901 | return rc; |
1902 | } |
1903 | static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, |
1904 | struct bnxt_re_qp *qp1_qp, |
1905 | int qp_attr_mask) |
1906 | { |
1907 | struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; |
1908 | int rc; |
1909 | |
1910 | if (qp_attr_mask & IB_QP_STATE) { |
1911 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; |
1912 | qp->qplib_qp.state = qp1_qp->qplib_qp.state; |
1913 | } |
1914 | if (qp_attr_mask & IB_QP_PKEY_INDEX) { |
1915 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; |
1916 | qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; |
1917 | } |
1918 | |
1919 | if (qp_attr_mask & IB_QP_QKEY) { |
1920 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; |
1921 | /* Using a Random QKEY */ |
1922 | qp->qplib_qp.qkey = 0x81818181; |
1923 | } |
1924 | if (qp_attr_mask & IB_QP_SQ_PSN) { |
1925 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; |
1926 | qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; |
1927 | } |
1928 | |
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1932 | return rc; |
1933 | } |
1934 | |
1935 | int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, |
1936 | int qp_attr_mask, struct ib_udata *udata) |
1937 | { |
1938 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
1939 | struct bnxt_re_dev *rdev = qp->rdev; |
1940 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
1941 | enum ib_qp_state curr_qp_state, new_qp_state; |
1942 | int rc, entries; |
1943 | unsigned int flags; |
1944 | u8 nw_type; |
1945 | |
1946 | if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS) |
1947 | return -EOPNOTSUPP; |
1948 | |
1949 | qp->qplib_qp.modify_flags = 0; |
1950 | if (qp_attr_mask & IB_QP_STATE) { |
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask)) {
			ibdev_err(&rdev->ibdev,
				  "Invalid attribute mask: %#x specified",
				  qp_attr_mask);
			ibdev_err(&rdev->ibdev,
				  "for qpn: %#x type: %#x",
				  ib_qp->qp_num, ib_qp->qp_type);
			ibdev_err(&rdev->ibdev,
				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
				  curr_qp_state, new_qp_state);
1964 | return -EINVAL; |
1965 | } |
1966 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; |
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1968 | |
1969 | if (!qp->sumem && |
1970 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { |
1971 | ibdev_dbg(&rdev->ibdev, |
1972 | "Move QP = %p to flush list\n" , qp); |
1973 | flags = bnxt_re_lock_cqs(qp); |
1974 | bnxt_qplib_add_flush_qp(qp: &qp->qplib_qp); |
1975 | bnxt_re_unlock_cqs(qp, flags); |
1976 | } |
1977 | if (!qp->sumem && |
1978 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { |
1979 | ibdev_dbg(&rdev->ibdev, |
1980 | "Move QP = %p out of flush list\n" , qp); |
1981 | flags = bnxt_re_lock_cqs(qp); |
1982 | bnxt_qplib_clean_qp(qp: &qp->qplib_qp); |
1983 | bnxt_re_unlock_cqs(qp, flags); |
1984 | } |
1985 | } |
1986 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { |
1987 | qp->qplib_qp.modify_flags |= |
1988 | CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY; |
1989 | qp->qplib_qp.en_sqd_async_notify = true; |
1990 | } |
1991 | if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { |
1992 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; |
1993 | qp->qplib_qp.access = |
			__from_ib_access_flags(qp_attr->qp_access_flags);
1995 | /* LOCAL_WRITE access must be set to allow RC receive */ |
1996 | qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; |
1997 | /* Temp: Set all params on QP as of now */ |
1998 | qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; |
1999 | qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; |
2000 | } |
2001 | if (qp_attr_mask & IB_QP_PKEY_INDEX) { |
2002 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; |
2003 | qp->qplib_qp.pkey_index = qp_attr->pkey_index; |
2004 | } |
2005 | if (qp_attr_mask & IB_QP_QKEY) { |
2006 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; |
2007 | qp->qplib_qp.qkey = qp_attr->qkey; |
2008 | } |
2009 | if (qp_attr_mask & IB_QP_AV) { |
2010 | const struct ib_global_route *grh = |
			rdma_ah_read_grh(&qp_attr->ah_attr);
2012 | const struct ib_gid_attr *sgid_attr; |
2013 | struct bnxt_re_gid_ctx *ctx; |
2014 | |
2015 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | |
2016 | CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | |
2017 | CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX | |
2018 | CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT | |
2019 | CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS | |
2020 | CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC | |
2021 | CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; |
2022 | memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, |
2023 | sizeof(qp->qplib_qp.ah.dgid.data)); |
2024 | qp->qplib_qp.ah.flow_label = grh->flow_label; |
2025 | sgid_attr = grh->sgid_attr; |
2026 | /* Get the HW context of the GID. The reference |
2027 | * of GID table entry is already taken by the caller. |
2028 | */ |
		ctx = rdma_read_gid_hw_context(sgid_attr);
		qp->qplib_qp.ah.sgid_index = ctx->idx;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
					     &qp->qplib_qp.smac[0]);
		if (rc)
			return rc;

		nw_type = rdma_gid_attr_network_type(sgid_attr);
2044 | switch (nw_type) { |
2045 | case RDMA_NETWORK_IPV4: |
2046 | qp->qplib_qp.nw_type = |
2047 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4; |
2048 | break; |
2049 | case RDMA_NETWORK_IPV6: |
2050 | qp->qplib_qp.nw_type = |
2051 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6; |
2052 | break; |
2053 | default: |
2054 | qp->qplib_qp.nw_type = |
2055 | CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1; |
2056 | break; |
2057 | } |
2058 | } |
2059 | |
2060 | if (qp_attr_mask & IB_QP_PATH_MTU) { |
2061 | qp->qplib_qp.modify_flags |= |
2062 | CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; |
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2072 | } |
2073 | |
2074 | if (qp_attr_mask & IB_QP_TIMEOUT) { |
2075 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; |
2076 | qp->qplib_qp.timeout = qp_attr->timeout; |
2077 | } |
2078 | if (qp_attr_mask & IB_QP_RETRY_CNT) { |
2079 | qp->qplib_qp.modify_flags |= |
2080 | CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT; |
2081 | qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; |
2082 | } |
2083 | if (qp_attr_mask & IB_QP_RNR_RETRY) { |
2084 | qp->qplib_qp.modify_flags |= |
2085 | CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY; |
2086 | qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; |
2087 | } |
2088 | if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) { |
2089 | qp->qplib_qp.modify_flags |= |
2090 | CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER; |
2091 | qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; |
2092 | } |
2093 | if (qp_attr_mask & IB_QP_RQ_PSN) { |
2094 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; |
2095 | qp->qplib_qp.rq.psn = qp_attr->rq_psn; |
2096 | } |
2097 | if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { |
2098 | qp->qplib_qp.modify_flags |= |
2099 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; |
2100 | /* Cap the max_rd_atomic to device max */ |
2101 | qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, |
2102 | dev_attr->max_qp_rd_atom); |
2103 | } |
2104 | if (qp_attr_mask & IB_QP_SQ_PSN) { |
2105 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; |
2106 | qp->qplib_qp.sq.psn = qp_attr->sq_psn; |
2107 | } |
2108 | if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
2109 | if (qp_attr->max_dest_rd_atomic > |
2110 | dev_attr->max_qp_init_rd_atom) { |
			ibdev_err(&rdev->ibdev,
				  "max_dest_rd_atomic requested %d is > dev_max %d",
2113 | qp_attr->max_dest_rd_atomic, |
2114 | dev_attr->max_qp_init_rd_atom); |
2115 | return -EINVAL; |
2116 | } |
2117 | |
2118 | qp->qplib_qp.modify_flags |= |
2119 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; |
2120 | qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; |
2121 | } |
2122 | if (qp_attr_mask & IB_QP_CAP) { |
2123 | struct bnxt_re_ucontext *uctx = |
2124 | rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
2125 | |
2126 | qp->qplib_qp.modify_flags |= |
2127 | CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE | |
2128 | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE | |
2129 | CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE | |
2130 | CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE | |
2131 | CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA; |
2132 | if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) || |
2133 | (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) || |
2134 | (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) || |
2135 | (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || |
2136 | (qp_attr->cap.max_inline_data >= |
2137 | dev_attr->max_inline_data)) { |
			ibdev_err(&rdev->ibdev,
				  "Modify QP failed - max exceeded");
2140 | return -EINVAL; |
2141 | } |
		entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2143 | qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
2144 | dev_attr->max_qp_wqes + 1); |
2145 | qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - |
2146 | qp_attr->cap.max_send_wr; |
2147 | /* |
2148 | * Reserving one slot for Phantom WQE. Some application can |
2149 | * post one extra entry in this case. Allowing this to avoid |
2150 | * unexpected Queue full condition |
2151 | */ |
2152 | qp->qplib_qp.sq.q_full_delta -= 1; |
2153 | qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; |
2154 | if (qp->qplib_qp.rq.max_wqe) { |
			entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2156 | qp->qplib_qp.rq.max_wqe = |
2157 | min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
2158 | qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - |
2159 | qp_attr->cap.max_recv_wr; |
2160 | qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; |
2161 | } else { |
2162 | /* SRQ was used prior, just ignore the RQ caps */ |
2163 | } |
2164 | } |
2165 | if (qp_attr_mask & IB_QP_DEST_QPN) { |
2166 | qp->qplib_qp.modify_flags |= |
2167 | CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID; |
2168 | qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; |
2169 | } |
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2173 | return rc; |
2174 | } |
2175 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) |
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2177 | return rc; |
2178 | } |
2179 | |
2180 | int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, |
2181 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) |
2182 | { |
2183 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2184 | struct bnxt_re_dev *rdev = qp->rdev; |
2185 | struct bnxt_qplib_qp *qplib_qp; |
2186 | int rc; |
2187 | |
	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2189 | if (!qplib_qp) |
2190 | return -ENOMEM; |
2191 | |
2192 | qplib_qp->id = qp->qplib_qp.id; |
2193 | qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; |
2194 | |
	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2198 | goto out; |
2199 | } |
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2215 | qp_attr->timeout = qplib_qp->timeout; |
2216 | qp_attr->retry_cnt = qplib_qp->retry_cnt; |
2217 | qp_attr->rnr_retry = qplib_qp->rnr_retry; |
2218 | qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; |
2219 | qp_attr->rq_psn = qplib_qp->rq.psn; |
2220 | qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; |
2221 | qp_attr->sq_psn = qplib_qp->sq.psn; |
2222 | qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; |
2223 | qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : |
2224 | IB_SIGNAL_REQ_WR; |
2225 | qp_attr->dest_qp_num = qplib_qp->dest_qpn; |
2226 | |
2227 | qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; |
2228 | qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; |
2229 | qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; |
2230 | qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; |
2231 | qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; |
2232 | qp_init_attr->cap = qp_attr->cap; |
2233 | |
2234 | out: |
	kfree(qplib_qp);
2236 | return rc; |
2237 | } |
2238 | |
/* Routine for sending QP1 packets for RoCE V1 and V2 */
2241 | static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp, |
2242 | const struct ib_send_wr *wr, |
2243 | struct bnxt_qplib_swqe *wqe, |
2244 | int payload_size) |
2245 | { |
2246 | struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, |
2247 | ib_ah); |
2248 | struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah; |
2249 | const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr; |
2250 | struct bnxt_qplib_sge sge; |
2251 | u8 nw_type; |
2252 | u16 ether_type; |
2253 | union ib_gid dgid; |
2254 | bool is_eth = false; |
2255 | bool is_vlan = false; |
2256 | bool is_grh = false; |
2257 | bool is_udp = false; |
2258 | u8 ip_version = 0; |
2259 | u16 vlan_id = 0xFFFF; |
2260 | void *buf; |
2261 | int i, rc; |
2262 | |
2263 | memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); |
2264 | |
	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2266 | if (rc) |
2267 | return rc; |
2268 | |
2269 | /* Get network header type for this GID */ |
	nw_type = rdma_gid_attr_network_type(sgid_attr);
2271 | switch (nw_type) { |
2272 | case RDMA_NETWORK_IPV4: |
2273 | nw_type = BNXT_RE_ROCEV2_IPV4_PACKET; |
2274 | break; |
2275 | case RDMA_NETWORK_IPV6: |
2276 | nw_type = BNXT_RE_ROCEV2_IPV6_PACKET; |
2277 | break; |
2278 | default: |
2279 | nw_type = BNXT_RE_ROCE_V1_PACKET; |
2280 | break; |
2281 | } |
2282 | memcpy(&dgid.raw, &qplib_ah->dgid, 16); |
2283 | is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; |
2284 | if (is_udp) { |
		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2286 | ip_version = 4; |
2287 | ether_type = ETH_P_IP; |
2288 | } else { |
2289 | ip_version = 6; |
2290 | ether_type = ETH_P_IPV6; |
2291 | } |
2292 | is_grh = false; |
2293 | } else { |
2294 | ether_type = ETH_P_IBOE; |
2295 | is_grh = true; |
2296 | } |
2297 | |
2298 | is_eth = true; |
2299 | is_vlan = vlan_id && (vlan_id < 0x1000); |
2300 | |
	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);
2303 | |
2304 | /* ETH */ |
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2307 | |
2308 | /* For vlan, check the sgid for vlan existence */ |
2309 | |
2310 | if (!is_vlan) { |
2311 | qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); |
2312 | } else { |
2313 | qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); |
2314 | qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id); |
2315 | } |
2316 | |
2317 | if (is_grh || (ip_version == 6)) { |
2318 | memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, |
2319 | sizeof(sgid_attr->gid)); |
2320 | memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, |
2321 | sizeof(sgid_attr->gid)); |
2322 | qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit; |
2323 | } |
2324 | |
2325 | if (ip_version == 4) { |
2326 | qp->qp1_hdr.ip4.tos = 0; |
2327 | qp->qp1_hdr.ip4.id = 0; |
2328 | qp->qp1_hdr.ip4.frag_off = htons(IP_DF); |
2329 | qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit; |
2330 | |
2331 | memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); |
2332 | memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4); |
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2334 | } |
2335 | |
2336 | if (is_udp) { |
2337 | qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); |
2338 | qp->qp1_hdr.udp.sport = htons(0x8CD1); |
2339 | qp->qp1_hdr.udp.csum = 0; |
2340 | } |
2341 | |
2342 | /* BTH */ |
2343 | if (wr->opcode == IB_WR_SEND_WITH_IMM) { |
2344 | qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; |
2345 | qp->qp1_hdr.immediate_present = 1; |
2346 | } else { |
2347 | qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; |
2348 | } |
2349 | if (wr->send_flags & IB_SEND_SOLICITED) |
2350 | qp->qp1_hdr.bth.solicited_event = 1; |
2351 | /* pad_count */ |
2352 | qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; |
2353 | |
2354 | /* P_key for QP1 is for all members */ |
2355 | qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); |
2356 | qp->qp1_hdr.bth.destination_qpn = IB_QP1; |
2357 | qp->qp1_hdr.bth.ack_req = 0; |
2358 | qp->send_psn++; |
2359 | qp->send_psn &= BTH_PSN_MASK; |
2360 | qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); |
2361 | /* DETH */ |
	/* Use the privileged Q_Key for QP1 */
2363 | qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); |
2364 | qp->qp1_hdr.deth.source_qpn = IB_QP1; |
2365 | |
2366 | /* Pack the QP1 to the transmit buffer */ |
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
2370 | for (i = wqe->num_sge; i; i--) { |
2371 | wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr; |
2372 | wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey; |
2373 | wqe->sg_list[i].size = wqe->sg_list[i - 1].size; |
2374 | } |
2375 | |
2376 | /* |
2377 | * Max Header buf size for IPV6 RoCE V2 is 86, |
2378 | * which is same as the QP1 SQ header buffer. |
2379 | * Header buf size for IPV4 RoCE V2 can be 66. |
2380 | * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). |
2381 | * Subtract 20 bytes from QP1 SQ header buf size |
2382 | */ |
2383 | if (is_udp && ip_version == 4) |
2384 | sge.size -= 20; |
2385 | /* |
2386 | * Max Header buf size for RoCE V1 is 78. |
2387 | * ETH(14) + VLAN(4) + GRH(40) + BTH(20). |
2388 | * Subtract 8 bytes from QP1 SQ header buf size |
2389 | */ |
2390 | if (!is_udp) |
2391 | sge.size -= 8; |
2392 | |
2393 | /* Subtract 4 bytes for non vlan packets */ |
2394 | if (!is_vlan) |
2395 | sge.size -= 4; |
2396 | |
2397 | wqe->sg_list[0].addr = sge.addr; |
2398 | wqe->sg_list[0].lkey = sge.lkey; |
2399 | wqe->sg_list[0].size = sge.size; |
2400 | wqe->num_sge++; |
2401 | |
2402 | } else { |
		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2404 | rc = -ENOMEM; |
2405 | } |
2406 | return rc; |
2407 | } |
2408 | |
2409 | /* For the MAD layer, it only provides the recv SGE the size of |
2410 | * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH, |
2411 | * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire |
2412 | * receive packet (334 bytes) with no VLAN and then copy the GRH |
2413 | * and the MAD datagram out to the provided SGE. |
2414 | */ |
2415 | static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, |
2416 | const struct ib_recv_wr *wr, |
2417 | struct bnxt_qplib_swqe *wqe, |
2418 | int payload_size) |
2419 | { |
2420 | struct bnxt_re_sqp_entries *sqp_entry; |
2421 | struct bnxt_qplib_sge ref, sge; |
2422 | struct bnxt_re_dev *rdev; |
2423 | u32 rq_prod_index; |
2424 | |
2425 | rdev = qp->rdev; |
2426 | |
	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2430 | return -ENOMEM; |
2431 | |
2432 | /* Create 1 SGE to receive the entire |
2433 | * ethernet packet |
2434 | */ |
2435 | /* Save the reference from ULP */ |
2436 | ref.addr = wqe->sg_list[0].addr; |
2437 | ref.lkey = wqe->sg_list[0].lkey; |
2438 | ref.size = wqe->sg_list[0].size; |
2439 | |
2440 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; |
2441 | |
2442 | /* SGE 1 */ |
2443 | wqe->sg_list[0].addr = sge.addr; |
2444 | wqe->sg_list[0].lkey = sge.lkey; |
2445 | wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; |
2446 | sge.size -= wqe->sg_list[0].size; |
2447 | |
2448 | sqp_entry->sge.addr = ref.addr; |
2449 | sqp_entry->sge.lkey = ref.lkey; |
2450 | sqp_entry->sge.size = ref.size; |
2451 | /* Store the wrid for reporting completion */ |
2452 | sqp_entry->wrid = wqe->wr_id; |
2453 | /* change the wqe->wrid to table index */ |
2454 | wqe->wr_id = rq_prod_index; |
2455 | return 0; |
2456 | } |
2457 | |
2458 | static int is_ud_qp(struct bnxt_re_qp *qp) |
2459 | { |
2460 | return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || |
2461 | qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); |
2462 | } |
2463 | |
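/* Translate a SEND-class ib_send_wr into a qplib SWQE. For UD and GSI
 * QPs the AH id, remote QKEY and destination QP number are carried in
 * the WQE as well.
 */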
2464 | static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, |
2465 | const struct ib_send_wr *wr, |
2466 | struct bnxt_qplib_swqe *wqe) |
2467 | { |
2468 | struct bnxt_re_ah *ah = NULL; |
2469 | |
2470 | if (is_ud_qp(qp)) { |
2471 | ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah); |
2472 | wqe->send.q_key = ud_wr(wr)->remote_qkey; |
2473 | wqe->send.dst_qp = ud_wr(wr)->remote_qpn; |
2474 | wqe->send.avid = ah->qplib_ah.id; |
2475 | } |
2476 | switch (wr->opcode) { |
2477 | case IB_WR_SEND: |
2478 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND; |
2479 | break; |
2480 | case IB_WR_SEND_WITH_IMM: |
2481 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM; |
2482 | wqe->send.imm_data = wr->ex.imm_data; |
2483 | break; |
2484 | case IB_WR_SEND_WITH_INV: |
2485 | wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV; |
2486 | wqe->send.inv_key = wr->ex.invalidate_rkey; |
2487 | break; |
2488 | default: |
2489 | return -EINVAL; |
2490 | } |
2491 | if (wr->send_flags & IB_SEND_SIGNALED) |
2492 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2493 | if (wr->send_flags & IB_SEND_FENCE) |
2494 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2495 | if (wr->send_flags & IB_SEND_SOLICITED) |
2496 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2497 | if (wr->send_flags & IB_SEND_INLINE) |
2498 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; |
2499 | |
2500 | return 0; |
2501 | } |
2502 | |
2503 | static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, |
2504 | struct bnxt_qplib_swqe *wqe) |
2505 | { |
2506 | switch (wr->opcode) { |
2507 | case IB_WR_RDMA_WRITE: |
2508 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE; |
2509 | break; |
2510 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2511 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM; |
2512 | wqe->rdma.imm_data = wr->ex.imm_data; |
2513 | break; |
2514 | case IB_WR_RDMA_READ: |
2515 | wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ; |
2516 | wqe->rdma.inv_key = wr->ex.invalidate_rkey; |
2517 | break; |
2518 | default: |
2519 | return -EINVAL; |
2520 | } |
2521 | wqe->rdma.remote_va = rdma_wr(wr)->remote_addr; |
2522 | wqe->rdma.r_key = rdma_wr(wr)->rkey; |
2523 | if (wr->send_flags & IB_SEND_SIGNALED) |
2524 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2525 | if (wr->send_flags & IB_SEND_FENCE) |
2526 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2527 | if (wr->send_flags & IB_SEND_SOLICITED) |
2528 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2529 | if (wr->send_flags & IB_SEND_INLINE) |
2530 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE; |
2531 | |
2532 | return 0; |
2533 | } |
2534 | |
2535 | static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, |
2536 | struct bnxt_qplib_swqe *wqe) |
2537 | { |
2538 | switch (wr->opcode) { |
2539 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2540 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; |
2541 | wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; |
2542 | wqe->atomic.swap_data = atomic_wr(wr)->swap; |
2543 | break; |
2544 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2545 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD; |
2546 | wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; |
2547 | break; |
2548 | default: |
2549 | return -EINVAL; |
2550 | } |
2551 | wqe->atomic.remote_va = atomic_wr(wr)->remote_addr; |
2552 | wqe->atomic.r_key = atomic_wr(wr)->rkey; |
2553 | if (wr->send_flags & IB_SEND_SIGNALED) |
2554 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2555 | if (wr->send_flags & IB_SEND_FENCE) |
2556 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2557 | if (wr->send_flags & IB_SEND_SOLICITED) |
2558 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2559 | return 0; |
2560 | } |
2561 | |
2562 | static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, |
2563 | struct bnxt_qplib_swqe *wqe) |
2564 | { |
2565 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; |
2566 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; |
2567 | |
2568 | if (wr->send_flags & IB_SEND_SIGNALED) |
2569 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2570 | if (wr->send_flags & IB_SEND_SOLICITED) |
2571 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2572 | |
2573 | return 0; |
2574 | } |
2575 | |
2576 | static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, |
2577 | struct bnxt_qplib_swqe *wqe) |
2578 | { |
2579 | struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr); |
2580 | struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl; |
2581 | int access = wr->access; |
2582 | |
2583 | wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0]; |
2584 | wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; |
2585 | wqe->frmr.page_list = mr->pages; |
2586 | wqe->frmr.page_list_len = mr->npages; |
2587 | wqe->frmr.levels = qplib_frpl->hwq.level; |
2588 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; |
2589 | |
2590 | if (wr->wr.send_flags & IB_SEND_SIGNALED) |
2591 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2592 | |
2593 | if (access & IB_ACCESS_LOCAL_WRITE) |
2594 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE; |
2595 | if (access & IB_ACCESS_REMOTE_READ) |
2596 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ; |
2597 | if (access & IB_ACCESS_REMOTE_WRITE) |
2598 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE; |
2599 | if (access & IB_ACCESS_REMOTE_ATOMIC) |
2600 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC; |
2601 | if (access & IB_ACCESS_MW_BIND) |
2602 | wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND; |
2603 | |
2604 | wqe->frmr.l_key = wr->key; |
2605 | wqe->frmr.length = wr->mr->length; |
2606 | wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); |
2607 | wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); |
2608 | wqe->frmr.va = wr->mr->iova; |
2609 | return 0; |
2610 | } |
2611 | |
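/* Gather all SGEs of an inline WR into the WQE's inline buffer. Returns
 * the total number of bytes copied, or -EINVAL if the payload exceeds
 * BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */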
2612 | static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev, |
2613 | const struct ib_send_wr *wr, |
2614 | struct bnxt_qplib_swqe *wqe) |
2615 | { |
2616 | /* Copy the inline data to the data field */ |
2617 | u8 *in_data; |
2618 | u32 i, sge_len; |
2619 | void *sge_addr; |
2620 | |
2621 | in_data = wqe->inline_data; |
2622 | for (i = 0; i < wr->num_sge; i++) { |
2623 | sge_addr = (void *)(unsigned long) |
2624 | wr->sg_list[i].addr; |
2625 | sge_len = wr->sg_list[i].length; |
2626 | |
2627 | if ((sge_len + wqe->inline_len) > |
2628 | BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { |
			ibdev_err(&rdev->ibdev,
				  "Inline data size requested > supported value");
2631 | return -EINVAL; |
2632 | } |
2634 | |
2635 | memcpy(in_data, sge_addr, sge_len); |
2636 | in_data += wr->sg_list[i].length; |
2637 | wqe->inline_len += wr->sg_list[i].length; |
2638 | } |
2639 | return wqe->inline_len; |
2640 | } |
2641 | |
2642 | static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, |
2643 | const struct ib_send_wr *wr, |
2644 | struct bnxt_qplib_swqe *wqe) |
2645 | { |
2646 | int payload_sz = 0; |
2647 | |
2648 | if (wr->send_flags & IB_SEND_INLINE) |
2649 | payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe); |
2650 | else |
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);
2653 | |
2654 | return payload_sz; |
2655 | } |
2656 | |
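/* HW workaround: UD/GSI/raw-ethertype QPs can stall after a burst of
 * WQEs. Reprogramming the QP state to RTS after every
 * BNXT_RE_UD_QP_HW_STALL posted WQEs nudges the HW out of the stall,
 * after which the WQE counter restarts.
 */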
2657 | static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) |
2658 | { |
2659 | if ((qp->ib_qp.qp_type == IB_QPT_UD || |
2660 | qp->ib_qp.qp_type == IB_QPT_GSI || |
2661 | qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && |
2662 | qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { |
2663 | int qp_attr_mask; |
2664 | struct ib_qp_attr qp_attr; |
2665 | |
2666 | qp_attr_mask = IB_QP_STATE; |
2667 | qp_attr.qp_state = IB_QPS_RTS; |
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2669 | qp->qplib_qp.wqe_cnt = 0; |
2670 | } |
2671 | } |
2672 | |
2673 | static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, |
2674 | struct bnxt_re_qp *qp, |
2675 | const struct ib_send_wr *wr) |
2676 | { |
2677 | int rc = 0, payload_sz = 0; |
2678 | unsigned long flags; |
2679 | |
2680 | spin_lock_irqsave(&qp->sq_lock, flags); |
2681 | while (wr) { |
2682 | struct bnxt_qplib_swqe wqe = {}; |
2683 | |
2684 | /* Common */ |
2685 | wqe.num_sge = wr->num_sge; |
2686 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Send SGEs");
2689 | rc = -EINVAL; |
2690 | goto bad; |
2691 | } |
2692 | |
		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2694 | if (payload_sz < 0) { |
2695 | rc = -EINVAL; |
2696 | goto bad; |
2697 | } |
2698 | wqe.wr_id = wr->wr_id; |
2699 | |
2700 | wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND; |
2701 | |
		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2705 | bad: |
2706 | if (rc) { |
			ibdev_err(&rdev->ibdev,
				  "Post send failed opcode = %#x rc = %d",
2709 | wr->opcode, rc); |
2710 | break; |
2711 | } |
2712 | wr = wr->next; |
2713 | } |
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
2717 | return rc; |
2718 | } |
2719 | |
2720 | static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe) |
2721 | { |
	/* Need unconditional fence for non-wire memory opcodes
	 * to work as expected.
	 */
2725 | if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV || |
2726 | wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR || |
2727 | wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR || |
2728 | wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW) |
2729 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; |
2730 | } |
2731 | |
2732 | int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, |
2733 | const struct ib_send_wr **bad_wr) |
2734 | { |
2735 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2736 | struct bnxt_qplib_swqe wqe; |
2737 | int rc = 0, payload_sz = 0; |
2738 | unsigned long flags; |
2739 | |
2740 | spin_lock_irqsave(&qp->sq_lock, flags); |
2741 | while (wr) { |
2742 | /* House keeping */ |
2743 | memset(&wqe, 0, sizeof(wqe)); |
2744 | |
2745 | /* Common */ |
2746 | wqe.num_sge = wr->num_sge; |
2747 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Send SGEs");
2750 | rc = -EINVAL; |
2751 | goto bad; |
2752 | } |
2753 | |
		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2755 | if (payload_sz < 0) { |
2756 | rc = -EINVAL; |
2757 | goto bad; |
2758 | } |
2759 | wqe.wr_id = wr->wr_id; |
2760 | |
2761 | switch (wr->opcode) { |
2762 | case IB_WR_SEND: |
2763 | case IB_WR_SEND_WITH_IMM: |
2764 | if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { |
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
2767 | if (rc) |
2768 | goto bad; |
2769 | wqe.rawqp1.lflags |= |
2770 | SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; |
2771 | } |
2772 | switch (wr->send_flags) { |
2773 | case IB_SEND_IP_CSUM: |
2774 | wqe.rawqp1.lflags |= |
2775 | SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; |
2776 | break; |
2777 | default: |
2778 | break; |
2779 | } |
2780 | fallthrough; |
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			ibdev_err(&qp->rdev->ibdev,
				  "RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			ibdev_err(&qp->rdev->ibdev,
				  "WR (%#x) is not supported", wr->opcode);
2808 | rc = -EINVAL; |
2809 | goto bad; |
2810 | } |
2811 | if (!rc) { |
			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
				bnxt_re_legacy_set_uc_fence(&wqe);
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2815 | } |
2816 | bad: |
2817 | if (rc) { |
			ibdev_err(&qp->rdev->ibdev,
				  "post_send failed op:%#x qps = %#x rc = %d\n",
2820 | wr->opcode, qp->qplib_qp.state, rc); |
2821 | *bad_wr = wr; |
2822 | break; |
2823 | } |
2824 | wr = wr->next; |
2825 | } |
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
2829 | |
2830 | return rc; |
2831 | } |
2832 | |
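/* Post receive buffers on the GSI shadow QP. Unlike the verbs receive
 * path this runs without taking rq_lock (it is presumably driven only
 * from driver context) and rings the RQ doorbell once after the whole
 * chain has been posted.
 */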
2833 | static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, |
2834 | struct bnxt_re_qp *qp, |
2835 | const struct ib_recv_wr *wr) |
2836 | { |
2837 | struct bnxt_qplib_swqe wqe; |
2838 | int rc = 0; |
2839 | |
2840 | while (wr) { |
2841 | /* House keeping */ |
2842 | memset(&wqe, 0, sizeof(wqe)); |
2843 | |
2844 | /* Common */ |
2845 | wqe.num_sge = wr->num_sge; |
2846 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
2849 | rc = -EINVAL; |
2850 | break; |
2851 | } |
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2857 | if (rc) |
2858 | break; |
2859 | |
2860 | wr = wr->next; |
2861 | } |
2862 | if (!rc) |
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2864 | return rc; |
2865 | } |
2866 | |
2867 | int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr, |
2868 | const struct ib_recv_wr **bad_wr) |
2869 | { |
2870 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
2871 | struct bnxt_qplib_swqe wqe; |
2872 | int rc = 0, payload_sz = 0; |
2873 | unsigned long flags; |
2874 | u32 count = 0; |
2875 | |
2876 | spin_lock_irqsave(&qp->rq_lock, flags); |
2877 | while (wr) { |
2878 | /* House keeping */ |
2879 | memset(&wqe, 0, sizeof(wqe)); |
2880 | |
2881 | /* Common */ |
2882 | wqe.num_sge = wr->num_sge; |
2883 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
2886 | rc = -EINVAL; |
2887 | *bad_wr = wr; |
2888 | break; |
2889 | } |
2890 | |
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
2893 | wqe.wr_id = wr->wr_id; |
2894 | wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; |
2895 | |
2896 | if (ib_qp->qp_type == IB_QPT_GSI && |
2897 | qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) |
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2902 | if (rc) { |
2903 | *bad_wr = wr; |
2904 | break; |
2905 | } |
2906 | |
2907 | /* Ring DB if the RQEs posted reaches a threshold value */ |
2908 | if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { |
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);
2920 | |
2921 | return rc; |
2922 | } |
2923 | |
2924 | /* Completion Queues */ |
2925 | int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
2926 | { |
2927 | struct bnxt_qplib_chip_ctx *cctx; |
2928 | struct bnxt_qplib_nq *nq; |
2929 | struct bnxt_re_dev *rdev; |
2930 | struct bnxt_re_cq *cq; |
2931 | |
2932 | cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
2933 | rdev = cq->rdev; |
2934 | nq = cq->qplib_cq.nq; |
2935 | cctx = rdev->chip_ctx; |
2936 | |
2937 | if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) { |
		free_page((unsigned long)cq->uctx_cq_page);
		hash_del(&cq->hash_entry);
	}
	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	ib_umem_release(cq->umem);

	atomic_dec(&rdev->stats.res.cq_count);
	nq->budget--;
	kfree(cq->cql);
2947 | return 0; |
2948 | } |
2949 | |
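/* bnxt_re_create_cq() - create a CQ for kernel or user consumers. User
 * CQs are backed by a pinned umem and, when the chip supports the CQ
 * toggle bit, also get a shared page mapped to user space; kernel CQs
 * allocate a small CQE staging array (cql) used while polling.
 */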
2950 | int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, |
2951 | struct ib_udata *udata) |
2952 | { |
2953 | struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); |
2954 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); |
2955 | struct bnxt_re_ucontext *uctx = |
2956 | rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
2957 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
2958 | struct bnxt_qplib_chip_ctx *cctx; |
2959 | struct bnxt_qplib_nq *nq = NULL; |
2960 | unsigned int nq_alloc_cnt; |
2961 | int cqe = attr->cqe; |
2962 | int rc, entries; |
2963 | u32 active_cqs; |
2964 | |
2965 | if (attr->flags) |
2966 | return -EOPNOTSUPP; |
2967 | |
2968 | /* Validate CQ fields */ |
2969 | if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { |
		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
2971 | return -EINVAL; |
2972 | } |
2973 | |
2974 | cq->rdev = rdev; |
2975 | cctx = rdev->chip_ctx; |
2976 | cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); |
2977 | |
	entries = bnxt_re_init_depth(cqe + 1, uctx);
2979 | if (entries > dev_attr->max_cq_wqes + 1) |
2980 | entries = dev_attr->max_cq_wqes + 1; |
2981 | |
2982 | cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; |
2983 | cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; |
2984 | if (udata) { |
		struct bnxt_re_cq_req req;

		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2987 | rc = -EFAULT; |
2988 | goto fail; |
2989 | } |
2990 | |
		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
2996 | goto fail; |
2997 | } |
2998 | cq->qplib_cq.sg_info.umem = cq->umem; |
2999 | cq->qplib_cq.dpi = &uctx->dpi; |
3000 | } else { |
3001 | cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); |
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3003 | GFP_KERNEL); |
3004 | if (!cq->cql) { |
3005 | rc = -ENOMEM; |
3006 | goto fail; |
3007 | } |
3008 | |
3009 | cq->qplib_cq.dpi = &rdev->dpi_privileged; |
3010 | } |
3011 | /* |
3012 | * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a |
3013 | * used for getting the NQ index. |
3014 | */ |
3015 | nq_alloc_cnt = atomic_inc_return(v: &rdev->nq_alloc_cnt); |
3016 | nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)]; |
3017 | cq->qplib_cq.max_wqe = entries; |
3018 | cq->qplib_cq.cnq_hw_ring_id = nq->ring_id; |
3019 | cq->qplib_cq.nq = nq; |
3020 | |
	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3024 | goto fail; |
3025 | } |
3026 | |
3027 | cq->ib_cq.cqe = entries; |
3028 | cq->cq_period = cq->qplib_cq.period; |
3029 | nq->budget++; |
3030 | |
3031 | active_cqs = atomic_inc_return(v: &rdev->stats.res.cq_count); |
3032 | if (active_cqs > rdev->stats.res.cq_watermark) |
3033 | rdev->stats.res.cq_watermark = active_cqs; |
3034 | spin_lock_init(&cq->cq_lock); |
3035 | |
3036 | if (udata) { |
3037 | struct bnxt_re_cq_resp resp = {}; |
3038 | |
3039 | if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) { |
3040 | hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id); |
3041 | /* Allocate a page */ |
3042 | cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL); |
3043 | if (!cq->uctx_cq_page) { |
3044 | rc = -ENOMEM; |
3045 | goto c2fail; |
3046 | } |
3047 | resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT; |
3048 | } |
3049 | resp.cqid = cq->qplib_cq.id; |
3050 | resp.tail = cq->qplib_cq.hwq.cons; |
3051 | resp.phase = cq->qplib_cq.period; |
3052 | resp.rsvd = 0; |
		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3057 | goto free_mem; |
3058 | } |
3059 | } |
3060 | |
3061 | return 0; |
3062 | |
3063 | free_mem: |
3064 | free_page((unsigned long)cq->uctx_cq_page); |
3065 | c2fail: |
	ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
3069 | return rc; |
3070 | } |
3071 | |
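/* CQ resize is a two-phase operation: bnxt_re_resize_cq() below pins the
 * new user buffer and issues the firmware resize, while this completion
 * handler switches the CQ over to the new umem and releases the old one.
 */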
3072 | static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq) |
3073 | { |
3074 | struct bnxt_re_dev *rdev = cq->rdev; |
3075 | |
	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3077 | |
3078 | cq->qplib_cq.max_wqe = cq->resize_cqe; |
3079 | if (cq->resize_umem) { |
		ib_umem_release(cq->umem);
3081 | cq->umem = cq->resize_umem; |
3082 | cq->resize_umem = NULL; |
3083 | cq->resize_cqe = 0; |
3084 | } |
3085 | } |
3086 | |
3087 | int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) |
3088 | { |
3089 | struct bnxt_qplib_sg_info sg_info = {}; |
3090 | struct bnxt_qplib_dpi *orig_dpi = NULL; |
3091 | struct bnxt_qplib_dev_attr *dev_attr; |
3092 | struct bnxt_re_ucontext *uctx = NULL; |
3093 | struct bnxt_re_resize_cq_req req; |
3094 | struct bnxt_re_dev *rdev; |
3095 | struct bnxt_re_cq *cq; |
3096 | int rc, entries; |
3097 | |
3098 | cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); |
3099 | rdev = cq->rdev; |
3100 | dev_attr = &rdev->dev_attr; |
3101 | if (!ibcq->uobject) { |
		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3103 | return -EOPNOTSUPP; |
3104 | } |
3105 | |
3106 | if (cq->resize_umem) { |
		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3108 | cq->qplib_cq.id); |
3109 | return -EBUSY; |
3110 | } |
3111 | |
	/* Check that the requested CQ depth is within the supported range */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3115 | cq->qplib_cq.id, cqe); |
3116 | return -EINVAL; |
3117 | } |
3118 | |
3119 | uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx); |
	entries = bnxt_re_init_depth(cqe + 1, uctx);
3121 | if (entries > dev_attr->max_cq_wqes + 1) |
3122 | entries = dev_attr->max_cq_wqes + 1; |
3123 | |
3124 | /* uverbs consumer */ |
	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3126 | rc = -EFAULT; |
3127 | goto fail; |
3128 | } |
3129 | |
	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
				      entries * sizeof(struct cq_base),
				      IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->resize_umem)) {
		rc = PTR_ERR(cq->resize_umem);
		cq->resize_umem = NULL;
		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3137 | __func__, rc); |
3138 | goto fail; |
3139 | } |
3140 | cq->resize_cqe = entries; |
3141 | memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info)); |
3142 | orig_dpi = cq->qplib_cq.dpi; |
3143 | |
3144 | cq->qplib_cq.sg_info.umem = cq->resize_umem; |
3145 | cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; |
3146 | cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; |
3147 | cq->qplib_cq.dpi = &uctx->dpi; |
3148 | |
	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3152 | cq->qplib_cq.id); |
3153 | goto fail; |
3154 | } |
3155 | |
3156 | cq->ib_cq.cqe = cq->resize_cqe; |
	atomic_inc(&rdev->stats.res.resize_count);
3158 | |
3159 | return 0; |
3160 | |
3161 | fail: |
3162 | if (cq->resize_umem) { |
		ib_umem_release(cq->resize_umem);
3164 | cq->resize_umem = NULL; |
3165 | cq->resize_cqe = 0; |
3166 | memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info)); |
3167 | cq->qplib_cq.dpi = orig_dpi; |
3168 | } |
3169 | return rc; |
3170 | } |
3171 | |
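/* Translate a requester (SQ) CQE completion status from the HW encoding
 * to the generic ib_wc_status values reported to the IB core; unknown
 * codes map to IB_WC_GENERAL_ERR.
 */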
3172 | static u8 __req_to_ib_wc_status(u8 qstatus) |
3173 | { |
3174 | switch (qstatus) { |
3175 | case CQ_REQ_STATUS_OK: |
3176 | return IB_WC_SUCCESS; |
3177 | case CQ_REQ_STATUS_BAD_RESPONSE_ERR: |
3178 | return IB_WC_BAD_RESP_ERR; |
3179 | case CQ_REQ_STATUS_LOCAL_LENGTH_ERR: |
3180 | return IB_WC_LOC_LEN_ERR; |
3181 | case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR: |
3182 | return IB_WC_LOC_QP_OP_ERR; |
3183 | case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR: |
3184 | return IB_WC_LOC_PROT_ERR; |
3185 | case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR: |
3186 | return IB_WC_GENERAL_ERR; |
3187 | case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR: |
3188 | return IB_WC_REM_INV_REQ_ERR; |
3189 | case CQ_REQ_STATUS_REMOTE_ACCESS_ERR: |
3190 | return IB_WC_REM_ACCESS_ERR; |
3191 | case CQ_REQ_STATUS_REMOTE_OPERATION_ERR: |
3192 | return IB_WC_REM_OP_ERR; |
3193 | case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR: |
3194 | return IB_WC_RNR_RETRY_EXC_ERR; |
3195 | case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR: |
3196 | return IB_WC_RETRY_EXC_ERR; |
3197 | case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3198 | return IB_WC_WR_FLUSH_ERR; |
3199 | default: |
3200 | return IB_WC_GENERAL_ERR; |
3201 | } |
3203 | } |
3204 | |
3205 | static u8 __rawqp1_to_ib_wc_status(u8 qstatus) |
3206 | { |
3207 | switch (qstatus) { |
3208 | case CQ_RES_RAWETH_QP1_STATUS_OK: |
3209 | return IB_WC_SUCCESS; |
3210 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR: |
3211 | return IB_WC_LOC_ACCESS_ERR; |
3212 | case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR: |
3213 | return IB_WC_LOC_LEN_ERR; |
3214 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR: |
3215 | return IB_WC_LOC_PROT_ERR; |
3216 | case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR: |
3217 | return IB_WC_LOC_QP_OP_ERR; |
3218 | case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR: |
3219 | return IB_WC_GENERAL_ERR; |
3220 | case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3221 | return IB_WC_WR_FLUSH_ERR; |
3222 | case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR: |
3223 | return IB_WC_WR_FLUSH_ERR; |
3224 | default: |
3225 | return IB_WC_GENERAL_ERR; |
3226 | } |
3227 | } |
3228 | |
3229 | static u8 __rc_to_ib_wc_status(u8 qstatus) |
3230 | { |
3231 | switch (qstatus) { |
3232 | case CQ_RES_RC_STATUS_OK: |
3233 | return IB_WC_SUCCESS; |
3234 | case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR: |
3235 | return IB_WC_LOC_ACCESS_ERR; |
3236 | case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR: |
3237 | return IB_WC_LOC_LEN_ERR; |
3238 | case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR: |
3239 | return IB_WC_LOC_PROT_ERR; |
3240 | case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR: |
3241 | return IB_WC_LOC_QP_OP_ERR; |
3242 | case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR: |
3243 | return IB_WC_GENERAL_ERR; |
3244 | case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR: |
3245 | return IB_WC_REM_INV_REQ_ERR; |
3246 | case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR: |
3247 | return IB_WC_WR_FLUSH_ERR; |
3248 | case CQ_RES_RC_STATUS_HW_FLUSH_ERR: |
3249 | return IB_WC_WR_FLUSH_ERR; |
3250 | default: |
3251 | return IB_WC_GENERAL_ERR; |
3252 | } |
3253 | } |
3254 | |
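/* Fill in a send-side work completion: derive the ib_wc opcode and flags
 * from the SWQE type recorded in the qplib CQE and translate the HW
 * completion status.
 */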
3255 | static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) |
3256 | { |
3257 | switch (cqe->type) { |
3258 | case BNXT_QPLIB_SWQE_TYPE_SEND: |
3259 | wc->opcode = IB_WC_SEND; |
3260 | break; |
3261 | case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: |
3262 | wc->opcode = IB_WC_SEND; |
3263 | wc->wc_flags |= IB_WC_WITH_IMM; |
3264 | break; |
3265 | case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: |
3266 | wc->opcode = IB_WC_SEND; |
3267 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
3268 | break; |
3269 | case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE: |
3270 | wc->opcode = IB_WC_RDMA_WRITE; |
3271 | break; |
3272 | case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM: |
3273 | wc->opcode = IB_WC_RDMA_WRITE; |
3274 | wc->wc_flags |= IB_WC_WITH_IMM; |
3275 | break; |
3276 | case BNXT_QPLIB_SWQE_TYPE_RDMA_READ: |
3277 | wc->opcode = IB_WC_RDMA_READ; |
3278 | break; |
3279 | case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP: |
3280 | wc->opcode = IB_WC_COMP_SWAP; |
3281 | break; |
3282 | case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD: |
3283 | wc->opcode = IB_WC_FETCH_ADD; |
3284 | break; |
3285 | case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV: |
3286 | wc->opcode = IB_WC_LOCAL_INV; |
3287 | break; |
3288 | case BNXT_QPLIB_SWQE_TYPE_REG_MR: |
3289 | wc->opcode = IB_WC_REG_MR; |
3290 | break; |
3291 | default: |
3292 | wc->opcode = IB_WC_SEND; |
3293 | break; |
3294 | } |
3295 | |
	wc->status = __req_to_ib_wc_status(cqe->status);
3297 | } |
3298 | |
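/* Classify a raw QP1 receive as RoCE v1, RoCE v2/IPv4 or RoCE v2/IPv6
 * from the itype and checksum-calculated bits in the CQE flags. Returns
 * a negative value for packets that are not RoCE.
 */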
3299 | static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, |
3300 | u16 raweth_qp1_flags2) |
3301 | { |
3302 | bool is_ipv6 = false, is_ipv4 = false; |
3303 | |
3304 | /* raweth_qp1_flags Bit 9-6 indicates itype */ |
3305 | if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) |
3306 | != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE) |
3307 | return -1; |
3308 | |
3309 | if (raweth_qp1_flags2 & |
3310 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC && |
3311 | raweth_qp1_flags2 & |
3312 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) { |
		/* raweth_qp1_flags2 Bit 8 indicates ip_type: 0 - v4, 1 - v6 */
3314 | (raweth_qp1_flags2 & |
3315 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ? |
3316 | (is_ipv6 = true) : (is_ipv4 = true); |
3317 | return ((is_ipv6) ? |
3318 | BNXT_RE_ROCEV2_IPV6_PACKET : |
3319 | BNXT_RE_ROCEV2_IPV4_PACKET); |
3320 | } else { |
3321 | return BNXT_RE_ROCE_V1_PACKET; |
3322 | } |
3323 | } |
3324 | |
3325 | static int bnxt_re_to_ib_nw_type(int nw_type) |
3326 | { |
3327 | u8 nw_hdr_type = 0xFF; |
3328 | |
3329 | switch (nw_type) { |
3330 | case BNXT_RE_ROCE_V1_PACKET: |
3331 | nw_hdr_type = RDMA_NETWORK_ROCE_V1; |
3332 | break; |
3333 | case BNXT_RE_ROCEV2_IPV4_PACKET: |
3334 | nw_hdr_type = RDMA_NETWORK_IPV4; |
3335 | break; |
3336 | case BNXT_RE_ROCEV2_IPV6_PACKET: |
3337 | nw_hdr_type = RDMA_NETWORK_IPV6; |
3338 | break; |
3339 | } |
3340 | return nw_hdr_type; |
3341 | } |
3342 | |
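/* Inspect the receive header buffer to determine whether a QP1 packet
 * was looped back locally: either a RoCE v1 ethertype frame or a RoCE v2
 * UDP datagram destined to ROCE_V2_UDP_DPORT.
 */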
3343 | static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, |
3344 | void *rq_hdr_buf) |
3345 | { |
3346 | u8 *tmp_buf = NULL; |
3347 | struct ethhdr *eth_hdr; |
3348 | u16 eth_type; |
3349 | bool rc = false; |
3350 | |
3351 | tmp_buf = (u8 *)rq_hdr_buf; |
3352 | /* |
3353 | * If dest mac is not same as I/F mac, this could be a |
3354 | * loopback address or multicast address, check whether |
3355 | * it is a loopback packet |
3356 | */ |
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3358 | tmp_buf += 4; |
3359 | /* Check the ether type */ |
3360 | eth_hdr = (struct ethhdr *)tmp_buf; |
3361 | eth_type = ntohs(eth_hdr->h_proto); |
3362 | switch (eth_type) { |
3363 | case ETH_P_IBOE: |
3364 | rc = true; |
3365 | break; |
3366 | case ETH_P_IP: |
3367 | case ETH_P_IPV6: { |
3368 | u32 len; |
3369 | struct udphdr *udp_hdr; |
3370 | |
3371 | len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) : |
3372 | sizeof(struct ipv6hdr)); |
3373 | tmp_buf += sizeof(struct ethhdr) + len; |
3374 | udp_hdr = (struct udphdr *)tmp_buf; |
3375 | if (ntohs(udp_hdr->dest) == |
3376 | ROCE_V2_UDP_DPORT) |
3377 | rc = true; |
3378 | break; |
3379 | } |
3380 | default: |
3381 | break; |
3382 | } |
3383 | } |
3384 | |
3385 | return rc; |
3386 | } |
3387 | |
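/* Relay a raw QP1 receive through the shadow GSI QP: save the original
 * CQE in the SQP table, build send/receive SGEs that skip the Ethernet
 * (and any loopback) header, re-post a receive buffer on the shadow QP
 * and send the payload to it so that it completes as a regular GSI
 * receive.
 */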
3388 | static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, |
3389 | struct bnxt_qplib_cqe *cqe) |
3390 | { |
3391 | struct bnxt_re_dev *rdev = gsi_qp->rdev; |
3392 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3393 | struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; |
3394 | dma_addr_t shrq_hdr_buf_map; |
3395 | struct ib_sge s_sge[2] = {}; |
3396 | struct ib_sge r_sge[2] = {}; |
3397 | struct bnxt_re_ah *gsi_sah; |
3398 | struct ib_recv_wr rwr = {}; |
3399 | dma_addr_t rq_hdr_buf_map; |
3400 | struct ib_ud_wr udwr = {}; |
3401 | struct ib_send_wr *swr; |
3402 | u32 skip_bytes = 0; |
3403 | int pkt_type = 0; |
3404 | void *rq_hdr_buf; |
3405 | u32 offset = 0; |
3406 | u32 tbl_idx; |
3407 | int rc; |
3408 | |
3409 | swr = &udwr.wr; |
3410 | tbl_idx = cqe->wr_id; |
3411 | |
3412 | rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + |
3413 | (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); |
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
							    tbl_idx);
3420 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3421 | |
3422 | /* Store this cqe */ |
3423 | memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); |
3424 | sqp_entry->qp1_qp = gsi_qp; |
3425 | |
3426 | /* Find packet type from the cqe */ |
3427 | |
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3432 | return -EINVAL; |
3433 | } |
3434 | |
3435 | /* Adjust the offset for the user buffer and post in the rq */ |
3436 | |
3437 | if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET) |
3438 | offset = 20; |
3439 | |
3440 | /* |
3441 | * QP1 loopback packet has 4 bytes of internal header before |
3442 | * ether header. Skip these four bytes. |
3443 | */ |
3444 | if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) |
3445 | skip_bytes = 4; |
3446 | |
	/* First send SGE. Skip the ether header */
3448 | s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE |
3449 | + skip_bytes; |
3450 | s_sge[0].lkey = 0xFFFFFFFF; |
3451 | s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 : |
3452 | BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6; |
3453 | |
3454 | /* Second Send SGE */ |
3455 | s_sge[1].addr = s_sge[0].addr + s_sge[0].length + |
3456 | BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE; |
3457 | if (pkt_type != BNXT_RE_ROCE_V1_PACKET) |
3458 | s_sge[1].addr += 8; |
3459 | s_sge[1].lkey = 0xFFFFFFFF; |
3460 | s_sge[1].length = 256; |
3461 | |
3462 | /* First recv SGE */ |
3463 | |
3464 | r_sge[0].addr = shrq_hdr_buf_map; |
3465 | r_sge[0].lkey = 0xFFFFFFFF; |
3466 | r_sge[0].length = 40; |
3467 | |
3468 | r_sge[1].addr = sqp_entry->sge.addr + offset; |
3469 | r_sge[1].lkey = sqp_entry->sge.lkey; |
3470 | r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset; |
3471 | |
3472 | /* Create receive work request */ |
3473 | rwr.num_sge = 2; |
3474 | rwr.sg_list = r_sge; |
3475 | rwr.wr_id = tbl_idx; |
3476 | rwr.next = NULL; |
3477 | |
	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to post Rx buffers to shadow QP");
3482 | return -ENOMEM; |
3483 | } |
3484 | |
3485 | swr->num_sge = 2; |
3486 | swr->sg_list = s_sge; |
3487 | swr->wr_id = tbl_idx; |
3488 | swr->opcode = IB_WR_SEND; |
3489 | swr->next = NULL; |
3490 | gsi_sah = rdev->gsi_ctx.gsi_sah; |
3491 | udwr.ah = &gsi_sah->ib_ah; |
3492 | udwr.remote_qpn = gsi_sqp->qplib_qp.id; |
3493 | udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; |
3494 | |
3495 | /* post data received in the send queue */ |
	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3497 | } |
3498 | |
3499 | static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, |
3500 | struct bnxt_qplib_cqe *cqe) |
3501 | { |
3502 | wc->opcode = IB_WC_RECV; |
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3504 | wc->wc_flags |= IB_WC_GRH; |
3505 | } |
3506 | |
3507 | static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, |
3508 | u16 vlan_id) |
3509 | { |
	/*
	 * Check if the VLAN is configured in the host. If not configured, it
	 * can be a transparent VLAN, so don't report the VLAN id.
	 */
	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
				      htons(ETH_P_8021Q), vlan_id))
3516 | return false; |
3517 | return true; |
3518 | } |
3519 | |
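/* Extract the VLAN id and priority from the raw QP1 CQE metadata when
 * the metadata format field indicates an 802.1Q tag.
 */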
3520 | static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, |
3521 | u16 *vid, u8 *sl) |
3522 | { |
3523 | bool ret = false; |
3524 | u32 metadata; |
3525 | u16 tpid; |
3526 | |
3527 | metadata = orig_cqe->raweth_qp1_metadata; |
3528 | if (orig_cqe->raweth_qp1_flags2 & |
3529 | CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) { |
3530 | tpid = ((metadata & |
3531 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >> |
3532 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT); |
3533 | if (tpid == ETH_P_8021Q) { |
3534 | *vid = metadata & |
3535 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK; |
3536 | *sl = (metadata & |
3537 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >> |
3538 | CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT; |
3539 | ret = true; |
3540 | } |
3541 | } |
3542 | |
3543 | return ret; |
3544 | } |
3545 | |
3546 | static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, |
3547 | struct bnxt_qplib_cqe *cqe) |
3548 | { |
3549 | wc->opcode = IB_WC_RECV; |
	wc->status = __rc_to_ib_wc_status(cqe->status);
3551 | |
3552 | if (cqe->flags & CQ_RES_RC_FLAGS_IMM) |
3553 | wc->wc_flags |= IB_WC_WITH_IMM; |
3554 | if (cqe->flags & CQ_RES_RC_FLAGS_INV) |
3555 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
3556 | if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == |
3557 | (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) |
3558 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
3559 | } |
3560 | |
3561 | static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, |
3562 | struct ib_wc *wc, |
3563 | struct bnxt_qplib_cqe *cqe) |
3564 | { |
3565 | struct bnxt_re_dev *rdev = gsi_sqp->rdev; |
3566 | struct bnxt_re_qp *gsi_qp = NULL; |
3567 | struct bnxt_qplib_cqe *orig_cqe = NULL; |
3568 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3569 | int nw_type; |
3570 | u32 tbl_idx; |
3571 | u16 vlan_id; |
3572 | u8 sl; |
3573 | |
3574 | tbl_idx = cqe->wr_id; |
3575 | |
3576 | sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3577 | gsi_qp = sqp_entry->qp1_qp; |
3578 | orig_cqe = &sqp_entry->cqe; |
3579 | |
3580 | wc->wr_id = sqp_entry->wrid; |
3581 | wc->byte_len = orig_cqe->length; |
3582 | wc->qp = &gsi_qp->ib_qp; |
3583 | |
3584 | wc->ex.imm_data = orig_cqe->immdata; |
3585 | wc->src_qp = orig_cqe->src_qp; |
3586 | memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); |
	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3588 | if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { |
3589 | wc->vlan_id = vlan_id; |
3590 | wc->sl = sl; |
3591 | wc->wc_flags |= IB_WC_WITH_VLAN; |
3592 | } |
3593 | } |
3594 | wc->port_num = 1; |
3595 | wc->vendor_err = orig_cqe->status; |
3596 | |
3597 | wc->opcode = IB_WC_RECV; |
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3599 | wc->wc_flags |= IB_WC_GRH; |
3600 | |
	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
3603 | if (nw_type >= 0) { |
3604 | wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); |
3605 | wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; |
3606 | } |
3607 | } |
3608 | |
3609 | static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, |
3610 | struct ib_wc *wc, |
3611 | struct bnxt_qplib_cqe *cqe) |
3612 | { |
3613 | struct bnxt_re_dev *rdev; |
3614 | u16 vlan_id = 0; |
3615 | u8 nw_type; |
3616 | |
3617 | rdev = qp->rdev; |
3618 | wc->opcode = IB_WC_RECV; |
	wc->status = __rc_to_ib_wc_status(cqe->status);
3620 | |
3621 | if (cqe->flags & CQ_RES_UD_FLAGS_IMM) |
3622 | wc->wc_flags |= IB_WC_WITH_IMM; |
3623 | /* report only on GSI QP for Thor */ |
3624 | if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { |
3625 | wc->wc_flags |= IB_WC_GRH; |
3626 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
3627 | wc->wc_flags |= IB_WC_WITH_SMAC; |
		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN)
			vlan_id = (cqe->cfa_meta & 0xFFF);
		/* Mark only if vlan_id is non-zero */
3632 | if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { |
3633 | wc->vlan_id = vlan_id; |
3634 | wc->wc_flags |= IB_WC_WITH_VLAN; |
3635 | } |
3636 | nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> |
3637 | CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; |
3638 | wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); |
3639 | wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; |
	}
}
3643 | |
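/* Post a fence-MW bind WQE on the SQ under the SQ lock and account it as
 * a phantom WQE. Invoked from the poll path when the qplib layer sets
 * sq->send_phantom.
 */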
3644 | static int send_phantom_wqe(struct bnxt_re_qp *qp) |
3645 | { |
3646 | struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; |
3647 | unsigned long flags; |
3648 | int rc; |
3649 | |
3650 | spin_lock_irqsave(&qp->sq_lock, flags); |
3651 | |
	rc = bnxt_re_bind_fence_mw(lib_qp);
3653 | if (!rc) { |
3654 | lib_qp->sq.phantom_wqe_cnt++; |
3655 | ibdev_dbg(&qp->rdev->ibdev, |
3656 | "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n" , |
3657 | lib_qp->id, lib_qp->sq.hwq.prod, |
3658 | HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), |
3659 | lib_qp->sq.phantom_wqe_cnt); |
3660 | } |
3661 | |
	spin_unlock_irqrestore(&qp->sq_lock, flags);
3663 | return rc; |
3664 | } |
3665 | |
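/* Poll up to num_entries completions from the CQ. User CQs only have a
 * pending resize completed here; kernel CQs drain the qplib CQ and its
 * flush list, transcribing each qplib CQE into an ib_wc. Completions on
 * the GSI QPs are redirected through the shadow QP handling above.
 */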
3666 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) |
3667 | { |
3668 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
3669 | struct bnxt_re_qp *qp, *sh_qp; |
3670 | struct bnxt_qplib_cqe *cqe; |
3671 | int i, ncqe, budget; |
3672 | struct bnxt_qplib_q *sq; |
3673 | struct bnxt_qplib_qp *lib_qp; |
3674 | u32 tbl_idx; |
3675 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
3676 | unsigned long flags; |
3677 | |
3678 | /* User CQ; the only processing we do is to |
3679 | * complete any pending CQ resize operation. |
3680 | */ |
3681 | if (cq->umem) { |
3682 | if (cq->resize_umem) |
3683 | bnxt_re_resize_cq_complete(cq); |
3684 | return 0; |
3685 | } |
3686 | |
3687 | spin_lock_irqsave(&cq->cq_lock, flags); |
3688 | budget = min_t(u32, num_entries, cq->max_cql); |
3689 | num_entries = budget; |
3690 | if (!cq->cql) { |
		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3692 | goto exit; |
3693 | } |
3694 | cqe = &cq->cql[0]; |
3695 | while (budget) { |
3696 | lib_qp = NULL; |
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3698 | if (lib_qp) { |
3699 | sq = &lib_qp->sq; |
3700 | if (sq->send_phantom) { |
3701 | qp = container_of(lib_qp, |
3702 | struct bnxt_re_qp, qplib_qp); |
3703 | if (send_phantom_wqe(qp) == -ENOMEM) |
					ibdev_err(&cq->rdev->ibdev,
						  "Phantom failed! Scheduled to send again\n");
3706 | else |
3707 | sq->send_phantom = false; |
3708 | } |
3709 | } |
3710 | if (ncqe < budget) |
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);
3714 | |
3715 | if (!ncqe) |
3716 | break; |
3717 | |
3718 | for (i = 0; i < ncqe; i++, cqe++) { |
3719 | /* Transcribe each qplib_wqe back to ib_wc */ |
3720 | memset(wc, 0, sizeof(*wc)); |
3721 | |
3722 | wc->wr_id = cqe->wr_id; |
3723 | wc->byte_len = cqe->length; |
3724 | qp = container_of |
3725 | ((struct bnxt_qplib_qp *) |
3726 | (unsigned long)(cqe->qp_handle), |
3727 | struct bnxt_re_qp, qplib_qp); |
3728 | wc->qp = &qp->ib_qp; |
3729 | wc->ex.imm_data = cqe->immdata; |
3730 | wc->src_qp = cqe->src_qp; |
3731 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
3732 | wc->port_num = 1; |
3733 | wc->vendor_err = cqe->status; |
3734 | |
3735 | switch (cqe->opcode) { |
3736 | case CQ_BASE_CQE_TYPE_REQ: |
3737 | sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
3738 | if (sh_qp && |
3739 | qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
3740 | /* Handle this completion with |
3741 | * the stored completion |
3742 | */ |
3743 | memset(wc, 0, sizeof(*wc)); |
3744 | continue; |
3745 | } |
3746 | bnxt_re_process_req_wc(wc, cqe); |
3747 | break; |
3748 | case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1: |
3749 | if (!cqe->status) { |
3750 | int rc = 0; |
3751 | |
				rc = bnxt_re_process_raw_qp_pkt_rx(qp, cqe);
3754 | if (!rc) { |
3755 | memset(wc, 0, sizeof(*wc)); |
3756 | continue; |
3757 | } |
3758 | cqe->status = -1; |
3759 | } |
3760 | /* Errors need not be looped back. |
3761 | * But change the wr_id to the one |
3762 | * stored in the table |
3763 | */ |
3764 | tbl_idx = cqe->wr_id; |
3765 | sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
3766 | wc->wr_id = sqp_entry->wrid; |
3767 | bnxt_re_process_res_rawqp1_wc(wc, cqe); |
3768 | break; |
3769 | case CQ_BASE_CQE_TYPE_RES_RC: |
3770 | bnxt_re_process_res_rc_wc(wc, cqe); |
3771 | break; |
3772 | case CQ_BASE_CQE_TYPE_RES_UD: |
3773 | sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
3774 | if (sh_qp && |
3775 | qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
3776 | /* Handle this completion with |
3777 | * the stored completion |
3778 | */ |
3779 | if (cqe->status) { |
3780 | continue; |
3781 | } else { |
						bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe);
3784 | break; |
3785 | } |
3786 | } |
3787 | bnxt_re_process_res_ud_wc(qp, wc, cqe); |
3788 | break; |
3789 | default: |
				ibdev_err(&cq->rdev->ibdev,
					  "POLL CQ : type 0x%x not handled",
3792 | cqe->opcode); |
3793 | continue; |
3794 | } |
3795 | wc++; |
3796 | budget--; |
3797 | } |
3798 | } |
3799 | exit: |
	spin_unlock_irqrestore(&cq->cq_lock, flags);
3801 | return num_entries - budget; |
3802 | } |
3803 | |
3804 | int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, |
3805 | enum ib_cq_notify_flags ib_cqn_flags) |
3806 | { |
3807 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
3808 | int type = 0, rc = 0; |
3809 | unsigned long flags; |
3810 | |
3811 | spin_lock_irqsave(&cq->cq_lock, flags); |
3812 | /* Trigger on the very next completion */ |
3813 | if (ib_cqn_flags & IB_CQ_NEXT_COMP) |
3814 | type = DBC_DBC_TYPE_CQ_ARMALL; |
3815 | /* Trigger on the next solicited completion */ |
3816 | else if (ib_cqn_flags & IB_CQ_SOLICITED) |
3817 | type = DBC_DBC_TYPE_CQ_ARMSE; |
3818 | |
3819 | /* Poll to see if there are missed events */ |
3820 | if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && |
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3822 | rc = 1; |
3823 | goto exit; |
3824 | } |
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3826 | |
3827 | exit: |
3828 | spin_unlock_irqrestore(lock: &cq->cq_lock, flags); |
3829 | return rc; |
3830 | } |
3831 | |
3832 | /* Memory Regions */ |
3833 | struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags) |
3834 | { |
3835 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
3836 | struct bnxt_re_dev *rdev = pd->rdev; |
3837 | struct bnxt_re_mr *mr; |
3838 | u32 active_mrs; |
3839 | int rc; |
3840 | |
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
3844 | |
3845 | mr->rdev = rdev; |
3846 | mr->qplib_mr.pd = &pd->qplib_pd; |
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3848 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; |
3849 | |
3850 | /* Allocate and register 0 as the address */ |
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3852 | if (rc) |
3853 | goto fail; |
3854 | |
3855 | mr->qplib_mr.hwq.level = PBL_LVL_MAX; |
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
			       PAGE_SIZE);
3859 | if (rc) |
3860 | goto fail_mr; |
3861 | |
3862 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
3863 | if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ | |
3864 | IB_ACCESS_REMOTE_ATOMIC)) |
3865 | mr->ib_mr.rkey = mr->ib_mr.lkey; |
	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3867 | if (active_mrs > rdev->stats.res.mr_watermark) |
3868 | rdev->stats.res.mr_watermark = active_mrs; |
3869 | |
3870 | return &mr->ib_mr; |
3871 | |
3872 | fail_mr: |
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
3877 | } |
3878 | |
3879 | int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
3880 | { |
3881 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
3882 | struct bnxt_re_dev *rdev = mr->rdev; |
3883 | int rc; |
3884 | |
	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3888 | return rc; |
3889 | } |
3890 | |
3891 | if (mr->pages) { |
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
3895 | mr->npages = 0; |
3896 | mr->pages = NULL; |
3897 | } |
	ib_umem_release(mr->ib_umem);
3899 | |
	kfree(mr);
	atomic_dec(&rdev->stats.res.mr_count);
3902 | return rc; |
3903 | } |
3904 | |
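/* ib_sg_to_pages() callback: record one DMA block address in the fast
 * register page list, bounded by the allocated list size.
 */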
3905 | static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr) |
3906 | { |
3907 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
3908 | |
3909 | if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) |
3910 | return -ENOMEM; |
3911 | |
3912 | mr->pages[mr->npages++] = addr; |
3913 | return 0; |
3914 | } |
3915 | |
3916 | int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, |
3917 | unsigned int *sg_offset) |
3918 | { |
3919 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
3920 | |
3921 | mr->npages = 0; |
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3923 | } |
3924 | |
3925 | struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type, |
3926 | u32 max_num_sg) |
3927 | { |
3928 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
3929 | struct bnxt_re_dev *rdev = pd->rdev; |
3930 | struct bnxt_re_mr *mr = NULL; |
3931 | u32 active_mrs; |
3932 | int rc; |
3933 | |
3934 | if (type != IB_MR_TYPE_MEM_REG) { |
		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
3937 | } |
3938 | if (max_num_sg > MAX_PBL_LVL_1_PGS) |
		return ERR_PTR(-EINVAL);
3940 | |
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
3944 | |
3945 | mr->rdev = rdev; |
3946 | mr->qplib_mr.pd = &pd->qplib_pd; |
3947 | mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR; |
3948 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; |
3949 | |
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3951 | if (rc) |
3952 | goto bail; |
3953 | |
3954 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
3955 | mr->ib_mr.rkey = mr->ib_mr.lkey; |
3956 | |
	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3958 | if (!mr->pages) { |
3959 | rc = -ENOMEM; |
3960 | goto fail; |
3961 | } |
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW FR page list");
3967 | goto fail_mr; |
3968 | } |
3969 | |
	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3971 | if (active_mrs > rdev->stats.res.mr_watermark) |
3972 | rdev->stats.res.mr_watermark = active_mrs; |
3973 | return &mr->ib_mr; |
3974 | |
3975 | fail_mr: |
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
3982 | } |
3983 | |
3984 | struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, |
3985 | struct ib_udata *udata) |
3986 | { |
3987 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
3988 | struct bnxt_re_dev *rdev = pd->rdev; |
3989 | struct bnxt_re_mw *mw; |
3990 | u32 active_mws; |
3991 | int rc; |
3992 | |
	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
3996 | mw->rdev = rdev; |
3997 | mw->qplib_mw.pd = &pd->qplib_pd; |
3998 | |
3999 | mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? |
4000 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : |
4001 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); |
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4005 | goto fail; |
4006 | } |
4007 | mw->ib_mw.rkey = mw->qplib_mw.rkey; |
4008 | |
	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4010 | if (active_mws > rdev->stats.res.mw_watermark) |
4011 | rdev->stats.res.mw_watermark = active_mws; |
4012 | return &mw->ib_mw; |
4013 | |
4014 | fail: |
	kfree(mw);
	return ERR_PTR(rc);
4017 | } |
4018 | |
4019 | int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) |
4020 | { |
4021 | struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); |
4022 | struct bnxt_re_dev *rdev = mw->rdev; |
4023 | int rc; |
4024 | |
	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4028 | return rc; |
4029 | } |
4030 | |
	kfree(mw);
	atomic_dec(&rdev->stats.res.mw_count);
4033 | return rc; |
4034 | } |
4035 | |
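/* Common user-MR registration path shared by bnxt_re_reg_user_mr() and
 * the dmabuf variant: validate the length and supported page size, then
 * allocate the HW MRW and register the umem pages with the device.
 */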
4036 | static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr, |
4037 | int mr_access_flags, struct ib_umem *umem) |
4038 | { |
4039 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4040 | struct bnxt_re_dev *rdev = pd->rdev; |
4041 | unsigned long page_size; |
4042 | struct bnxt_re_mr *mr; |
4043 | int umem_pgs, rc; |
4044 | u32 active_mrs; |
4045 | |
4046 | if (length > BNXT_RE_MAX_MR_SIZE) { |
		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported: %lld\n",
			  length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
4050 | } |
4051 | |
	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
	if (!page_size) {
		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
		return ERR_PTR(-EINVAL);
4056 | } |
4057 | |
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
4061 | |
4062 | mr->rdev = rdev; |
4063 | mr->qplib_mr.pd = &pd->qplib_pd; |
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4065 | mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; |
4066 | |
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4070 | rc = -EIO; |
4071 | goto free_mr; |
4072 | } |
4073 | /* The fixed portion of the rkey is the same as the lkey */ |
4074 | mr->ib_mr.rkey = mr->qplib_mr.rkey; |
4075 | mr->ib_umem = umem; |
4076 | mr->qplib_mr.va = virt_addr; |
4077 | mr->qplib_mr.total_size = length; |
4078 | |
	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
			       umem_pgs, page_size);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4084 | rc = -EIO; |
4085 | goto free_mrw; |
4086 | } |
4087 | |
4088 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
4089 | mr->ib_mr.rkey = mr->qplib_mr.lkey; |
	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4091 | if (active_mrs > rdev->stats.res.mr_watermark) |
4092 | rdev->stats.res.mr_watermark = active_mrs; |
4093 | |
4094 | return &mr->ib_mr; |
4095 | |
4096 | free_mrw: |
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
4101 | } |
4102 | |
4103 | struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, |
4104 | u64 virt_addr, int mr_access_flags, |
4105 | struct ib_udata *udata) |
4106 | { |
4107 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4108 | struct bnxt_re_dev *rdev = pd->rdev; |
4109 | struct ib_umem *umem; |
4110 | struct ib_mr *ib_mr; |
4111 | |
	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);
4115 | |
4116 | ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); |
	if (IS_ERR(ib_mr))
4118 | ib_umem_release(umem); |
4119 | return ib_mr; |
4120 | } |
4121 | |
4122 | struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start, |
4123 | u64 length, u64 virt_addr, int fd, |
4124 | int mr_access_flags, struct ib_udata *udata) |
4125 | { |
4126 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
4127 | struct bnxt_re_dev *rdev = pd->rdev; |
4128 | struct ib_umem_dmabuf *umem_dmabuf; |
4129 | struct ib_umem *umem; |
4130 | struct ib_mr *ib_mr; |
4131 | |
	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
						fd, mr_access_flags);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);
4136 | |
4137 | umem = &umem_dmabuf->umem; |
4138 | |
4139 | ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem); |
	if (IS_ERR(ib_mr))
4141 | ib_umem_release(umem); |
4142 | return ib_mr; |
4143 | } |
4144 | |
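/* Set up a per-process user context: verify the uverbs ABI version,
 * allocate the shared page, and report the chip revision, queue limits
 * and capability flags back to the user library via the ABI response.
 */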
4145 | int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) |
4146 | { |
4147 | struct ib_device *ibdev = ctx->device; |
4148 | struct bnxt_re_ucontext *uctx = |
4149 | container_of(ctx, struct bnxt_re_ucontext, ib_uctx); |
4150 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
4151 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
4152 | struct bnxt_re_user_mmap_entry *entry; |
4153 | struct bnxt_re_uctx_resp resp = {}; |
4154 | struct bnxt_re_uctx_req ureq = {}; |
4155 | u32 chip_met_rev_num = 0; |
4156 | int rc; |
4157 | |
4158 | ibdev_dbg(ibdev, "ABI version requested %u" , ibdev->ops.uverbs_abi_ver); |
4159 | |
4160 | if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { |
4161 | ibdev_dbg(ibdev, " is different from the device %d " , |
4162 | BNXT_RE_ABI_VERSION); |
4163 | return -EPERM; |
4164 | } |
4165 | |
4166 | uctx->rdev = rdev; |
4167 | |
4168 | uctx->shpg = (void *)__get_free_page(GFP_KERNEL); |
4169 | if (!uctx->shpg) { |
4170 | rc = -ENOMEM; |
4171 | goto fail; |
4172 | } |
4173 | spin_lock_init(&uctx->sh_lock); |
4174 | |
4175 | resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; |
4176 | chip_met_rev_num = rdev->chip_ctx->chip_num; |
4177 | chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << |
4178 | BNXT_RE_CHIP_ID0_CHIP_REV_SFT; |
4179 | chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << |
4180 | BNXT_RE_CHIP_ID0_CHIP_MET_SFT; |
4181 | resp.chip_id0 = chip_met_rev_num; |
	/* Temp: use xa_alloc instead */
4183 | resp.dev_id = rdev->en_dev->pdev->devfn; |
4184 | resp.max_qp = rdev->qplib_ctx.qpc_count; |
4185 | resp.pg_size = PAGE_SIZE; |
4186 | resp.cqe_sz = sizeof(struct cq_base); |
4187 | resp.max_cqd = dev_attr->max_cq_wqes; |
4188 | |
4189 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE; |
4190 | resp.mode = rdev->chip_ctx->modes.wqe_mode; |
4191 | |
4192 | if (rdev->chip_ctx->modes.db_push) |
4193 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED; |
4194 | |
	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4196 | if (!entry) { |
4197 | rc = -ENOMEM; |
4198 | goto cfail; |
4199 | } |
4200 | uctx->shpage_mmap = &entry->rdma_entry; |
4201 | if (rdev->pacing.dbr_pacing) |
4202 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED; |
4203 | |
4204 | if (udata->inlen >= sizeof(ureq)) { |
		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4206 | if (rc) |
4207 | goto cfail; |
4208 | if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) { |
4209 | resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED; |
4210 | uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED; |
4211 | } |
4212 | } |
4213 | |
	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (rc) {
		ibdev_err(ibdev, "Failed to copy user context");
4217 | rc = -EFAULT; |
4218 | goto cfail; |
4219 | } |
4220 | |
4221 | return 0; |
4222 | cfail: |
4223 | free_page((unsigned long)uctx->shpg); |
4224 | uctx->shpg = NULL; |
4225 | fail: |
4226 | return rc; |
4227 | } |
4228 | |
4229 | void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) |
4230 | { |
4231 | struct bnxt_re_ucontext *uctx = container_of(ib_uctx, |
4232 | struct bnxt_re_ucontext, |
4233 | ib_uctx); |
4234 | |
4235 | struct bnxt_re_dev *rdev = uctx->rdev; |
4236 | |
	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4238 | uctx->shpage_mmap = NULL; |
4239 | if (uctx->shpg) |
4240 | free_page((unsigned long)uctx->shpg); |
4241 | |
4242 | if (uctx->dpi.dbr) { |
		/* Free the DPI that was allocated for the first PD created by
		 * the application and clear the context dpi pointer.
		 */
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4247 | uctx->dpi.dbr = NULL; |
4248 | } |
4249 | } |
4250 | |
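/* Look up a CQ by its HW id in the per-device hash table that is
 * populated at CQ creation when toggle-page support is enabled.
 */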
4251 | static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id) |
4252 | { |
4253 | struct bnxt_re_cq *cq = NULL, *tmp_cq; |
4254 | |
4255 | hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) { |
4256 | if (tmp_cq->qplib_cq.id == cq_id) { |
4257 | cq = tmp_cq; |
4258 | break; |
4259 | } |
4260 | } |
4261 | return cq; |
4262 | } |
4263 | |
4264 | /* Helper function to mmap the virtual memory from user app */ |
4265 | int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma) |
4266 | { |
4267 | struct bnxt_re_ucontext *uctx = container_of(ib_uctx, |
4268 | struct bnxt_re_ucontext, |
4269 | ib_uctx); |
4270 | struct bnxt_re_user_mmap_entry *bnxt_entry; |
4271 | struct rdma_user_mmap_entry *rdma_entry; |
4272 | int ret = 0; |
4273 | u64 pfn; |
4274 | |
	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4276 | if (!rdma_entry) |
4277 | return -EINVAL; |
4278 | |
4279 | bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, |
4280 | rdma_entry); |
4281 | |
4282 | switch (bnxt_entry->mmap_flag) { |
4283 | case BNXT_RE_MMAP_WC_DB: |
4284 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
4288 | break; |
4289 | case BNXT_RE_MMAP_UC_DB: |
4290 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
4294 | break; |
4295 | case BNXT_RE_MMAP_SH_PAGE: |
		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4297 | break; |
4298 | case BNXT_RE_MMAP_DBR_BAR: |
4299 | pfn = bnxt_entry->mem_offset >> PAGE_SHIFT; |
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
4303 | break; |
4304 | case BNXT_RE_MMAP_DBR_PAGE: |
4305 | case BNXT_RE_MMAP_TOGGLE_PAGE: |
4306 | /* Driver doesn't expect write access for user space */ |
4307 | if (vma->vm_flags & VM_WRITE) |
4308 | return -EFAULT; |
		ret = vm_insert_page(vma, vma->vm_start,
				     virt_to_page((void *)bnxt_entry->mem_offset));
4311 | break; |
4312 | default: |
4313 | ret = -EINVAL; |
4314 | break; |
4315 | } |
4316 | |
	rdma_user_mmap_entry_put(rdma_entry);
4318 | return ret; |
4319 | } |
4320 | |
4321 | void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry) |
4322 | { |
4323 | struct bnxt_re_user_mmap_entry *bnxt_entry; |
4324 | |
4325 | bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry, |
4326 | rdma_entry); |
4327 | |
	kfree(bnxt_entry);
4329 | } |
4330 | |
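/* Direct-verbs NOTIFY_DRV method: invoked by the user library to trigger
 * a doorbell-pacing alert in the driver.
 */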
4331 | static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs) |
4332 | { |
4333 | struct bnxt_re_ucontext *uctx; |
4334 | |
4335 | uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); |
	bnxt_re_pacing_alert(uctx->rdev);
4337 | return 0; |
4338 | } |
4339 | |
4340 | static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs) |
4341 | { |
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4343 | enum bnxt_re_alloc_page_type alloc_type; |
4344 | struct bnxt_re_user_mmap_entry *entry; |
4345 | enum bnxt_re_mmap_flag mmap_flag; |
4346 | struct bnxt_qplib_chip_ctx *cctx; |
4347 | struct bnxt_re_ucontext *uctx; |
4348 | struct bnxt_re_dev *rdev; |
4349 | u64 mmap_offset; |
4350 | u32 length; |
4351 | u32 dpi; |
4352 | u64 addr; |
4353 | int err; |
4354 | |
4355 | uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx); |
	if (IS_ERR(uctx))
		return PTR_ERR(uctx);
4358 | |
4359 | err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE); |
4360 | if (err) |
4361 | return err; |
4362 | |
4363 | rdev = uctx->rdev; |
4364 | cctx = rdev->chip_ctx; |
4365 | |
4366 | switch (alloc_type) { |
4367 | case BNXT_RE_ALLOC_WC_PAGE: |
4368 | if (cctx->modes.db_push) { |
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4371 | return -ENOMEM; |
4372 | length = PAGE_SIZE; |
4373 | dpi = uctx->wcdpi.dpi; |
4374 | addr = (u64)uctx->wcdpi.umdbr; |
4375 | mmap_flag = BNXT_RE_MMAP_WC_DB; |
4376 | } else { |
4377 | return -EINVAL; |
4378 | } |
4379 | |
4380 | break; |
4381 | case BNXT_RE_ALLOC_DBR_BAR_PAGE: |
4382 | length = PAGE_SIZE; |
4383 | addr = (u64)rdev->pacing.dbr_bar_addr; |
4384 | mmap_flag = BNXT_RE_MMAP_DBR_BAR; |
4385 | break; |
4386 | |
4387 | case BNXT_RE_ALLOC_DBR_PAGE: |
4388 | length = PAGE_SIZE; |
4389 | addr = (u64)rdev->pacing.dbr_page; |
4390 | mmap_flag = BNXT_RE_MMAP_DBR_PAGE; |
4391 | break; |
4392 | |
4393 | default: |
4394 | return -EOPNOTSUPP; |
4395 | } |
4396 | |
	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4398 | if (!entry) |
4399 | return -ENOMEM; |
4400 | |
4401 | uobj->object = entry; |
	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
4405 | if (err) |
4406 | return err; |
4407 | |
	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
			     &length, sizeof(length));
4410 | if (err) |
4411 | return err; |
4412 | |
	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
			     &dpi, sizeof(dpi));
4415 | if (err) |
4416 | return err; |
4417 | |
4418 | return 0; |
4419 | } |
4420 | |
4421 | static int alloc_page_obj_cleanup(struct ib_uobject *uobject, |
4422 | enum rdma_remove_reason why, |
4423 | struct uverbs_attr_bundle *attrs) |
4424 | { |
4425 | struct bnxt_re_user_mmap_entry *entry = uobject->object; |
4426 | struct bnxt_re_ucontext *uctx = entry->uctx; |
4427 | |
4428 | switch (entry->mmap_flag) { |
4429 | case BNXT_RE_MMAP_WC_DB: |
4430 | if (uctx && uctx->wcdpi.dbr) { |
4431 | struct bnxt_re_dev *rdev = uctx->rdev; |
4432 | |
			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4434 | uctx->wcdpi.dbr = NULL; |
4435 | } |
4436 | break; |
4437 | case BNXT_RE_MMAP_DBR_BAR: |
4438 | case BNXT_RE_MMAP_DBR_PAGE: |
4439 | break; |
4440 | default: |
4441 | goto exit; |
4442 | } |
	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4444 | exit: |
4445 | return 0; |
4446 | } |
4447 | |
4448 | DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE, |
4449 | UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE, |
4450 | BNXT_RE_OBJECT_ALLOC_PAGE, |
4451 | UVERBS_ACCESS_NEW, |
4452 | UA_MANDATORY), |
4453 | UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE, |
4454 | enum bnxt_re_alloc_page_type, |
4455 | UA_MANDATORY), |
4456 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, |
4457 | UVERBS_ATTR_TYPE(u64), |
4458 | UA_MANDATORY), |
4459 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, |
4460 | UVERBS_ATTR_TYPE(u32), |
4461 | UA_MANDATORY), |
4462 | UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI, |
4463 | UVERBS_ATTR_TYPE(u32), |
4464 | UA_MANDATORY)); |
4465 | |
4466 | DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE, |
4467 | UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE, |
4468 | BNXT_RE_OBJECT_ALLOC_PAGE, |
4469 | UVERBS_ACCESS_DESTROY, |
4470 | UA_MANDATORY)); |
4471 | |
4472 | DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE, |
4473 | UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup), |
4474 | &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE), |
4475 | &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE)); |
4476 | |
4477 | DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV); |
4478 | |
4479 | DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV, |
4480 | &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV)); |
4481 | |
4482 | /* Toggle MEM */ |
4483 | static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs) |
4484 | { |
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4486 | enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE; |
4487 | enum bnxt_re_get_toggle_mem_type res_type; |
4488 | struct bnxt_re_user_mmap_entry *entry; |
4489 | struct bnxt_re_ucontext *uctx; |
4490 | struct ib_ucontext *ib_uctx; |
4491 | struct bnxt_re_dev *rdev; |
4492 | struct bnxt_re_cq *cq; |
4493 | u64 mem_offset; |
4494 | u64 addr = 0; |
4495 | u32 length; |
4496 | u32 offset; |
4497 | u32 cq_id; |
4498 | int err; |
4499 | |
4500 | ib_uctx = ib_uverbs_get_ucontext(attrs); |
	if (IS_ERR(ib_uctx))
		return PTR_ERR(ib_uctx);
4503 | |
4504 | err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE); |
4505 | if (err) |
4506 | return err; |
4507 | |
4508 | uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); |
4509 | rdev = uctx->rdev; |
4510 | |
4511 | switch (res_type) { |
4512 | case BNXT_RE_CQ_TOGGLE_MEM: |
4513 | err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID); |
4514 | if (err) |
4515 | return err; |
4516 | |
4517 | cq = bnxt_re_search_for_cq(rdev, cq_id); |
4518 | if (!cq) |
4519 | return -EINVAL; |
4520 | |
4521 | length = PAGE_SIZE; |
4522 | addr = (u64)cq->uctx_cq_page; |
4523 | mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE; |
4524 | offset = 0; |
4525 | break; |
4526 | case BNXT_RE_SRQ_TOGGLE_MEM: |
4527 | break; |
4528 | |
4529 | default: |
4530 | return -EOPNOTSUPP; |
4531 | } |
4532 | |
	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4534 | if (!entry) |
4535 | return -ENOMEM; |
4536 | |
4537 | uobj->object = entry; |
	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
			     &mem_offset, sizeof(mem_offset));
4541 | if (err) |
4542 | return err; |
4543 | |
	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
			     &length, sizeof(length));
4546 | if (err) |
4547 | return err; |
4548 | |
	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
			     &offset, sizeof(offset));
4551 | if (err) |
4552 | return err; |
4553 | |
4554 | return 0; |
4555 | } |
4556 | |
4557 | static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject, |
4558 | enum rdma_remove_reason why, |
4559 | struct uverbs_attr_bundle *attrs) |
4560 | { |
4561 | struct bnxt_re_user_mmap_entry *entry = uobject->object; |
4562 | |
	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4564 | return 0; |
4565 | } |
4566 | |
4567 | DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM, |
4568 | UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE, |
4569 | BNXT_RE_OBJECT_GET_TOGGLE_MEM, |
4570 | UVERBS_ACCESS_NEW, |
4571 | UA_MANDATORY), |
4572 | UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE, |
4573 | enum bnxt_re_get_toggle_mem_type, |
4574 | UA_MANDATORY), |
4575 | UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID, |
4576 | UVERBS_ATTR_TYPE(u32), |
4577 | UA_MANDATORY), |
4578 | UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE, |
4579 | UVERBS_ATTR_TYPE(u64), |
4580 | UA_MANDATORY), |
4581 | UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET, |
4582 | UVERBS_ATTR_TYPE(u32), |
4583 | UA_MANDATORY), |
4584 | UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH, |
4585 | UVERBS_ATTR_TYPE(u32), |
4586 | UA_MANDATORY)); |
4587 | |
4588 | DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM, |
4589 | UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE, |
4590 | BNXT_RE_OBJECT_GET_TOGGLE_MEM, |
4591 | UVERBS_ACCESS_DESTROY, |
4592 | UA_MANDATORY)); |
4593 | |
4594 | DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM, |
4595 | UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup), |
4596 | &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM), |
4597 | &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM)); |
4598 | |
4599 | const struct uapi_definition bnxt_re_uapi_defs[] = { |
4600 | UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE), |
4601 | UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV), |
4602 | UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM), |
4603 | {} |
4604 | }; |
4605 | |