/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_tlv.h"

const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 } };

/* Device */

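/* Report whether the adapter can act as a PCIe AtomicOp requester: only
 * Gen P5/P7 chips qualify, and only when PCI_EXP_DEVCTL2_ATOMIC_REQ is
 * enabled on the function.
 */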
static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
	u16 pcie_ctl2 = 0;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
		return false;

	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}

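/* Query the firmware version (QUERY_VERSION) and store the four version
 * components in fw_ver; fw_ver is left untouched if the command fails.
 */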
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
{
	struct creq_query_version_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_version req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_VERSION,
				 sizeof(req));

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return;
	fw_ver[0] = resp.fw_maj;
	fw_ver[1] = resp.fw_minor;
	fw_ver[2] = resp.fw_bld;
	fw_ver[3] = resp.fw_rsvd;
}

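/* Query device limits and capabilities from firmware (QUERY_FUNC) and
 * translate the response side-buffer into the qplib device attributes.
 */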
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr)
{
	struct creq_query_func_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct cmdq_query_func req = {};
	u8 *tqm_alloc;
	int i, rc;
	u32 temp;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_FUNC,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	/* max_qp value reported by FW doesn't include the QP1 */
	attr->max_qp += 1;
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
	/*
	 * 128 WQEs need to be reserved for the HW (8916). Prevent
	 * reporting the max number.
	 */
	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
			    6 : sb->max_sge;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mw = le32_to_cpu(sb->max_mw);

	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = 64 * 1024;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);

	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;
	attr->max_pkey = 1;
	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
		attr->l2_db_size = (sb->l2_db_space_size + 1) *
				    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);

	bnxt_qplib_query_version(rcfw, attr->fw_ver);

	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
		attr->max_dpi = le32_to_cpu(sb->max_dpi);

	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

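/* Distribute the QP/MRW/SRQ/CQ counts and the per-VF limits to firmware
 * via SET_FUNC_RESOURCES.
 */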
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx)
{
	struct creq_set_func_resources_resp resp = {};
	struct cmdq_set_func_resources req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
				 sizeof(req));

	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
	req.number_of_srq = cpu_to_le32(ctx->srqc_count);
	req.number_of_cq = cpu_to_le32(ctx->cq_count);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		dev_err(&res->pdev->dev, "Failed to set function resources\n");
	}
	return rc;
}

/* SGID */
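/* Copy the GID at @index from the software SGID table into @gid. */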
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded SGID table max (%d)\n",
			index, sgid_tbl->max);
		return -EINVAL;
	}
	memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
	return 0;
}

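/* Remove a GID/VLAN pairing from the SGID table; when @update is true the
 * entry is also deleted from the hardware via DELETE_GID.
 */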
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev, "SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
		    vlan_id == sgid_tbl->tbl[index].vlan_id)
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return 0;
	}
	/* Remove GID from the SGID table */
	if (update) {
		struct creq_delete_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_delete_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_DELETE_GID,
					 sizeof(req));
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->tbl[index].vlan_id = 0xFFFF;
	sgid_tbl->vlan[index] = 0;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		"SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	/* unlock */
	return 0;
}

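/* Insert a GID into the first free slot of the SGID table; when @update is
 * true the entry is also programmed into the hardware via ADD_GID. Returns
 * -EALREADY (with *index set) if the GID/VLAN pair already exists.
 */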
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, const u8 *smac,
			u16 vlan_id, bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
		    sgid_tbl->tbl[i].vlan_id == vlan_id) {
			dev_dbg(&res->pdev->dev,
				"SGID entry already exists in entry %d!\n", i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct creq_add_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_add_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_ADD_GID,
					 sizeof(req));

		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
		/*
		 * The driver must ensure that all RoCE traffic is VLAN
		 * tagged whenever it runs on a non-zero VLAN ID or with a
		 * non-zero priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16
				(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |= cpu_to_le16
					(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					 CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
	sgid_tbl->active++;
	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = 1;

	dev_dbg(&res->pdev->dev,
		"SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	/* unlock */
	return 0;
}

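/* Reprogram an existing hardware GID entry (MODIFY_GID), typically to
 * refresh the source MAC or the VLAN priority setting.
 */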
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   const u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_gid req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_GID,
				 sizeof(req));

	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

/* AH */
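/* Create an address handle in hardware and store the returned AH id. */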
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_ah req = {};
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_AH,
				 sizeof(req));

	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
					CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
					CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, 6);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}

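/* Destroy the hardware address handle identified by ah->id. */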
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			  bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_ah req = {};
	int rc;

	/* Clean up the AH table in the device */
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_AH,
				 sizeof(req));

	req.ah_cid = cpu_to_le32(ah->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

/* MRW */
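/* Deallocate an MR/MW key in hardware and release the associated PBL
 * memory held by qplib.
 */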
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct creq_deallocate_key_resp resp = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
		return 0;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
				 sizeof(req));

	req.mrw_flags = mrw->type;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		req.key = cpu_to_le32(mrw->rkey);
	else
		req.key = cpu_to_le32(mrw->lkey);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MRW memory */
	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mrw->hwq);

	return 0;
}

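/* Allocate an MR or MW in hardware; the returned xid becomes the rkey for
 * memory windows and the lkey otherwise.
 */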
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_allocate_mrw_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_allocate_mrw req = {};
	unsigned long tmp;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_ALLOCATE_MRW,
				 sizeof(req));

	req.pd_id = cpu_to_le32(mrw->pd->id);
	req.mrw_flags = mrw->type;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
	     mrw->flags & BNXT_QPLIB_FR_PMR) ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
	tmp = (unsigned long)mrw;
	req.mrw_handle = cpu_to_le64(tmp);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp.xid);
	return 0;
}

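/* Deregister an MR in hardware and release the qplib-side PBL memory. */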
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_deregister_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_deregister_mr req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEREGISTER_MR,
				 sizeof(req));

	req.lkey = cpu_to_le32(mrw->lkey);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res, &mrw->hwq);
	}

	return 0;
}

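/* Register an MR with the hardware. If @num_pbls is non-zero, a HWQ is
 * (re)allocated to hold the page buffer list described by @umem.
 */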
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_register_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_register_mr req = {};
	int pages, rc;
	u32 pg_size;
	u16 level;

	if (num_pbls) {
		pages = roundup_pow_of_two(num_pbls);
		/* Allocate memory for the non-leaf pages to store buf ptrs.
		 * Non-leaf pages always use the system PAGE_SIZE.
		 */
		/* Free the hwq if it already exists, must be a rereg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res, &mr->hwq);
		hwq_attr.res = res;
		hwq_attr.depth = pages;
		hwq_attr.stride = sizeof(dma_addr_t);
		hwq_attr.type = HWQ_TYPE_MR;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.sginfo->umem = umem;
		hwq_attr.sginfo->npages = pages;
		hwq_attr.sginfo->pgsize = buf_pg_size;
		hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed\n");
			return -ENOMEM;
		}
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_REGISTER_MR,
				 sizeof(req));

	/* Configure the request */
	if (mr->hwq.level == PBL_LVL_MAX) {
		/* No PBL provided, just use system PAGE_SIZE */
		level = 0;
		req.pbl = 0;
		pg_size = PAGE_SIZE;
	} else {
		level = mr->hwq.level;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	}
	pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
				 CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
	req.access = (mr->flags & 0xFFFF);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	req.mr_size = cpu_to_le64(mr->total_size);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	return 0;

fail:
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mr->hwq);
	return rc;
}

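/* Allocate the HWQ backing a fast-register page list, sized to the next
 * power of two of @max_pg_ptrs.
 */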
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl,
					int max_pg_ptrs)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int pg_ptrs, pages, rc;

	/* Re-calculate the max to fit the HWQ allocation model */
	pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
	pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
	if (!pages)
		pages++;

	if (pages > MAX_PBL_LVL_1_PGS)
		return -ENOMEM;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.nopte = true;

	hwq_attr.res = res;
	hwq_attr.depth = pg_ptrs;
	hwq_attr.stride = PAGE_SIZE;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
	if (!rc)
		frpl->max_pg_ptrs = pg_ptrs;

	return rc;
}

int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_frpl *frpl)
{
	bnxt_qplib_free_hwq(res, &frpl->hwq);
	return 0;
}

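/* Query the per-function RoCE error/drop counters (QUERY_ROCE_STATS) and
 * copy them into @stats. The out-of-sequence drop count is accumulated as
 * a delta against the previously sampled value.
 */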
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
			      struct bnxt_qplib_roce_stats *stats)
{
	struct creq_query_roce_stats_resp resp = {};
	struct creq_query_roce_stats_resp_sb *sb;
	struct cmdq_query_roce_stats req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
	stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
	stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	stats->missing_resp = le64_to_cpu(sb->missing_resp);
	stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
	stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
	stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
	stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
	stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
	stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
	stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
	stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
	stats->dup_req = le64_to_cpu(sb->dup_req);
	stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
	stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
	stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
	stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
	stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
	stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
	stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
	stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
	stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
	stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
	stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
	stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
	stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
	stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
	stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
	stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
	stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
	stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
	stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
	stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
	stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
	stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
	stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
	stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
	stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
	if (!rcfw->init_oos_stats) {
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
		rcfw->init_oos_stats = 1;
	} else {
		stats->res_oos_drop_count +=
				(le64_to_cpu(sb->res_oos_drop_count) -
				 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
	}

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

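/* Query the extended per-function RoCE traffic counters
 * (QUERY_ROCE_STATS_EXT) for @fid and copy them into @estat.
 */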
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
			 struct bnxt_qplib_ext_stat *estat)
{
	struct creq_query_roce_stats_ext_resp resp = {};
	struct creq_query_roce_stats_ext_resp_sb *sb;
	struct cmdq_query_roce_stats_ext req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	sb = sbuf.sb;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	req.function_id = cpu_to_le32(fid);
	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
	estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
	estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
	estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
	estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
	estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
	estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
	estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
	estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
	estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
	estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
	estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
	estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
	estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
	estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
	estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
	estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
	estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
	estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
	estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
	estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

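/* Fill the gen1 TLV extension of MODIFY_ROCE_CC from the extended
 * congestion-control parameters; the extension mask is consumed here.
 */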
static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
				    struct bnxt_qplib_cc_param_ext *cc_ext)
{
	ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
	cc_ext->ext_mask = 0;
	ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
	ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
	ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
	ext_req->tr_update_mode = cc_ext->tr_update_mode;
	ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
	ext_req->fr_num_rtts = cc_ext->fr_rtt;
	ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
	ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
	ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
	ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
	ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
	ext_req->actual_cr_factor = cc_ext->cr_factor;
	ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
	ext_req->cp_bias_en = cc_ext->cp_bias_en;
	ext_req->cp_bias = cc_ext->cp_bias;
	ext_req->cnp_ecn = cc_ext->cnp_ecn;
	ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
	ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
	ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
	ext_req->cr_width = cc_ext->cr_width;
	ext_req->quota_period_min = cc_ext->min_quota;
	ext_req->quota_period_max = cc_ext->max_quota;
	ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
	ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
	ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
	ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
	ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
	ext_req->red_div = cc_ext->red_div;
	ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
	ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
	ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
	ext_req->use_rate_table = cc_ext->low_rate_en;
	ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
	ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
	ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
	ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
	ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
	ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
	ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
	ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
}

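/* Push congestion-control settings to firmware. The base MODIFY_ROCE_CC
 * command is always built; on gen P5/P7 chips it is wrapped in TLV headers
 * and the gen1 extension is appended.
 */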
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_cc_param *cc_param)
{
	struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
	struct creq_modify_roce_cc_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_roce_cc *req;
	int req_size;
	void *cmd;
	int rc;

	/* Prepare the older base command */
	req = &tlv_req.base_req;
	cmd = req;
	req_size = sizeof(*req);
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
				 sizeof(*req));
	req->modify_mask = cpu_to_le32(cc_param->mask);
	req->enable_cc = cc_param->enable;
	req->g = cc_param->g;
	req->num_phases_per_state = cc_param->nph_per_state;
	req->time_per_phase = cc_param->time_pph;
	req->pkts_per_phase = cc_param->pkts_pph;
	req->init_cr = cpu_to_le16(cc_param->init_cr);
	req->init_tr = cpu_to_le16(cc_param->init_tr);
	req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
				(cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
	req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
	req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
	req->rtt = cpu_to_le16(cc_param->rtt);
	req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
	req->cc_mode = cc_param->cc_mode;
	req->inactivity_th = cpu_to_le16(cc_param->inact_th);

	/* For chip gen P5 onwards fill extended cmd and header */
	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		struct roce_tlv *hdr;
		u32 payload;
		u32 chunks;

		cmd = &tlv_req;
		req_size = sizeof(tlv_req);
		/* Prepare primary tlv header */
		hdr = &tlv_req.tlv_hdr;
		chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
		payload = sizeof(struct cmdq_modify_roce_cc);
		__roce_1st_tlv_prep(hdr, chunks, payload, true);
		/* Prepare secondary tlv header */
		hdr = (struct roce_tlv *)&tlv_req.ext_req;
		payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
			  sizeof(struct roce_tlv);
		__roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
		bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	return rc;
}