// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_mark_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now, consider only the MC resources needed for broadcast
 * pkt replication, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_2K
#define MC_BUF_CNT	MC_BUF_CNT_1024

#define MC_TX_MAX	2048

struct mce {
	struct hlist_node node;
	u32 rq_rss_index;
	u16 pcifunc;
	u16 channel;
	u8 dest_type;
	u8 is_active;
	u8 reserved[2];
};

int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
	struct rsrc_bmap *mce_counter;
	int idx;

	if (!mcast)
		return -EINVAL;

	mce_counter = &mcast->mce_counter[dir];
	if (!rvu_rsrc_check_contig(mce_counter, count))
		return -ENOSPC;

	idx = rvu_alloc_rsrc_contig(mce_counter, count);
	return idx;
}

static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
{
	struct rsrc_bmap *mce_counter;

	if (!mcast)
		return;

	mce_counter = &mcast->mce_counter[dir];
	rvu_free_rsrc_contig(mce_counter, count, start);
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
{
	if (hw->cap.nix_multiple_dwrr_mtu)
		return NIX_AF_DWRR_MTUX(smq_link_type);

	if (smq_link_type == SMQ_LINK_TYPE_SDP)
		return NIX_AF_DWRR_SDP_MTU;

	/* Here it's the same register for both RPM and LBK */
	return NIX_AF_DWRR_RPM_MTU;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}
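
/* Illustrative round-trip (not part of the driver): the two helpers above
 * are inverses for every supported encoding, e.g. register value 12 decodes
 * to 2^12 = 4096 bytes, and the reserved byte counts 9728/10240 map to the
 * reserved register values 4 and 5. A minimal sanity-check sketch:
 */
static void __maybe_unused nix_dwrr_mtu_example(void)
{
	/* Power-of-2 encoding: 2^12 = 4096 bytes */
	WARN_ON(convert_dwrr_mtu_to_bytes(12) != 4096);
	WARN_ON(convert_bytes_to_dwrr_mtu(4096) != 12);

	/* Reserved encodings for the 9728 and 10240 byte MTUs */
	WARN_ON(convert_bytes_to_dwrr_mtu(9728) != 4);
	WARN_ON(convert_dwrr_mtu_to_bytes(5) != 10240);
}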

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, so queues should be torn down only after
	 * a successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after the SW_SYNC operation. To
	 * ensure the operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packets on lbk link 1
		 * (which corresponds to LBK1); the same packet is received on
		 * NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0
		 * (which corresponds to LBK2) the packet is received on NIX0
		 * lbk link 1.
		 * But if the lbk links for NIX0 and NIX1 are negated, i.e.
		 * NIX0 transmits and receives on lbk link 0, which corresponds
		 * to the LBK1 block, back to back connectivity between NIX
		 * and LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

#define NIX_BPIDS_PER_LMAC	8
#define NIX_BPIDS_PER_CPT	1
static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
{
	struct nix_bp *bp = &hw->bp;
	int err, max_bpids;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);

	/* Reserve the BPIDs for CGX and SDP */
	bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
	bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
	bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
			     NIX_BPIDS_PER_CPT;
	bp->bpids.max = max_bpids - bp->free_pool_base;

	err = rvu_alloc_bitmap(&bp->bpids);
	if (err)
		return err;

	bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				  sizeof(u16), GFP_KERNEL);
	if (!bp->fn_map)
		return -ENOMEM;

	bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				    sizeof(u8), GFP_KERNEL);
	if (!bp->intf_map)
		return -ENOMEM;

	bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
				   sizeof(u8), GFP_KERNEL);
	if (!bp->ref_cnt)
		return -ENOMEM;

	return 0;
}

void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, bpid, err;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;

	if (!is_lbk_vf(rvu, pcifunc))
		return;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return;

	bp = &nix_hw->bp;

	mutex_lock(&rvu->rsrc_lock);
	for (bpid = 0; bpid < bp->bpids.max; bpid++) {
		if (bp->fn_map[bpid] == pcifunc) {
			bp->ref_cnt[bpid]--;
			if (bp->ref_cnt[bpid])
				continue;
			rvu_free_rsrc(&bp->bpids, bpid);
			bp->fn_map[bpid] = 0;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, pf, type, err;
	u16 chan_base, chan, bpid;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	struct nix_bp *bp;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;
	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));

		if (type == NIX_INTF_TYPE_LBK) {
			bpid = cfg & GENMASK(8, 0);
			mutex_lock(&rvu->rsrc_lock);
			rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
			for (bpid = 0; bpid < bp->bpids.max; bpid++) {
				if (bp->fn_map[bpid] == pcifunc) {
					bp->fn_map[bpid] = 0;
					bp->ref_cnt[bpid] = 0;
				}
			}
			mutex_unlock(&rvu->rsrc_lock);
		}
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, sdp_chan_base, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u8 cgx_id, lmac_id;
	struct nix_bp *bp;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	bp = &nix_hw->bp;

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
			return NIX_AF_ERR_INVALID_BPID_REQ;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
		       (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > bp->cgx_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID;
		break;

	case NIX_INTF_TYPE_LBK:
		/* Alloc bpid from the free pool */
		mutex_lock(&rvu->rsrc_lock);
		bpid = rvu_alloc_rsrc(&bp->bpids);
		if (bpid < 0) {
			mutex_unlock(&rvu->rsrc_lock);
			return NIX_AF_ERR_INVALID_BPID;
		}
		bp->fn_map[bpid] = req->hdr.pcifunc;
		bp->ref_cnt[bpid]++;
		bpid += bp->free_pool_base;
		mutex_unlock(&rvu->rsrc_lock);
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
			return NIX_AF_ERR_INVALID_BPID_REQ;

		/* Handle usecase of 2 SDP blocks */
		if (!hw->cap.programmable_chans)
			sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
		else
			sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;

		bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
			return NIX_AF_ERR_INVALID_BPID;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
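
/* A minimal sketch (not part of the driver) of the fixed CGX BPID formula
 * used above. For example, with hw->lmac_per_cgx == 8, cgx 1 / lmac 2 /
 * channel 2 maps to bpid 1 * 8 * 8 + 2 * 8 + 2 = 82, which falls inside the
 * (0 - 191) range reserved for CGX.
 */
static inline int nix_cgx_bpid_example(struct rvu_hwinfo *hw, u8 cgx_id,
				       u8 lmac_id, u8 chan)
{
	return (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
	       (lmac_id * NIX_BPIDS_PER_LMAC) + chan;
}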

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
				       (bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
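
/* Each rsp->chan_bpid word above packs the relative channel into the bits
 * above bit 10 and the BPID into the low 10 bits. A hedged decoding sketch
 * for a consumer of this response (helper names are illustrative, not part
 * of the mbox API):
 */
static inline u16 nix_bp_rsp_chan(u32 chan_bpid)
{
	return (chan_bpid >> 10) & 0x7F;	/* relative channel */
}

static inline u16 nix_bp_rsp_bpid(u32 chan_bpid)
{
	return chan_bpid & 0x3FF;		/* backpressure ID */
}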

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4, the length field is at offset 2 bytes, for IPv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
	      ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

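/* Worked example (illustrative): with rss_sz = 256 and rss_grps = 2, the
 * loop above programs group 0 at offset 0 and group 1 at offset 256, each
 * with the size field set to ilog2(256) - 1 = 7, so both groups span 256
 * indirection-table entries.
 */
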
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;
	int ret;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
		    result->compcode == NIX_AQ_COMP_LOCKERR ||
		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
			if (ret)
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}

static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
			       u16 *smq, u16 *smq_mask)
{
	struct nix_cn10k_aq_enq_req *aq_req;

	if (!is_rvu_otx2(rvu)) {
		aq_req = (struct nix_cn10k_aq_enq_req *)req;
		*smq = aq_req->sq.smq;
		*smq_mask = aq_req->sq_mask.smq;
	} else {
		*smq = req->sq.smq;
		*smq_mask = req->sq_mask.smq;
	}
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	u16 smq, smq_mask;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err = 0;
	aq_req.cq_mask.wrptr = 0;
	aq_req.cq_mask.tail = 0;
	aq_req.cq_mask.head = 0;
	aq_req.cq_mask.avg_level = 0;
	aq_req.cq_mask.update_time = 0;
	aq_req.cq_mask.substream = 0;

	/* Context mask (cq_mask) holds the mask value of the fields which
	 * were changed in the AQ WRITE operation, for example:
	 *	cq.drop = 0xa;
	 *	cq_mask.drop = 0xff;
	 * The logic below performs '&' between cq and cq_mask so that
	 * non-updated fields are masked out for the request and response
	 * comparison.
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy
	 * traffic'. As a workaround, perform a CQ context read after each
	 * AQ write. If the read shows the AQ write was not applied, perform
	 * the AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * Start from zero, as cfg still holds the TPID value written above.
	 */
	cfg = 0;
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
1667 | |
1668 | int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, |
1669 | struct msg_rsp *rsp) |
1670 | { |
1671 | struct rvu_hwinfo *hw = rvu->hw; |
1672 | u16 pcifunc = req->hdr.pcifunc; |
1673 | struct rvu_block *block; |
1674 | int blkaddr, nixlf, err; |
1675 | struct rvu_pfvf *pfvf; |
1676 | |
1677 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
1678 | blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NIX, pcifunc); |
1679 | if (!pfvf->nixlf || blkaddr < 0) |
1680 | return NIX_AF_ERR_AF_LF_INVALID; |
1681 | |
1682 | block = &hw->block[blkaddr]; |
1683 | nixlf = rvu_get_lf(rvu, block, pcifunc, slot: 0); |
1684 | if (nixlf < 0) |
1685 | return NIX_AF_ERR_AF_LF_INVALID; |
1686 | |
1687 | if (req->flags & NIX_LF_DISABLE_FLOWS) |
1688 | rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); |
1689 | else |
1690 | rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); |
1691 | |
1692 | /* Free any tx vtag def entries used by this NIX LF */ |
1693 | if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) |
1694 | nix_free_tx_vtag_entries(rvu, pcifunc); |
1695 | |
1696 | nix_interface_deinit(rvu, pcifunc, nixlf); |
1697 | |
1698 | /* Reset this NIX LF */ |
1699 | err = rvu_lf_reset(rvu, block, lf: nixlf); |
1700 | if (err) { |
1701 | dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n" , |
1702 | block->addr - BLKADDR_NIX0, nixlf); |
1703 | return NIX_AF_ERR_LF_RESET; |
1704 | } |
1705 | |
1706 | nix_ctx_free(rvu, pfvf); |
1707 | |
1708 | return 0; |
1709 | } |
1710 | |
1711 | int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, |
1712 | struct nix_mark_format_cfg *req, |
1713 | struct nix_mark_format_cfg_rsp *rsp) |
1714 | { |
1715 | u16 pcifunc = req->hdr.pcifunc; |
1716 | struct nix_hw *nix_hw; |
1717 | struct rvu_pfvf *pfvf; |
1718 | int blkaddr, rc; |
1719 | u32 cfg; |
1720 | |
1721 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
1722 | blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NIX, pcifunc); |
1723 | if (!pfvf->nixlf || blkaddr < 0) |
1724 | return NIX_AF_ERR_AF_LF_INVALID; |
1725 | |
1726 | nix_hw = get_nix_hw(hw: rvu->hw, blkaddr); |
1727 | if (!nix_hw) |
1728 | return NIX_AF_ERR_INVALID_NIXBLK; |
1729 | |
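	/* Pack the mark format: offset in bits [18:16], Y mask/value in
	 * bits [15:8] and R mask/value in bits [7:0], per the shifts below.
	 */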
	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Handle shaper update specially for a few silicon revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = false;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

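	/* Park the queue via SW_XOFF while the rate register is rewritten;
	 * on the affected revisions toggling CIR/PIR enable on a live queue
	 * is unsafe (inferred from the VLD/C_CON poll in the enable path).
	 */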
	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}

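/* Clear the PARENT and SCHEDULE CSRs of a freed scheduler queue so that
 * no stale topology or weight is left behind for its next user.
 */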
static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
				  int lvl, int schq)
{
	u64 tlx_parent = 0, tlx_schedule = 0;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL2:
		tlx_parent = NIX_AF_TL2X_PARENT(schq);
		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		tlx_parent = NIX_AF_TL3X_PARENT(schq);
		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		tlx_parent = NIX_AF_TL4X_PARENT(schq);
		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		/* no need to reset SMQ_CFG as HW clears this CSR
		 * on SMQ flush
		 */
		tlx_parent = NIX_AF_MDQX_PARENT(schq);
		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
		break;
	default:
		return;
	}

	if (tlx_parent)
		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);

	if (tlx_schedule)
		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
			      int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 reg;

	/* Skip this if shaping is not supported */
	if (!hw->cap.nix_shaping)
		return;

	/* Clear level specific SW_XOFF */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		reg = NIX_AF_TL1X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL2:
		reg = NIX_AF_TL2X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		reg = NIX_AF_TL3X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		reg = NIX_AF_TL4X_SW_XOFF(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		reg = NIX_AF_MDQX_SW_XOFF(schq);
		break;
	default:
		return;
	}

	rvu_write64(rvu, blkaddr, reg, 0x0);
}

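/* Transmit links are numbered CGX LMACs first ([0, cgx_links)), then LBK
 * links, then SDP; LBK VFs land on the first LBK link.
 */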
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

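/* For fixed txschq mapping, carve the per-level queue space by link:
 * each link's range is derived from the per-LMAC quotas, with the SDP
 * range placed after all CGX and LBK ranges.
 */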
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
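		/* With fixed mapping each PF_FUNC owns exactly one
		 * predetermined slot in the link's range, so the free
		 * count here is at most 1.
		 */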
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int nixlf;
	u16 schq;

	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (rc)
		return rc;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

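/* Snapshot the scheduler tree above an SMQ: walk SMQ->TL1 via the
 * *_PARENT CSRs, recording each level's CIR/PIR offsets and current
 * values so the flush path can disable shaping and later restore it.
 */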
static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
				   struct nix_smq_flush_ctx *smq_flush_ctx)
{
	struct nix_smq_tree_ctx *smq_tree_ctx;
	u64 parent_off, regval;
	u16 schq;
	int lvl;

	smq_flush_ctx->smq = smq;

	schq = smq;
	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		if (lvl == NIX_TXSCH_LVL_TL1) {
			smq_flush_ctx->tl1_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
			smq_tree_ctx->pir_off = 0;
			smq_tree_ctx->pir_val = 0;
			parent_off = 0;
		} else if (lvl == NIX_TXSCH_LVL_TL2) {
			smq_flush_ctx->tl2_schq = schq;
			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
			parent_off = NIX_AF_TL2X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL3) {
			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
			parent_off = NIX_AF_TL3X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_TL4) {
			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
			parent_off = NIX_AF_TL4X_PARENT(schq);
		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
			parent_off = NIX_AF_MDQX_PARENT(schq);
		}
		/* save cir/pir register values */
		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
		if (smq_tree_ctx->pir_off)
			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);

		/* get parent txsch node */
		if (parent_off) {
			regval = rvu_read64(rvu, blkaddr, parent_off);
			schq = (regval >> 16) & 0x1FF;
		}
	}
}

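/* Set (or clear) SW_XOFF on every other TL2 belonging to the same PF so
 * that its traffic cannot interleave with the SMQ being flushed.
 */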
static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u64 regoff;
	int tl2;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	/* loop through all TL2s with matching PF_FUNC */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
		/* skip the smq(flush) TL2 */
		if (tl2 == smq_flush_ctx->tl2_schq)
			continue;
		/* skip unused TL2s */
		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
			continue;
		/* skip if PF_FUNC doesn't match */
		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
				    ~RVU_PFVF_FUNC_MASK)))
			continue;
		/* enable/disable XOFF */
		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
		if (enable)
			rvu_write64(rvu, blkaddr, regoff, 0x1);
		else
			rvu_write64(rvu, blkaddr, regoff, 0x0);
	}
}

static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
{
	u64 cir_off, pir_off, cir_val, pir_val;
	struct nix_smq_tree_ctx *smq_tree_ctx;
	int lvl;

	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
		cir_off = smq_tree_ctx->cir_off;
		cir_val = smq_tree_ctx->cir_val;
		pir_off = smq_tree_ctx->pir_off;
		pir_val = smq_tree_ctx->pir_val;

		if (enable) {
			rvu_write64(rvu, blkaddr, cir_off, cir_val);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, pir_val);
		} else {
			rvu_write64(rvu, blkaddr, cir_off, 0x0);
			if (lvl != NIX_TXSCH_LVL_TL1)
				rvu_write64(rvu, blkaddr, pir_off, 0x0);
		}
	}
}

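/* Quiesce and flush one SMQ: pause the sibling TL2s, zero the shaper
 * rates along the tree, trigger the flush with enqueue XOFF set, poll
 * for completion and then restore the saved state.
 */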
static int nix_smq_flush(struct rvu *rvu, int blkaddr,
			 int smq, u16 pcifunc, int nixlf)
{
	struct nix_smq_flush_ctx *smq_flush_ctx;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	if (!is_rvu_otx2(rvu)) {
		/* Skip SMQ flush if pkt count is zero */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
		if (!cfg)
			return 0;
	}

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
						   lmac_id, true);
	}

	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
	if (!smq_flush_ctx)
		return -ENOMEM;
	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_info(rvu->dev,
			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
			 nixlf, smq);

	/* clear XOFF on TL2s */
	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
	kfree(smq_flush_ctx);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
	return err;
}

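/* Tear down every scheduler queue owned by 'pcifunc': first disable link
 * config and SW_XOFFs, then flush the SMQs, and only then return the
 * queues to the free pool and sync NDC-TX.
 */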
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];

		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
		}
	}
	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
			  nix_get_tx_link(rvu, pcifunc));

	/* On PF cleanup, clear cfg done flag as
	 * PF would have changed default config.
	 */
	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
		schq = nix_get_tx_link(rvu, pcifunc);
		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
		 * VF might be using this TL1 queue
		 */
		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above the aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}

static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		rc = NIX_AF_ERR_TLX_INVALID;
		goto err;
	}

	/* Clear SW_XOFF of this resource only.
	 * For the SMQ level, clearing XOFF along the whole
	 * path is the user's responsibility.
	 */
	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);

	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);

	/* Flush if it is an SMQ. The onus of disabling
	 * TL2/3 queue links before SMQ flush is on the user.
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ &&
	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
		rc = NIX_AF_SMQ_FLUSH_FAILED;
		goto err;
	}

	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);
	else
		return nix_txschq_free_one(rvu, req);
}

static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}

static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}

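/* One-time TL1 defaults (RR priority, DWRR quantum/weight, shaping off)
 * applied on a VF's behalf when its PF hasn't configured TL1 yet.
 */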
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes; on newer silicons
	 * it's changed to a weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)

static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}

void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
			struct nix_txsch *txsch, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;
	u64 cfg;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
	lbk_link_start = hw->cgx_links;

	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with an NPC TX MCAM rule
		 */
		lbk_links = hw->lbk_links;
		while (lbk_links--)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							      lbk_link_start +
							      lbk_links), cfg);
	}
}

int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct nix_txschq_config *rsp)
{
	u64 reg, val, regval, schq_regbase, val_mask;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (req->read)
		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

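	/* For each register, regval_mask selects the bits to preserve from
	 * the current CSR value; cleared mask bits take the requester's
	 * value (see the read-modify-write below).
	 */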
	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;
		val_mask = req->regval_mask[idx];

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		val = rvu_read64(rvu, blkaddr, reg);
		regval = (val & val_mask) | (regval & ~val_mask);

		/* Handle shaping state toggle specially */
		if (hw->cap.nix_shaper_toggle_wait &&
		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						req->lvl, reg, regval))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, hence split the register write so
		 * that the flush is triggered first and the remaining bits
		 * are written later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}

static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG Type 7 reserved for vf vlan */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

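	/* VTAG_TYPE CSR layout used here: tag size in the low bits, bit 4
	 * enables stripping and bit 5 enables capture.
	 */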
	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}

static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}

static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u64 regval;
	int index;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

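	/* A 4-byte default tag (VTAGSIZE_T4 == 0) occupies the upper word
	 * of the DATA CSR, hence the shift; an 8-byte tag uses the whole
	 * register.
	 */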
	regval = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);

	return index;
}

static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
			     struct nix_vtag_config *req)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	u16 pcifunc = req->hdr.pcifunc;
	int idx0 = req->tx.vtag0_idx;
	int idx1 = req->tx.vtag1_idx;
	struct nix_txvlan *vlan;
	int err = 0;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.free_vtag0 && req->tx.free_vtag1)
		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
		    vlan->entry2pfvf_map[idx1] != pcifunc)
			return NIX_AF_ERR_PARAM;

	mutex_lock(&vlan->rsrc_lock);

	if (req->tx.free_vtag0) {
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
		if (err)
			goto exit;
	}

	if (req->tx.free_vtag1)
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);

exit:
	mutex_unlock(&vlan->rsrc_lock);
	return err;
}

static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
			   struct nix_vtag_config *req,
			   struct nix_vtag_config_rsp *rsp)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u16 pcifunc = req->hdr.pcifunc;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.cfg_vtag0) {
		rsp->vtag0_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag0, req->vtag_size);

		if (rsp->vtag0_idx < 0)
			return NIX_AF_ERR_TX_VTAG_NOSPC;

		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
	}

	if (req->tx.cfg_vtag1) {
		rsp->vtag1_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag1, req->vtag_size);

		if (rsp->vtag1_idx < 0)
			goto err_free;

		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
	}

	return 0;

err_free:
	if (req->tx.cfg_vtag0)
		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);

	return NIX_AF_ERR_TX_VTAG_NOSPC;
}

int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct nix_vtag_config_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		/* rx vtag configuration */
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* tx vtag configuration */
		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
		    (req->tx.free_vtag0 || req->tx.free_vtag1))
			return NIX_AF_ERR_PARAM;

		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);

		if (req->tx.free_vtag0 || req->tx.free_vtag1)
			return nix_tx_vtag_decfg(rvu, blkaddr, req);
	}

	return 0;
}

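/* Program one multicast/mirror entry (MCE) via the admin queue: an entry
 * names a destination PF_FUNC plus RQ/RSS index and links to 'next',
 * with EOL terminating the replication chain.
 */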
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next,
			     int index, u8 mce_op, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Use RSS with RSS index 0 */
	aq_req.mce.op = mce_op;
	aq_req.mce.index = index;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}

static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
{
	struct hlist_node *tmp;
	struct mce *mce;

	/* Scan through the current list */
	hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
		hlist_del(&mce->node);
		kfree(mce);
	}

	mce_list->count = 0;
	mce_list->max = 0;
}

static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
{
	return elem->mce_start_index + elem->mcast_mce_list.count - 1;
}

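/* Replay a software MCE list into hardware: inactive entries at the head
 * are skipped by advancing mce_start_index, and the final entry is
 * written with EOL set to terminate the chain.
 */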
static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_mcast_grp_elem *elem)
{
	int idx, last_idx, next_idx, err;
	struct nix_mce_list *mce_list;
	struct mce *mce, *prev_mce;

	mce_list = &elem->mcast_mce_list;
	idx = elem->mce_start_index;
	last_idx = nix_get_last_mce_list_index(elem);
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		if (!mce->is_active) {
			if (idx == elem->mce_start_index) {
				idx++;
				prev_mce = mce;
				elem->mce_start_index = idx;
				continue;
			} else if (idx == last_idx) {
				err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
							prev_mce->pcifunc, next_idx,
							prev_mce->rq_rss_index,
							prev_mce->dest_type,
							false);
				if (err)
					return err;

				break;
			}
		}

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					mce->rq_rss_index, mce->dest_type,
					next_idx > last_idx);
		if (err)
			return err;

		idx++;
		prev_mce = mce;
	}

	return 0;
}

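/* Egress replication entries live in the NIX_AF_TX_MCASTX CSRs: each
 * word packs the next index at bit 16, EOL at bit 12 and the destination
 * channel in the low bits, mirroring the regval packing below.
 */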
3172 | static void nix_update_egress_mce_list_hw(struct rvu *rvu, |
3173 | struct nix_hw *nix_hw, |
3174 | struct nix_mcast_grp_elem *elem) |
3175 | { |
3176 | struct nix_mce_list *mce_list; |
3177 | int idx, last_idx, next_idx; |
3178 | struct mce *mce, *prev_mce; |
3179 | u64 regval; |
3180 | u8 eol; |
3181 | |
3182 | mce_list = &elem->mcast_mce_list; |
3183 | idx = elem->mce_start_index; |
3184 | last_idx = nix_get_last_mce_list_index(elem); |
3185 | hlist_for_each_entry(mce, &mce_list->head, node) { |
3186 | if (idx > last_idx) |
3187 | break; |
3188 | |
3189 | if (!mce->is_active) { |
3190 | if (idx == elem->mce_start_index) { |
3191 | idx++; |
3192 | prev_mce = mce; |
3193 | elem->mce_start_index = idx; |
3194 | continue; |
3195 | } else if (idx == last_idx) { |
3196 | regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; |
3197 | rvu_write64(rvu, block: nix_hw->blkaddr, |
3198 | NIX_AF_TX_MCASTX(idx - 1), |
3199 | val: regval); |
3200 | break; |
3201 | } |
3202 | } |
3203 | |
3204 | eol = 0; |
3205 | next_idx = idx + 1; |
3206 | /* EOL should be set in last MCE */ |
3207 | if (next_idx > last_idx) |
3208 | eol = 1; |
3209 | |
3210 | regval = (next_idx << 16) | (eol << 12) | mce->channel; |
3211 | rvu_write64(rvu, block: nix_hw->blkaddr, |
3212 | NIX_AF_TX_MCASTX(idx), |
3213 | val: regval); |
3214 | idx++; |
3215 | prev_mce = mce; |
3216 | } |
3217 | } |
3218 | |
3219 | static int nix_del_mce_list_entry(struct rvu *rvu, |
3220 | struct nix_hw *nix_hw, |
3221 | struct nix_mcast_grp_elem *elem, |
3222 | struct nix_mcast_grp_update_req *req) |
3223 | { |
3224 | u32 num_entry = req->num_mce_entry; |
3225 | struct nix_mce_list *mce_list; |
3226 | struct mce *mce; |
3227 | bool is_found; |
3228 | int i; |
3229 | |
3230 | mce_list = &elem->mcast_mce_list; |
3231 | for (i = 0; i < num_entry; i++) { |
3232 | is_found = false; |
3233 | hlist_for_each_entry(mce, &mce_list->head, node) { |
3234 | /* If already exists, then delete */ |
3235 | if (mce->pcifunc == req->pcifunc[i]) { |
3236 | hlist_del(n: &mce->node); |
3237 | kfree(objp: mce); |
3238 | mce_list->count--; |
3239 | is_found = true; |
3240 | break; |
3241 | } |
3242 | } |
3243 | |
3244 | if (!is_found) |
3245 | return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; |
3246 | } |
3247 | |
3248 | mce_list->max = mce_list->count; |
3249 | /* Dump the updated list to HW */ |
3250 | if (elem->dir == NIX_MCAST_INGRESS) |
3251 | return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); |
3252 | |
3253 | nix_update_egress_mce_list_hw(rvu, nix_hw, elem); |
3254 | return 0; |
3255 | } |
3256 | |
3257 | static int nix_add_mce_list_entry(struct rvu *rvu, |
3258 | struct nix_hw *nix_hw, |
3259 | struct nix_mcast_grp_elem *elem, |
3260 | struct nix_mcast_grp_update_req *req) |
3261 | { |
3262 | u32 num_entry = req->num_mce_entry; |
3263 | struct nix_mce_list *mce_list; |
3264 | struct hlist_node *tmp; |
3265 | struct mce *mce; |
3266 | int i; |
3267 | |
3268 | mce_list = &elem->mcast_mce_list; |
3269 | for (i = 0; i < num_entry; i++) { |
3270 | mce = kzalloc(size: sizeof(*mce), GFP_KERNEL); |
3271 | if (!mce) |
3272 | goto free_mce; |
3273 | |
3274 | mce->pcifunc = req->pcifunc[i]; |
3275 | mce->channel = req->channel[i]; |
3276 | mce->rq_rss_index = req->rq_rss_index[i]; |
3277 | mce->dest_type = req->dest_type[i]; |
3278 | mce->is_active = 1; |
3279 | hlist_add_head(n: &mce->node, h: &mce_list->head); |
3280 | mce_list->count++; |
3281 | } |
3282 | |
3283 | mce_list->max += num_entry; |
3284 | |
3285 | /* Dump the updated list to HW */ |
3286 | if (elem->dir == NIX_MCAST_INGRESS) |
3287 | return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); |
3288 | |
3289 | nix_update_egress_mce_list_hw(rvu, nix_hw, elem); |
3290 | return 0; |
3291 | |
3292 | free_mce: |
3293 | hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { |
3294 | hlist_del(n: &mce->node); |
3295 | kfree(objp: mce); |
3296 | mce_list->count--; |
3297 | } |
3298 | |
3299 | return -ENOMEM; |
3300 | } |
3301 | |
3302 | static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, |
3303 | u16 pcifunc, bool add) |
3304 | { |
3305 | struct mce *mce, *tail = NULL; |
3306 | bool delete = false; |
3307 | |
3308 | /* Scan through the current list */ |
3309 | hlist_for_each_entry(mce, &mce_list->head, node) { |
3310 | /* If already exists, then delete */ |
3311 | if (mce->pcifunc == pcifunc && !add) { |
3312 | delete = true; |
3313 | break; |
3314 | } else if (mce->pcifunc == pcifunc && add) { |
3315 | /* entry already exists */ |
3316 | return 0; |
3317 | } |
3318 | tail = mce; |
3319 | } |
3320 | |
3321 | if (delete) { |
3322 | hlist_del(n: &mce->node); |
3323 | kfree(objp: mce); |
3324 | mce_list->count--; |
3325 | return 0; |
3326 | } |
3327 | |
3328 | if (!add) |
3329 | return 0; |
3330 | |
3331 | /* Add a new one to the list, at the tail */ |
3332 | mce = kzalloc(size: sizeof(*mce), GFP_KERNEL); |
3333 | if (!mce) |
3334 | return -ENOMEM; |
3335 | mce->pcifunc = pcifunc; |
3336 | if (!tail) |
3337 | hlist_add_head(n: &mce->node, h: &mce_list->head); |
3338 | else |
3339 | hlist_add_behind(n: &mce->node, prev: &tail->node); |
3340 | mce_list->count++; |
3341 | return 0; |
3342 | } |
3343 | |
3344 | int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, |
3345 | struct nix_mce_list *mce_list, |
3346 | int mce_idx, int mcam_index, bool add) |
3347 | { |
3348 | int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; |
3349 | struct npc_mcam *mcam = &rvu->hw->mcam; |
3350 | struct nix_mcast *mcast; |
3351 | struct nix_hw *nix_hw; |
3352 | struct mce *mce; |
3353 | |
3354 | if (!mce_list) |
3355 | return -EINVAL; |
3356 | |
3357 | /* Get this PF/VF func's MCE index */ |
3358 | idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); |
3359 | |
3360 | if (idx > (mce_idx + mce_list->max)) { |
3361 | dev_err(rvu->dev, |
3362 | "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n" , |
3363 | __func__, idx, mce_list->max, |
3364 | pcifunc >> RVU_PFVF_PF_SHIFT); |
3365 | return -EINVAL; |
3366 | } |
3367 | |
3368 | err = nix_get_struct_ptrs(rvu, pcifunc, nix_hw: &nix_hw, blkaddr: &blkaddr); |
3369 | if (err) |
3370 | return err; |
3371 | |
3372 | mcast = &nix_hw->mcast; |
3373 | mutex_lock(&mcast->mce_lock); |
3374 | |
3375 | err = nix_update_mce_list_entry(mce_list, pcifunc, add); |
3376 | if (err) |
3377 | goto end; |
3378 | |
3379 | /* Disable MCAM entry in NPC */ |
3380 | if (!mce_list->count) { |
3381 | npc_blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NPC, pcifunc: 0); |
3382 | npc_enable_mcam_entry(rvu, mcam, blkaddr: npc_blkaddr, index: mcam_index, enable: false); |
3383 | goto end; |
3384 | } |
3385 | |
3386 | /* Dump the updated list to HW */ |
3387 | idx = mce_idx; |
3388 | last_idx = idx + mce_list->count - 1; |
3389 | hlist_for_each_entry(mce, &mce_list->head, node) { |
3390 | if (idx > last_idx) |
3391 | break; |
3392 | |
3393 | next_idx = idx + 1; |
3394 | /* EOL should be set in last MCE */ |
3395 | err = nix_blk_setup_mce(rvu, nix_hw, mce: idx, op: NIX_AQ_INSTOP_WRITE, |
3396 | pcifunc: mce->pcifunc, next: next_idx, |
3397 | index: 0, mce_op: 1, |
3398 | eol: (next_idx > last_idx) ? true : false); |
3399 | if (err) |
3400 | goto end; |
3401 | idx++; |
3402 | } |
3403 | |
3404 | end: |
3405 | mutex_unlock(lock: &mcast->mce_lock); |
3406 | return err; |
3407 | } |
3408 | |
3409 | void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, |
3410 | struct nix_mce_list **mce_list, int *mce_idx) |
3411 | { |
3412 | struct rvu_hwinfo *hw = rvu->hw; |
3413 | struct rvu_pfvf *pfvf; |
3414 | |
3415 | if (!hw->cap.nix_rx_multicast || |
3416 | !is_pf_cgxmapped(rvu, pf: rvu_get_pf(pcifunc: pcifunc & ~RVU_PFVF_FUNC_MASK))) { |
3417 | *mce_list = NULL; |
3418 | *mce_idx = 0; |
3419 | return; |
3420 | } |
3421 | |
3422 | /* Get this PF/VF func's MCE index */ |
3423 | pfvf = rvu_get_pfvf(rvu, pcifunc: pcifunc & ~RVU_PFVF_FUNC_MASK); |
3424 | |
3425 | if (type == NIXLF_BCAST_ENTRY) { |
3426 | *mce_list = &pfvf->bcast_mce_list; |
3427 | *mce_idx = pfvf->bcast_mce_idx; |
3428 | } else if (type == NIXLF_ALLMULTI_ENTRY) { |
3429 | *mce_list = &pfvf->mcast_mce_list; |
3430 | *mce_idx = pfvf->mcast_mce_idx; |
3431 | } else if (type == NIXLF_PROMISC_ENTRY) { |
3432 | *mce_list = &pfvf->promisc_mce_list; |
3433 | *mce_idx = pfvf->promisc_mce_idx; |
3434 | } else { |
3435 | *mce_list = NULL; |
3436 | *mce_idx = 0; |
3437 | } |
3438 | } |
3439 | |
3440 | static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, |
3441 | int type, bool add) |
3442 | { |
3443 | int err = 0, nixlf, blkaddr, mcam_index, mce_idx; |
3444 | struct npc_mcam *mcam = &rvu->hw->mcam; |
3445 | struct rvu_hwinfo *hw = rvu->hw; |
3446 | struct nix_mce_list *mce_list; |
3447 | int pf; |
3448 | |
3449 | /* skip multicast pkt replication for AF's VFs & SDP links */ |
3450 | if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) |
3451 | return 0; |
3452 | |
3453 | if (!hw->cap.nix_rx_multicast) |
3454 | return 0; |
3455 | |
3456 | pf = rvu_get_pf(pcifunc); |
3457 | if (!is_pf_cgxmapped(rvu, pf)) |
3458 | return 0; |
3459 | |
3460 | blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NIX, pcifunc); |
3461 | if (blkaddr < 0) |
3462 | return -EINVAL; |
3463 | |
3464 | nixlf = rvu_get_lf(rvu, block: &hw->block[blkaddr], pcifunc, slot: 0); |
3465 | if (nixlf < 0) |
3466 | return -EINVAL; |
3467 | |
3468 | nix_get_mce_list(rvu, pcifunc, type, mce_list: &mce_list, mce_idx: &mce_idx); |
3469 | |
3470 | mcam_index = npc_get_nixlf_mcam_index(mcam, |
3471 | pcifunc: pcifunc & ~RVU_PFVF_FUNC_MASK, |
3472 | nixlf, type); |
3473 | err = nix_update_mce_list(rvu, pcifunc, mce_list, |
3474 | mce_idx, mcam_index, add); |
3475 | return err; |
3476 | } |
3477 | |
3478 | static void nix_setup_mcast_grp(struct nix_hw *nix_hw) |
3479 | { |
3480 | struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; |
3481 | |
3482 | INIT_LIST_HEAD(list: &mcast_grp->mcast_grp_head); |
3483 | mutex_init(&mcast_grp->mcast_grp_lock); |
3484 | mcast_grp->next_grp_index = 1; |
3485 | mcast_grp->count = 0; |
3486 | } |
3487 | |
3488 | static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) |
3489 | { |
3490 | struct nix_mcast *mcast = &nix_hw->mcast; |
3491 | int err, pf, numvfs, idx; |
3492 | struct rvu_pfvf *pfvf; |
3493 | u16 pcifunc; |
3494 | u64 cfg; |
3495 | |
/* Skip PF0 (i.e. AF) */
3497 | for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { |
3498 | cfg = rvu_read64(rvu, block: BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); |
3499 | /* If PF is not enabled, nothing to do */ |
3500 | if (!((cfg >> 20) & 0x01)) |
3501 | continue; |
3502 | /* Get numVFs attached to this PF */ |
3503 | numvfs = (cfg >> 12) & 0xFF; |
3504 | |
3505 | pfvf = &rvu->pf[pf]; |
3506 | |
/* Is this NIX0/1 block mapped to this PF? */
3508 | if (pfvf->nix_blkaddr != nix_hw->blkaddr) |
3509 | continue; |
3510 | |
3511 | /* save start idx of broadcast mce list */ |
3512 | pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, count: numvfs + 1, NIX_MCAST_INGRESS); |
3513 | nix_mce_list_init(list: &pfvf->bcast_mce_list, max: numvfs + 1); |
3514 | |
3515 | /* save start idx of multicast mce list */ |
3516 | pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, count: numvfs + 1, NIX_MCAST_INGRESS); |
3517 | nix_mce_list_init(list: &pfvf->mcast_mce_list, max: numvfs + 1); |
3518 | |
3519 | /* save the start idx of promisc mce list */ |
3520 | pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, count: numvfs + 1, NIX_MCAST_INGRESS); |
3521 | nix_mce_list_init(list: &pfvf->promisc_mce_list, max: numvfs + 1); |
3522 | |
3523 | for (idx = 0; idx < (numvfs + 1); idx++) { |
3524 | /* idx-0 is for PF, followed by VFs */ |
3525 | pcifunc = (pf << RVU_PFVF_PF_SHIFT); |
3526 | pcifunc |= idx; |
/* Add dummy entries now, so that we don't have to check
* whether AQ_OP should be INIT/WRITE later on.
* Will be updated when a NIXLF is attached to/detached
* from these PF/VFs.
*/
3532 | err = nix_blk_setup_mce(rvu, nix_hw, |
3533 | mce: pfvf->bcast_mce_idx + idx, |
3534 | op: NIX_AQ_INSTOP_INIT, |
3535 | pcifunc, next: 0, index: 0, mce_op: 1, eol: true); |
3536 | if (err) |
3537 | return err; |
3538 | |
3539 | /* add dummy entries to multicast mce list */ |
3540 | err = nix_blk_setup_mce(rvu, nix_hw, |
3541 | mce: pfvf->mcast_mce_idx + idx, |
3542 | op: NIX_AQ_INSTOP_INIT, |
3543 | pcifunc, next: 0, index: 0, mce_op: 1, eol: true); |
3544 | if (err) |
3545 | return err; |
3546 | |
3547 | /* add dummy entries to promisc mce list */ |
3548 | err = nix_blk_setup_mce(rvu, nix_hw, |
3549 | mce: pfvf->promisc_mce_idx + idx, |
3550 | op: NIX_AQ_INSTOP_INIT, |
3551 | pcifunc, next: 0, index: 0, mce_op: 1, eol: true); |
3552 | if (err) |
3553 | return err; |
3554 | } |
3555 | } |
3556 | return 0; |
3557 | } |
3558 | |
3559 | static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) |
3560 | { |
3561 | struct nix_mcast *mcast = &nix_hw->mcast; |
3562 | struct rvu_hwinfo *hw = rvu->hw; |
3563 | int err, size; |
3564 | |
3565 | size = (rvu_read64(rvu, block: blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; |
3566 | size = BIT_ULL(size); |
3567 | |
3568 | /* Allocate bitmap for rx mce entries */ |
3569 | mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; |
3570 | err = rvu_alloc_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_INGRESS]); |
3571 | if (err) |
3572 | return -ENOMEM; |
3573 | |
3574 | /* Allocate bitmap for tx mce entries */ |
3575 | mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; |
3576 | err = rvu_alloc_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_EGRESS]); |
3577 | if (err) { |
3578 | rvu_free_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_INGRESS]); |
3579 | return -ENOMEM; |
3580 | } |
3581 | |
3582 | /* Alloc memory for multicast/mirror replication entries */ |
3583 | err = qmem_alloc(dev: rvu->dev, q: &mcast->mce_ctx, |
3584 | qsize: mcast->mce_counter[NIX_MCAST_INGRESS].max, entry_sz: size); |
3585 | if (err) { |
3586 | rvu_free_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_INGRESS]); |
3587 | rvu_free_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_EGRESS]); |
3588 | return -ENOMEM; |
3589 | } |
3590 | |
3591 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_MCAST_BASE, |
3592 | val: (u64)mcast->mce_ctx->iova); |
3593 | |
3594 | /* Set max list length equal to max no of VFs per PF + PF itself */ |
3595 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_MCAST_CFG, |
3596 | BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); |
3597 | |
3598 | /* Alloc memory for multicast replication buffers */ |
3599 | size = rvu_read64(rvu, block: blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; |
3600 | err = qmem_alloc(dev: rvu->dev, q: &mcast->mcast_buf, |
3601 | qsize: (8UL << MC_BUF_CNT), entry_sz: size); |
3602 | if (err) { |
3603 | rvu_free_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_INGRESS]); |
3604 | rvu_free_bitmap(rsrc: &mcast->mce_counter[NIX_MCAST_EGRESS]); |
3605 | return -ENOMEM; |
3606 | } |
3607 | |
3608 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_MCAST_BUF_BASE, |
3609 | val: (u64)mcast->mcast_buf->iova); |
3610 | |
3611 | /* Alloc pkind for NIX internal RX multicast/mirror replay */ |
3612 | mcast->replay_pkind = rvu_alloc_rsrc(rsrc: &hw->pkind.rsrc); |
3613 | |
3614 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_MCAST_BUF_CFG, |
3615 | BIT_ULL(63) | (mcast->replay_pkind << 24) | |
3616 | BIT_ULL(20) | MC_BUF_CNT); |
3617 | |
3618 | mutex_init(&mcast->mce_lock); |
3619 | |
3620 | nix_setup_mcast_grp(nix_hw); |
3621 | |
3622 | return nix_setup_mce_tables(rvu, nix_hw); |
3623 | } |
3624 | |
3625 | static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) |
3626 | { |
3627 | struct nix_txvlan *vlan = &nix_hw->txvlan; |
3628 | int err; |
3629 | |
/* Allocate resource bitmap for tx vtag def registers */
3631 | vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; |
3632 | err = rvu_alloc_bitmap(rsrc: &vlan->rsrc); |
3633 | if (err) |
3634 | return -ENOMEM; |
3635 | |
3636 | /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ |
3637 | vlan->entry2pfvf_map = devm_kcalloc(dev: rvu->dev, n: vlan->rsrc.max, |
3638 | size: sizeof(u16), GFP_KERNEL); |
3639 | if (!vlan->entry2pfvf_map) |
3640 | goto free_mem; |
3641 | |
3642 | mutex_init(&vlan->rsrc_lock); |
3643 | return 0; |
3644 | |
3645 | free_mem: |
3646 | kfree(objp: vlan->rsrc.bmap); |
3647 | return -ENOMEM; |
3648 | } |
3649 | |
3650 | static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) |
3651 | { |
3652 | struct nix_txsch *txsch; |
3653 | int err, lvl, schq; |
3654 | u64 cfg, reg; |
3655 | |
3656 | /* Get scheduler queue count of each type and alloc |
3657 | * bitmap for each for alloc/free/attach operations. |
3658 | */ |
3659 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { |
3660 | txsch = &nix_hw->txsch[lvl]; |
3661 | txsch->lvl = lvl; |
3662 | switch (lvl) { |
3663 | case NIX_TXSCH_LVL_SMQ: |
3664 | reg = NIX_AF_MDQ_CONST; |
3665 | break; |
3666 | case NIX_TXSCH_LVL_TL4: |
3667 | reg = NIX_AF_TL4_CONST; |
3668 | break; |
3669 | case NIX_TXSCH_LVL_TL3: |
3670 | reg = NIX_AF_TL3_CONST; |
3671 | break; |
3672 | case NIX_TXSCH_LVL_TL2: |
3673 | reg = NIX_AF_TL2_CONST; |
3674 | break; |
3675 | case NIX_TXSCH_LVL_TL1: |
3676 | reg = NIX_AF_TL1_CONST; |
3677 | break; |
3678 | } |
3679 | cfg = rvu_read64(rvu, block: blkaddr, offset: reg); |
3680 | txsch->schq.max = cfg & 0xFFFF; |
3681 | err = rvu_alloc_bitmap(rsrc: &txsch->schq); |
3682 | if (err) |
3683 | return err; |
3684 | |
3685 | /* Allocate memory for scheduler queues to |
3686 | * PF/VF pcifunc mapping info. |
3687 | */ |
3688 | txsch->pfvf_map = devm_kcalloc(dev: rvu->dev, n: txsch->schq.max, |
3689 | size: sizeof(u32), GFP_KERNEL); |
3690 | if (!txsch->pfvf_map) |
3691 | return -ENOMEM; |
3692 | for (schq = 0; schq < txsch->schq.max; schq++) |
3693 | txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); |
3694 | } |
3695 | |
/* Set up a default value of 8192 as DWRR MTU */
3697 | if (rvu->hw->cap.nix_common_dwrr_mtu || |
3698 | rvu->hw->cap.nix_multiple_dwrr_mtu) { |
3699 | rvu_write64(rvu, block: blkaddr, |
3700 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_RPM), |
3701 | val: convert_bytes_to_dwrr_mtu(bytes: 8192)); |
3702 | rvu_write64(rvu, block: blkaddr, |
3703 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_LBK), |
3704 | val: convert_bytes_to_dwrr_mtu(bytes: 8192)); |
3705 | rvu_write64(rvu, block: blkaddr, |
3706 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_SDP), |
3707 | val: convert_bytes_to_dwrr_mtu(bytes: 8192)); |
3708 | } |
3709 | |
3710 | return 0; |
3711 | } |
3712 | |
3713 | int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, |
3714 | int blkaddr, u32 cfg) |
3715 | { |
3716 | int fmt_idx; |
3717 | |
3718 | for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { |
3719 | if (nix_hw->mark_format.cfg[fmt_idx] == cfg) |
3720 | return fmt_idx; |
3721 | } |
3722 | if (fmt_idx >= nix_hw->mark_format.total) |
3723 | return -ERANGE; |
3724 | |
3725 | rvu_write64(rvu, block: blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), val: cfg); |
3726 | nix_hw->mark_format.cfg[fmt_idx] = cfg; |
3727 | nix_hw->mark_format.in_use++; |
3728 | return fmt_idx; |
3729 | } |
3730 | |
3731 | static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, |
3732 | int blkaddr) |
3733 | { |
3734 | u64 cfgs[] = { |
3735 | [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, |
3736 | [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, |
3737 | [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, |
3738 | [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, |
3739 | [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, |
3740 | [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, |
3741 | [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, |
3742 | [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, |
3743 | [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, |
3744 | }; |
3745 | int i, rc; |
3746 | u64 total; |
3747 | |
3748 | total = (rvu_read64(rvu, block: blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; |
3749 | nix_hw->mark_format.total = (u8)total; |
3750 | nix_hw->mark_format.cfg = devm_kcalloc(dev: rvu->dev, n: total, size: sizeof(u32), |
3751 | GFP_KERNEL); |
3752 | if (!nix_hw->mark_format.cfg) |
3753 | return -ENOMEM; |
3754 | for (i = 0; i < NIX_MARK_CFG_MAX; i++) { |
3755 | rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg: cfgs[i]); |
3756 | if (rc < 0) |
3757 | dev_err(rvu->dev, "Err %d in setup mark format %d\n" , |
3758 | i, rc); |
3759 | } |
3760 | |
3761 | return 0; |
3762 | } |
3763 | |
3764 | static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) |
3765 | { |
3766 | /* CN10K supports LBK FIFO size 72 KB */ |
3767 | if (rvu->hw->lbk_bufsize == 0x12000) |
3768 | *max_mtu = CN10K_LBK_LINK_MAX_FRS; |
3769 | else |
3770 | *max_mtu = NIC_HW_MAX_FRS; |
3771 | } |
3772 | |
3773 | static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) |
3774 | { |
3775 | int fifo_size = rvu_cgx_get_fifolen(rvu); |
3776 | |
3777 | /* RPM supports FIFO len 128 KB and RPM2 supports double the |
* FIFO len to accommodate 8 LMACs
3779 | */ |
3780 | if (fifo_size == 0x20000 || fifo_size == 0x40000) |
3781 | *max_mtu = CN10K_LMAC_LINK_MAX_FRS; |
3782 | else |
3783 | *max_mtu = NIC_HW_MAX_FRS; |
3784 | } |
3785 | |
3786 | int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, |
3787 | struct nix_hw_info *rsp) |
3788 | { |
3789 | u16 pcifunc = req->hdr.pcifunc; |
3790 | u64 dwrr_mtu; |
3791 | int blkaddr; |
3792 | |
3793 | blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NIX, pcifunc); |
3794 | if (blkaddr < 0) |
3795 | return NIX_AF_ERR_AF_LF_INVALID; |
3796 | |
3797 | if (is_lbk_vf(rvu, pcifunc)) |
3798 | rvu_get_lbk_link_max_frs(rvu, max_mtu: &rsp->max_mtu); |
3799 | else |
3800 | rvu_get_lmac_link_max_frs(rvu, max_mtu: &rsp->max_mtu); |
3801 | |
3802 | rsp->min_mtu = NIC_HW_MIN_FRS; |
3803 | |
3804 | if (!rvu->hw->cap.nix_common_dwrr_mtu && |
3805 | !rvu->hw->cap.nix_multiple_dwrr_mtu) { |
3806 | /* Return '1' on OTx2 */ |
3807 | rsp->rpm_dwrr_mtu = 1; |
3808 | rsp->sdp_dwrr_mtu = 1; |
3809 | rsp->lbk_dwrr_mtu = 1; |
3810 | return 0; |
3811 | } |
3812 | |
3813 | /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ |
3814 | dwrr_mtu = rvu_read64(rvu, block: blkaddr, |
3815 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_RPM)); |
3816 | rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); |
3817 | |
3818 | dwrr_mtu = rvu_read64(rvu, block: blkaddr, |
3819 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_SDP)); |
3820 | rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); |
3821 | |
3822 | dwrr_mtu = rvu_read64(rvu, block: blkaddr, |
3823 | offset: nix_get_dwrr_mtu_reg(hw: rvu->hw, SMQ_LINK_TYPE_LBK)); |
3824 | rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); |
3825 | |
3826 | return 0; |
3827 | } |
3828 | |
3829 | int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, |
3830 | struct msg_rsp *rsp) |
3831 | { |
3832 | u16 pcifunc = req->hdr.pcifunc; |
3833 | int i, nixlf, blkaddr, err; |
3834 | u64 stats; |
3835 | |
3836 | err = nix_get_nixlf(rvu, pcifunc, nixlf: &nixlf, nix_blkaddr: &blkaddr); |
3837 | if (err) |
3838 | return err; |
3839 | |
3840 | /* Get stats count supported by HW */ |
3841 | stats = rvu_read64(rvu, block: blkaddr, NIX_AF_CONST1); |
3842 | |
3843 | /* Reset tx stats */ |
3844 | for (i = 0; i < ((stats >> 24) & 0xFF); i++) |
3845 | rvu_write64(rvu, block: blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), val: 0); |
3846 | |
3847 | /* Reset rx stats */ |
3848 | for (i = 0; i < ((stats >> 32) & 0xFF); i++) |
3849 | rvu_write64(rvu, block: blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), val: 0); |
3850 | |
3851 | return 0; |
3852 | } |
3853 | |
3854 | /* Returns the ALG index to be set into NPC_RX_ACTION */ |
3855 | static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) |
3856 | { |
3857 | int i; |
3858 | |
/* Scan over existing algo entries to find a match */
3860 | for (i = 0; i < nix_hw->flowkey.in_use; i++) |
3861 | if (nix_hw->flowkey.flowkey[i] == flow_cfg) |
3862 | return i; |
3863 | |
3864 | return -ERANGE; |
3865 | } |
3866 | |
3867 | static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) |
3868 | { |
3869 | int idx, nr_field, key_off, field_marker, keyoff_marker; |
3870 | int max_key_off, max_bit_pos, group_member; |
3871 | struct nix_rx_flowkey_alg *field; |
3872 | struct nix_rx_flowkey_alg tmp; |
3873 | u32 key_type, valid_key; |
3874 | u32 l3_l4_src_dst; |
3875 | int l4_key_offset = 0; |
3876 | |
3877 | if (!alg) |
3878 | return -EINVAL; |
3879 | |
3880 | #define FIELDS_PER_ALG 5 |
3881 | #define MAX_KEY_OFF 40 |
3882 | /* Clear all fields */ |
3883 | memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); |
3884 | |
3885 | /* Each of the 32 possible flow key algorithm definitions should |
3886 | * fall into above incremental config (except ALG0). Otherwise a |
3887 | * single NPC MCAM entry is not sufficient for supporting RSS. |
3888 | * |
3889 | * If a different definition or combination needed then NPC MCAM |
* has to be programmed to filter such pkts and its action should
3891 | * point to this definition to calculate flowtag or hash. |
3892 | * |
* The `for loop` goes over _all_ protocol fields and the following
* variables depict the state machine's forward progress logic.
3895 | * |
3896 | * keyoff_marker - Enabled when hash byte length needs to be accounted |
3897 | * in field->key_offset update. |
3898 | * field_marker - Enabled when a new field needs to be selected. |
3899 | * group_member - Enabled when protocol is part of a group. |
3900 | */ |
3901 | |
3902 | /* Last 4 bits (31:28) are reserved to specify SRC, DST |
3903 | * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, |
3904 | * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST |
3905 | * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST |
3906 | */ |
3907 | l3_l4_src_dst = flow_cfg; |
3908 | /* Reset these 4 bits, so that these won't be part of key */ |
3909 | flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; |
3910 | |
3911 | keyoff_marker = 0; max_key_off = 0; group_member = 0; |
3912 | nr_field = 0; key_off = 0; field_marker = 1; |
3913 | field = &tmp; max_bit_pos = fls(x: flow_cfg); |
3914 | for (idx = 0; |
3915 | idx < max_bit_pos && nr_field < FIELDS_PER_ALG && |
3916 | key_off < MAX_KEY_OFF; idx++) { |
3917 | key_type = BIT(idx); |
3918 | valid_key = flow_cfg & key_type; |
3919 | /* Found a field marker, reset the field values */ |
3920 | if (field_marker) |
3921 | memset(&tmp, 0, sizeof(tmp)); |
3922 | |
3923 | field_marker = true; |
3924 | keyoff_marker = true; |
3925 | switch (key_type) { |
3926 | case NIX_FLOW_KEY_TYPE_PORT: |
3927 | field->sel_chan = true; |
/* This should be set to 1 when SEL_CHAN is set */
3929 | field->bytesm1 = 1; |
3930 | break; |
3931 | case NIX_FLOW_KEY_TYPE_IPV4_PROTO: |
3932 | field->lid = NPC_LID_LC; |
3933 | field->hdr_offset = 9; /* offset */ |
3934 | field->bytesm1 = 0; /* 1 byte */ |
3935 | field->ltype_match = NPC_LT_LC_IP; |
3936 | field->ltype_mask = 0xF; |
3937 | break; |
3938 | case NIX_FLOW_KEY_TYPE_IPV4: |
3939 | case NIX_FLOW_KEY_TYPE_INNR_IPV4: |
3940 | field->lid = NPC_LID_LC; |
3941 | field->ltype_match = NPC_LT_LC_IP; |
3942 | if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { |
3943 | field->lid = NPC_LID_LG; |
3944 | field->ltype_match = NPC_LT_LG_TU_IP; |
3945 | } |
3946 | field->hdr_offset = 12; /* SIP offset */ |
3947 | field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ |
3948 | |
3949 | /* Only SIP */ |
3950 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) |
3951 | field->bytesm1 = 3; /* SIP, 4 bytes */ |
3952 | |
3953 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { |
3954 | /* Both SIP + DIP */ |
3955 | if (field->bytesm1 == 3) { |
3956 | field->bytesm1 = 7; /* SIP + DIP, 8B */ |
3957 | } else { |
3958 | /* Only DIP */ |
3959 | field->hdr_offset = 16; /* DIP off */ |
3960 | field->bytesm1 = 3; /* DIP, 4 bytes */ |
3961 | } |
3962 | } |
3963 | |
3964 | field->ltype_mask = 0xF; /* Match only IPv4 */ |
3965 | keyoff_marker = false; |
3966 | break; |
3967 | case NIX_FLOW_KEY_TYPE_IPV6: |
3968 | case NIX_FLOW_KEY_TYPE_INNR_IPV6: |
3969 | field->lid = NPC_LID_LC; |
3970 | field->ltype_match = NPC_LT_LC_IP6; |
3971 | if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { |
3972 | field->lid = NPC_LID_LG; |
3973 | field->ltype_match = NPC_LT_LG_TU_IP6; |
3974 | } |
3975 | field->hdr_offset = 8; /* SIP offset */ |
3976 | field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ |
3977 | |
3978 | /* Only SIP */ |
3979 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) |
3980 | field->bytesm1 = 15; /* SIP, 16 bytes */ |
3981 | |
3982 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { |
3983 | /* Both SIP + DIP */ |
3984 | if (field->bytesm1 == 15) { |
3985 | /* SIP + DIP, 32 bytes */ |
3986 | field->bytesm1 = 31; |
3987 | } else { |
3988 | /* Only DIP */ |
3989 | field->hdr_offset = 24; /* DIP off */ |
3990 | field->bytesm1 = 15; /* DIP,16 bytes */ |
3991 | } |
3992 | } |
3993 | field->ltype_mask = 0xF; /* Match only IPv6 */ |
3994 | break; |
3995 | case NIX_FLOW_KEY_TYPE_TCP: |
3996 | case NIX_FLOW_KEY_TYPE_UDP: |
3997 | case NIX_FLOW_KEY_TYPE_SCTP: |
3998 | case NIX_FLOW_KEY_TYPE_INNR_TCP: |
3999 | case NIX_FLOW_KEY_TYPE_INNR_UDP: |
4000 | case NIX_FLOW_KEY_TYPE_INNR_SCTP: |
4001 | field->lid = NPC_LID_LD; |
4002 | if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || |
4003 | key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || |
4004 | key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) |
4005 | field->lid = NPC_LID_LH; |
4006 | field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ |
4007 | |
4008 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) |
4009 | field->bytesm1 = 1; /* SRC, 2 bytes */ |
4010 | |
4011 | if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { |
4012 | /* Both SRC + DST */ |
4013 | if (field->bytesm1 == 1) { |
4014 | /* SRC + DST, 4 bytes */ |
4015 | field->bytesm1 = 3; |
4016 | } else { |
/* Only Dport */
4018 | field->hdr_offset = 2; /* DST off */ |
4019 | field->bytesm1 = 1; /* DST, 2 bytes */ |
4020 | } |
4021 | } |
4022 | |
/* LTYPE enum values under NPC_LID_LD and NPC_LID_LH are the
* same, so no need to change the ltype_match, just change
* the lid for inner protocols
*/
4027 | BUILD_BUG_ON((int)NPC_LT_LD_TCP != |
4028 | (int)NPC_LT_LH_TU_TCP); |
4029 | BUILD_BUG_ON((int)NPC_LT_LD_UDP != |
4030 | (int)NPC_LT_LH_TU_UDP); |
4031 | BUILD_BUG_ON((int)NPC_LT_LD_SCTP != |
4032 | (int)NPC_LT_LH_TU_SCTP); |
4033 | |
4034 | if ((key_type == NIX_FLOW_KEY_TYPE_TCP || |
4035 | key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && |
4036 | valid_key) { |
4037 | field->ltype_match |= NPC_LT_LD_TCP; |
4038 | group_member = true; |
4039 | } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || |
4040 | key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && |
4041 | valid_key) { |
4042 | field->ltype_match |= NPC_LT_LD_UDP; |
4043 | group_member = true; |
4044 | } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || |
4045 | key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && |
4046 | valid_key) { |
4047 | field->ltype_match |= NPC_LT_LD_SCTP; |
4048 | group_member = true; |
4049 | } |
4050 | field->ltype_mask = ~field->ltype_match; |
4051 | if (key_type == NIX_FLOW_KEY_TYPE_SCTP || |
4052 | key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { |
/* Handle the case where any of the group items
* is enabled in the group but not the final one
*/
4056 | if (group_member) { |
4057 | valid_key = true; |
4058 | group_member = false; |
4059 | } |
4060 | } else { |
4061 | field_marker = false; |
4062 | keyoff_marker = false; |
4063 | } |
4064 | |
/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
* remember the TCP key offset within the 40 byte hash key.
*/
4068 | if (key_type == NIX_FLOW_KEY_TYPE_TCP) |
4069 | l4_key_offset = key_off; |
4070 | break; |
4071 | case NIX_FLOW_KEY_TYPE_NVGRE: |
4072 | field->lid = NPC_LID_LD; |
4073 | field->hdr_offset = 4; /* VSID offset */ |
4074 | field->bytesm1 = 2; |
4075 | field->ltype_match = NPC_LT_LD_NVGRE; |
4076 | field->ltype_mask = 0xF; |
4077 | break; |
4078 | case NIX_FLOW_KEY_TYPE_VXLAN: |
4079 | case NIX_FLOW_KEY_TYPE_GENEVE: |
4080 | field->lid = NPC_LID_LE; |
4081 | field->bytesm1 = 2; |
4082 | field->hdr_offset = 4; |
4083 | field->ltype_mask = 0xF; |
4084 | field_marker = false; |
4085 | keyoff_marker = false; |
4086 | |
4087 | if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { |
4088 | field->ltype_match |= NPC_LT_LE_VXLAN; |
4089 | group_member = true; |
4090 | } |
4091 | |
4092 | if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { |
4093 | field->ltype_match |= NPC_LT_LE_GENEVE; |
4094 | group_member = true; |
4095 | } |
4096 | |
4097 | if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { |
4098 | if (group_member) { |
4099 | field->ltype_mask = ~field->ltype_match; |
4100 | field_marker = true; |
4101 | keyoff_marker = true; |
4102 | valid_key = true; |
4103 | group_member = false; |
4104 | } |
4105 | } |
4106 | break; |
4107 | case NIX_FLOW_KEY_TYPE_ETH_DMAC: |
4108 | case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: |
4109 | field->lid = NPC_LID_LA; |
4110 | field->ltype_match = NPC_LT_LA_ETHER; |
4111 | if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { |
4112 | field->lid = NPC_LID_LF; |
4113 | field->ltype_match = NPC_LT_LF_TU_ETHER; |
4114 | } |
4115 | field->hdr_offset = 0; |
4116 | field->bytesm1 = 5; /* DMAC 6 Byte */ |
4117 | field->ltype_mask = 0xF; |
4118 | break; |
4119 | case NIX_FLOW_KEY_TYPE_IPV6_EXT: |
4120 | field->lid = NPC_LID_LC; |
4121 | field->hdr_offset = 40; /* IPV6 hdr */ |
4122 | field->bytesm1 = 0; /* 1 Byte ext hdr*/ |
4123 | field->ltype_match = NPC_LT_LC_IP6_EXT; |
4124 | field->ltype_mask = 0xF; |
4125 | break; |
4126 | case NIX_FLOW_KEY_TYPE_GTPU: |
4127 | field->lid = NPC_LID_LE; |
4128 | field->hdr_offset = 4; |
4129 | field->bytesm1 = 3; /* 4 bytes TID*/ |
4130 | field->ltype_match = NPC_LT_LE_GTPU; |
4131 | field->ltype_mask = 0xF; |
4132 | break; |
4133 | case NIX_FLOW_KEY_TYPE_CUSTOM0: |
4134 | field->lid = NPC_LID_LC; |
4135 | field->hdr_offset = 6; |
4136 | field->bytesm1 = 1; /* 2 Bytes*/ |
4137 | field->ltype_match = NPC_LT_LC_CUSTOM0; |
4138 | field->ltype_mask = 0xF; |
4139 | break; |
4140 | case NIX_FLOW_KEY_TYPE_VLAN: |
4141 | field->lid = NPC_LID_LB; |
4142 | field->hdr_offset = 2; /* Skip TPID (2-bytes) */ |
4143 | field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ |
4144 | field->ltype_match = NPC_LT_LB_CTAG; |
4145 | field->ltype_mask = 0xF; |
4146 | field->fn_mask = 1; /* Mask out the first nibble */ |
4147 | break; |
4148 | case NIX_FLOW_KEY_TYPE_AH: |
4149 | case NIX_FLOW_KEY_TYPE_ESP: |
4150 | field->hdr_offset = 0; |
4151 | field->bytesm1 = 7; /* SPI + sequence number */ |
4152 | field->ltype_mask = 0xF; |
4153 | field->lid = NPC_LID_LE; |
4154 | field->ltype_match = NPC_LT_LE_ESP; |
4155 | if (key_type == NIX_FLOW_KEY_TYPE_AH) { |
4156 | field->lid = NPC_LID_LD; |
4157 | field->ltype_match = NPC_LT_LD_AH; |
4158 | field->hdr_offset = 4; |
4159 | keyoff_marker = false; |
4160 | } |
4161 | break; |
4162 | } |
4163 | field->ena = 1; |
4164 | |
4165 | /* Found a valid flow key type */ |
4166 | if (valid_key) { |
4167 | /* Use the key offset of TCP/UDP/SCTP fields |
4168 | * for ESP/AH fields. |
4169 | */ |
4170 | if (key_type == NIX_FLOW_KEY_TYPE_ESP || |
4171 | key_type == NIX_FLOW_KEY_TYPE_AH) |
4172 | key_off = l4_key_offset; |
4173 | field->key_offset = key_off; |
4174 | memcpy(&alg[nr_field], field, sizeof(*field)); |
4175 | max_key_off = max(max_key_off, field->bytesm1 + 1); |
4176 | |
4177 | /* Found a field marker, get the next field */ |
4178 | if (field_marker) |
4179 | nr_field++; |
4180 | } |
4181 | |
4182 | /* Found a keyoff marker, update the new key_off */ |
4183 | if (keyoff_marker) { |
4184 | key_off += max_key_off; |
4185 | max_key_off = 0; |
4186 | } |
4187 | } |
4188 | /* Processed all the flow key types */ |
4189 | if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) |
4190 | return 0; |
4191 | else |
4192 | return NIX_AF_ERR_RSS_NOSPC_FIELD; |
4193 | } |
4194 | |
4195 | static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) |
4196 | { |
4197 | u64 field[FIELDS_PER_ALG]; |
4198 | struct nix_hw *hw; |
4199 | int fid, rc; |
4200 | |
4201 | hw = get_nix_hw(hw: rvu->hw, blkaddr); |
4202 | if (!hw) |
4203 | return NIX_AF_ERR_INVALID_NIXBLK; |
4204 | |
/* No room to add a new flow hash algorithm */
4206 | if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) |
4207 | return NIX_AF_ERR_RSS_NOSPC_ALGO; |
4208 | |
4209 | /* Generate algo fields for the given flow_cfg */ |
4210 | rc = set_flowkey_fields(alg: (struct nix_rx_flowkey_alg *)field, flow_cfg); |
4211 | if (rc) |
4212 | return rc; |
4213 | |
4214 | /* Update ALGX_FIELDX register with generated fields */ |
4215 | for (fid = 0; fid < FIELDS_PER_ALG; fid++) |
4216 | rvu_write64(rvu, block: blkaddr, |
4217 | NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, |
4218 | fid), val: field[fid]); |
4219 | |
/* Store the flow_cfg for further lookup */
4221 | rc = hw->flowkey.in_use; |
4222 | hw->flowkey.flowkey[rc] = flow_cfg; |
4223 | hw->flowkey.in_use++; |
4224 | |
4225 | return rc; |
4226 | } |
4227 | |
4228 | int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, |
4229 | struct nix_rss_flowkey_cfg *req, |
4230 | struct nix_rss_flowkey_cfg_rsp *rsp) |
4231 | { |
4232 | u16 pcifunc = req->hdr.pcifunc; |
4233 | int alg_idx, nixlf, blkaddr; |
4234 | struct nix_hw *nix_hw; |
4235 | int err; |
4236 | |
4237 | err = nix_get_nixlf(rvu, pcifunc, nixlf: &nixlf, nix_blkaddr: &blkaddr); |
4238 | if (err) |
4239 | return err; |
4240 | |
4241 | nix_hw = get_nix_hw(hw: rvu->hw, blkaddr); |
4242 | if (!nix_hw) |
4243 | return NIX_AF_ERR_INVALID_NIXBLK; |
4244 | |
4245 | alg_idx = get_flowkey_alg_idx(nix_hw, flow_cfg: req->flowkey_cfg); |
/* Failed to get algo index from the existing list, reserve new */
4247 | if (alg_idx < 0) { |
4248 | alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, |
4249 | flow_cfg: req->flowkey_cfg); |
4250 | if (alg_idx < 0) |
4251 | return alg_idx; |
4252 | } |
4253 | rsp->alg_idx = alg_idx; |
4254 | rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, group: req->group, |
4255 | alg_idx, mcam_index: req->mcam_index); |
4256 | return 0; |
4257 | } |
4258 | |
4259 | static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) |
4260 | { |
4261 | u32 flowkey_cfg, minkey_cfg; |
4262 | int alg, fid, rc; |
4263 | |
4264 | /* Disable all flow key algx fieldx */ |
4265 | for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { |
4266 | for (fid = 0; fid < FIELDS_PER_ALG; fid++) |
4267 | rvu_write64(rvu, block: blkaddr, |
4268 | NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), |
4269 | val: 0); |
4270 | } |
4271 | |
4272 | /* IPv4/IPv6 SIP/DIPs */ |
4273 | flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; |
4274 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4275 | if (rc < 0) |
4276 | return rc; |
4277 | |
4278 | /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ |
4279 | minkey_cfg = flowkey_cfg; |
4280 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; |
4281 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4282 | if (rc < 0) |
4283 | return rc; |
4284 | |
4285 | /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ |
4286 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; |
4287 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4288 | if (rc < 0) |
4289 | return rc; |
4290 | |
4291 | /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ |
4292 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; |
4293 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4294 | if (rc < 0) |
4295 | return rc; |
4296 | |
4297 | /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ |
4298 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | |
4299 | NIX_FLOW_KEY_TYPE_UDP; |
4300 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4301 | if (rc < 0) |
4302 | return rc; |
4303 | |
4304 | /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ |
4305 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | |
4306 | NIX_FLOW_KEY_TYPE_SCTP; |
4307 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4308 | if (rc < 0) |
4309 | return rc; |
4310 | |
4311 | /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ |
4312 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | |
4313 | NIX_FLOW_KEY_TYPE_SCTP; |
4314 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4315 | if (rc < 0) |
4316 | return rc; |
4317 | |
4318 | /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ |
4319 | flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | |
4320 | NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; |
4321 | rc = reserve_flowkey_alg_idx(rvu, blkaddr, flow_cfg: flowkey_cfg); |
4322 | if (rc < 0) |
4323 | return rc; |
4324 | |
4325 | return 0; |
4326 | } |
4327 | |
4328 | int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, |
4329 | struct nix_set_mac_addr *req, |
4330 | struct msg_rsp *rsp) |
4331 | { |
4332 | bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; |
4333 | u16 pcifunc = req->hdr.pcifunc; |
4334 | int blkaddr, nixlf, err; |
4335 | struct rvu_pfvf *pfvf; |
4336 | |
4337 | err = nix_get_nixlf(rvu, pcifunc, nixlf: &nixlf, nix_blkaddr: &blkaddr); |
4338 | if (err) |
4339 | return err; |
4340 | |
4341 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
4342 | |
4343 | /* untrusted VF can't overwrite admin(PF) changes */ |
4344 | if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && |
4345 | (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { |
4346 | dev_warn(rvu->dev, |
4347 | "MAC address set by admin(PF) cannot be overwritten by untrusted VF" ); |
4348 | return -EPERM; |
4349 | } |
4350 | |
4351 | ether_addr_copy(dst: pfvf->mac_addr, src: req->mac_addr); |
4352 | |
4353 | rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, |
4354 | chan: pfvf->rx_chan_base, mac_addr: req->mac_addr); |
4355 | |
4356 | if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) |
4357 | ether_addr_copy(dst: pfvf->default_mac, src: req->mac_addr); |
4358 | |
4359 | rvu_switch_update_rules(rvu, pcifunc); |
4360 | |
4361 | return 0; |
4362 | } |
4363 | |
4364 | int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, |
4365 | struct msg_req *req, |
4366 | struct nix_get_mac_addr_rsp *rsp) |
4367 | { |
4368 | u16 pcifunc = req->hdr.pcifunc; |
4369 | struct rvu_pfvf *pfvf; |
4370 | |
4371 | if (!is_nixlf_attached(rvu, pcifunc)) |
4372 | return NIX_AF_ERR_AF_LF_INVALID; |
4373 | |
4374 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
4375 | |
4376 | ether_addr_copy(dst: rsp->mac_addr, src: pfvf->mac_addr); |
4377 | |
4378 | return 0; |
4379 | } |
4380 | |
4381 | int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, |
4382 | struct msg_rsp *rsp) |
4383 | { |
4384 | bool allmulti, promisc, nix_rx_multicast; |
4385 | u16 pcifunc = req->hdr.pcifunc; |
4386 | struct rvu_pfvf *pfvf; |
4387 | int nixlf, err; |
4388 | |
4389 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
4390 | promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; |
4391 | allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; |
4392 | pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false; |
4393 | |
4394 | nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; |
4395 | |
4396 | if (is_vf(pcifunc) && !nix_rx_multicast && |
4397 | (promisc || allmulti)) { |
4398 | dev_warn_ratelimited(rvu->dev, |
4399 | "VF promisc/multicast not supported\n" ); |
4400 | return 0; |
4401 | } |
4402 | |
4403 | /* untrusted VF can't configure promisc/allmulti */ |
4404 | if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && |
4405 | (promisc || allmulti)) |
4406 | return 0; |
4407 | |
4408 | err = nix_get_nixlf(rvu, pcifunc, nixlf: &nixlf, NULL); |
4409 | if (err) |
4410 | return err; |
4411 | |
4412 | if (nix_rx_multicast) { |
4413 | /* add/del this PF_FUNC to/from mcast pkt replication list */ |
4414 | err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, |
4415 | add: allmulti); |
4416 | if (err) { |
4417 | dev_err(rvu->dev, |
4418 | "Failed to update pcifunc 0x%x to multicast list\n" , |
4419 | pcifunc); |
4420 | return err; |
4421 | } |
4422 | |
4423 | /* add/del this PF_FUNC to/from promisc pkt replication list */ |
4424 | err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, |
4425 | add: promisc); |
4426 | if (err) { |
4427 | dev_err(rvu->dev, |
4428 | "Failed to update pcifunc 0x%x to promisc list\n" , |
4429 | pcifunc); |
4430 | return err; |
4431 | } |
4432 | } |
4433 | |
4434 | /* install/uninstall allmulti entry */ |
4435 | if (allmulti) { |
4436 | rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, |
4437 | chan: pfvf->rx_chan_base); |
4438 | } else { |
4439 | if (!nix_rx_multicast) |
4440 | rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, enable: false); |
4441 | } |
4442 | |
4443 | /* install/uninstall promisc entry */ |
4444 | if (promisc) |
4445 | rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, |
4446 | chan: pfvf->rx_chan_base, |
4447 | chan_cnt: pfvf->rx_chan_cnt); |
4448 | else |
4449 | if (!nix_rx_multicast) |
4450 | rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, enable: false); |
4451 | |
4452 | return 0; |
4453 | } |
4454 | |
4455 | static void nix_find_link_frs(struct rvu *rvu, |
4456 | struct nix_frs_cfg *req, u16 pcifunc) |
4457 | { |
4458 | int pf = rvu_get_pf(pcifunc); |
4459 | struct rvu_pfvf *pfvf; |
4460 | int maxlen, minlen; |
4461 | int numvfs, hwvf; |
4462 | int vf; |
4463 | |
4464 | /* Update with requester's min/max lengths */ |
4465 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
4466 | pfvf->maxlen = req->maxlen; |
4467 | if (req->update_minlen) |
4468 | pfvf->minlen = req->minlen; |
4469 | |
4470 | maxlen = req->maxlen; |
4471 | minlen = req->update_minlen ? req->minlen : 0; |
4472 | |
4473 | /* Get this PF's numVFs and starting hwvf */ |
4474 | rvu_get_pf_numvfs(rvu, pf, numvfs: &numvfs, hwvf: &hwvf); |
4475 | |
4476 | /* For each VF, compare requested max/minlen */ |
4477 | for (vf = 0; vf < numvfs; vf++) { |
4478 | pfvf = &rvu->hwvf[hwvf + vf]; |
4479 | if (pfvf->maxlen > maxlen) |
4480 | maxlen = pfvf->maxlen; |
4481 | if (req->update_minlen && |
4482 | pfvf->minlen && pfvf->minlen < minlen) |
4483 | minlen = pfvf->minlen; |
4484 | } |
4485 | |
4486 | /* Compare requested max/minlen with PF's max/minlen */ |
4487 | pfvf = &rvu->pf[pf]; |
4488 | if (pfvf->maxlen > maxlen) |
4489 | maxlen = pfvf->maxlen; |
4490 | if (req->update_minlen && |
4491 | pfvf->minlen && pfvf->minlen < minlen) |
4492 | minlen = pfvf->minlen; |
4493 | |
/* Update the request with max/min of the PF's and its VFs' max/min */
4495 | req->maxlen = maxlen; |
4496 | if (req->update_minlen) |
4497 | req->minlen = minlen; |
4498 | } |
4499 | |
4500 | int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, |
4501 | struct msg_rsp *rsp) |
4502 | { |
4503 | struct rvu_hwinfo *hw = rvu->hw; |
4504 | u16 pcifunc = req->hdr.pcifunc; |
4505 | int pf = rvu_get_pf(pcifunc); |
4506 | int blkaddr, link = -1; |
4507 | struct nix_hw *nix_hw; |
4508 | struct rvu_pfvf *pfvf; |
4509 | u8 cgx = 0, lmac = 0; |
4510 | u16 max_mtu; |
4511 | u64 cfg; |
4512 | |
4513 | blkaddr = rvu_get_blkaddr(rvu, blktype: BLKTYPE_NIX, pcifunc); |
4514 | if (blkaddr < 0) |
4515 | return NIX_AF_ERR_AF_LF_INVALID; |
4516 | |
4517 | nix_hw = get_nix_hw(hw: rvu->hw, blkaddr); |
4518 | if (!nix_hw) |
4519 | return NIX_AF_ERR_INVALID_NIXBLK; |
4520 | |
4521 | if (is_lbk_vf(rvu, pcifunc)) |
4522 | rvu_get_lbk_link_max_frs(rvu, max_mtu: &max_mtu); |
4523 | else |
4524 | rvu_get_lmac_link_max_frs(rvu, max_mtu: &max_mtu); |
4525 | |
4526 | if (!req->sdp_link && req->maxlen > max_mtu) |
4527 | return NIX_AF_ERR_FRS_INVALID; |
4528 | |
4529 | if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) |
4530 | return NIX_AF_ERR_FRS_INVALID; |
4531 | |
4532 | /* Check if config is for SDP link */ |
4533 | if (req->sdp_link) { |
4534 | if (!hw->sdp_links) |
4535 | return NIX_AF_ERR_RX_LINK_INVALID; |
4536 | link = hw->cgx_links + hw->lbk_links; |
4537 | goto linkcfg; |
4538 | } |
4539 | |
4540 | /* Check if the request is from CGX mapped RVU PF */ |
4541 | if (is_pf_cgxmapped(rvu, pf)) { |
4542 | /* Get CGX and LMAC to which this PF is mapped and find link */ |
4543 | rvu_get_cgx_lmac_id(map: rvu->pf2cgxlmac_map[pf], cgx_id: &cgx, lmac_id: &lmac); |
4544 | link = (cgx * hw->lmac_per_cgx) + lmac; |
4545 | } else if (pf == 0) { |
4546 | /* For VFs of PF0 ingress is LBK port, so config LBK link */ |
4547 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
4548 | link = hw->cgx_links + pfvf->lbkid; |
4549 | } |
4550 | |
4551 | if (link < 0) |
4552 | return NIX_AF_ERR_RX_LINK_INVALID; |
4553 | |
4554 | linkcfg: |
4555 | nix_find_link_frs(rvu, req, pcifunc); |
4556 | |
4557 | cfg = rvu_read64(rvu, block: blkaddr, NIX_AF_RX_LINKX_CFG(link)); |
4558 | cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); |
4559 | if (req->update_minlen) |
4560 | cfg = (cfg & ~0xFFFFULL) | req->minlen; |
4561 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_LINKX_CFG(link), val: cfg); |
4562 | |
4563 | return 0; |
4564 | } |
4565 | |
4566 | int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, |
4567 | struct msg_rsp *rsp) |
4568 | { |
4569 | int nixlf, blkaddr, err; |
4570 | u64 cfg; |
4571 | |
4572 | err = nix_get_nixlf(rvu, pcifunc: req->hdr.pcifunc, nixlf: &nixlf, nix_blkaddr: &blkaddr); |
4573 | if (err) |
4574 | return err; |
4575 | |
4576 | cfg = rvu_read64(rvu, block: blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); |
4577 | /* Set the interface configuration */ |
4578 | if (req->len_verify & BIT(0)) |
4579 | cfg |= BIT_ULL(41); |
4580 | else |
4581 | cfg &= ~BIT_ULL(41); |
4582 | |
4583 | if (req->len_verify & BIT(1)) |
4584 | cfg |= BIT_ULL(40); |
4585 | else |
4586 | cfg &= ~BIT_ULL(40); |
4587 | |
4588 | if (req->len_verify & NIX_RX_DROP_RE) |
4589 | cfg |= BIT_ULL(32); |
4590 | else |
4591 | cfg &= ~BIT_ULL(32); |
4592 | |
4593 | if (req->csum_verify & BIT(0)) |
4594 | cfg |= BIT_ULL(37); |
4595 | else |
4596 | cfg &= ~BIT_ULL(37); |
4597 | |
4598 | rvu_write64(rvu, block: blkaddr, NIX_AF_LFX_RX_CFG(nixlf), val: cfg); |
4599 | |
4600 | return 0; |
4601 | } |
4602 | |
4603 | static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) |
4604 | { |
4605 | return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ |
4606 | } |
4607 | |
4608 | static void nix_link_config(struct rvu *rvu, int blkaddr, |
4609 | struct nix_hw *nix_hw) |
4610 | { |
4611 | struct rvu_hwinfo *hw = rvu->hw; |
4612 | int cgx, lmac_cnt, slink, link; |
4613 | u16 lbk_max_frs, lmac_max_frs; |
4614 | unsigned long lmac_bmap; |
4615 | u64 tx_credits, cfg; |
4616 | u64 lmac_fifo_len; |
4617 | int iter; |
4618 | |
4619 | rvu_get_lbk_link_max_frs(rvu, max_mtu: &lbk_max_frs); |
4620 | rvu_get_lmac_link_max_frs(rvu, max_mtu: &lmac_max_frs); |
4621 | |
4622 | /* Set default min/max packet lengths allowed on NIX Rx links. |
4623 | * |
* With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
4625 | * as undersize and report them to SW as error pkts, hence |
4626 | * setting it to 40 bytes. |
4627 | */ |
4628 | for (link = 0; link < hw->cgx_links; link++) { |
4629 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_LINKX_CFG(link), |
4630 | val: ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); |
4631 | } |
4632 | |
4633 | for (link = hw->cgx_links; link < hw->lbk_links; link++) { |
4634 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_LINKX_CFG(link), |
4635 | val: ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); |
4636 | } |
4637 | if (hw->sdp_links) { |
4638 | link = hw->cgx_links + hw->lbk_links; |
4639 | rvu_write64(rvu, block: blkaddr, NIX_AF_RX_LINKX_CFG(link), |
4640 | SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); |
4641 | } |
4642 | |
4643 | /* Get MCS external bypass status for CN10K-B */ |
4644 | if (mcs_get_blkcnt() == 1) { |
4645 | /* Adjust for 2 credits when external bypass is disabled */ |
4646 | nix_hw->cc_mcs_cnt = is_mcs_bypass(mcs_id: 0) ? 0 : 2; |
4647 | } |
4648 | |
4649 | /* Set credits for Tx links assuming max packet length allowed. |
4650 | * This will be reconfigured based on MTU set for PF/VF. |
4651 | */ |
4652 | for (cgx = 0; cgx < hw->cgx; cgx++) { |
4653 | lmac_cnt = cgx_get_lmac_cnt(cgxd: rvu_cgx_pdata(cgx_id: cgx, rvu)); |
4654 | /* Skip when cgx is not available or lmac cnt is zero */ |
4655 | if (lmac_cnt <= 0) |
4656 | continue; |
4657 | slink = cgx * hw->lmac_per_cgx; |
4658 | |
4659 | /* Get LMAC id's from bitmap */ |
4660 | lmac_bmap = cgx_get_lmac_bmap(cgxd: rvu_cgx_pdata(cgx_id: cgx, rvu)); |
4661 | for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { |
4662 | lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac: iter); |
4663 | if (!lmac_fifo_len) { |
4664 | dev_err(rvu->dev, |
4665 | "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n" , |
4666 | __func__, cgx, iter); |
4667 | continue; |
4668 | } |
4669 | tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; |
4670 | /* Enable credits and set credit pkt count to max allowed */ |
4671 | cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); |
4672 | cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); |
4673 | |
4674 | link = iter + slink; |
4675 | nix_hw->tx_credits[link] = tx_credits; |
4676 | rvu_write64(rvu, block: blkaddr, |
4677 | NIX_AF_TX_LINKX_NORM_CREDIT(link), val: cfg); |
4678 | } |
4679 | } |
4680 | |
4681 | /* Set Tx credits for LBK link */ |
4682 | slink = hw->cgx_links; |
4683 | for (link = slink; link < (slink + hw->lbk_links); link++) { |
4684 | tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); |
4685 | nix_hw->tx_credits[link] = tx_credits; |
4686 | /* Enable credits and set credit pkt count to max allowed */ |
4687 | tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); |
4688 | rvu_write64(rvu, block: blkaddr, |
4689 | NIX_AF_TX_LINKX_NORM_CREDIT(link), val: tx_credits); |
4690 | } |
4691 | } |
4692 | |
4693 | static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) |
4694 | { |
4695 | int idx, err; |
4696 | u64 status; |
4697 | |
4698 | /* Start X2P bus calibration */ |
4699 | rvu_write64(rvu, block: blkaddr, NIX_AF_CFG, |
4700 | val: rvu_read64(rvu, block: blkaddr, NIX_AF_CFG) | BIT_ULL(9)); |
4701 | /* Wait for calibration to complete */ |
4702 | err = rvu_poll_reg(rvu, block: blkaddr, |
4703 | NIX_AF_STATUS, BIT_ULL(10), zero: false); |
4704 | if (err) { |
4705 | dev_err(rvu->dev, "NIX X2P bus calibration failed\n" ); |
4706 | return err; |
4707 | } |
4708 | |
4709 | status = rvu_read64(rvu, block: blkaddr, NIX_AF_STATUS); |
4710 | /* Check if CGX devices are ready */ |
4711 | for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { |
4712 | /* Skip when cgx port is not available */ |
4713 | if (!rvu_cgx_pdata(cgx_id: idx, rvu) || |
4714 | (status & (BIT_ULL(16 + idx)))) |
4715 | continue; |
4716 | dev_err(rvu->dev, |
4717 | "CGX%d didn't respond to NIX X2P calibration\n" , idx); |
4718 | err = -EBUSY; |
4719 | } |
4720 | |
4721 | /* Check if LBK is ready */ |
4722 | if (!(status & BIT_ULL(19))) { |
4723 | dev_err(rvu->dev, |
4724 | "LBK didn't respond to NIX X2P calibration\n" ); |
4725 | err = -EBUSY; |
4726 | } |
4727 | |
4728 | /* Clear 'calibrate_x2p' bit */ |
4729 | rvu_write64(rvu, block: blkaddr, NIX_AF_CFG, |
4730 | val: rvu_read64(rvu, block: blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); |
4731 | if (err || (status & 0x3FFULL)) |
4732 | dev_err(rvu->dev, |
4733 | "NIX X2P calibration failed, status 0x%llx\n" , status); |
4734 | if (err) |
4735 | return err; |
4736 | return 0; |
4737 | } |
4738 | |
4739 | static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) |
4740 | { |
4741 | u64 cfg; |
4742 | int err; |
4743 | |
4744 | /* Set admin queue endianness */ |
4745 | cfg = rvu_read64(rvu, block: block->addr, NIX_AF_CFG); |
4746 | #ifdef __BIG_ENDIAN |
4747 | cfg |= BIT_ULL(8); |
4748 | rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); |
4749 | #else |
4750 | cfg &= ~BIT_ULL(8); |
4751 | rvu_write64(rvu, block: block->addr, NIX_AF_CFG, val: cfg); |
4752 | #endif |
4753 | |
4754 | /* Do not bypass NDC cache */ |
4755 | cfg = rvu_read64(rvu, block: block->addr, NIX_AF_NDC_CFG); |
4756 | cfg &= ~0x3FFEULL; |
4757 | #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING |
4758 | /* Disable caching of SQB aka SQEs */ |
4759 | cfg |= 0x04ULL; |
4760 | #endif |
4761 | rvu_write64(rvu, block: block->addr, NIX_AF_NDC_CFG, val: cfg); |
4762 | |
4763 | /* Result structure can be followed by RQ/SQ/CQ context at |
4764 | * RES + 128bytes and a write mask at RES + 256 bytes, depending on |
4765 | * operation type. Alloc sufficient result memory for all operations. |
4766 | */ |
4767 | err = rvu_aq_alloc(rvu, ad_queue: &block->aq, |
4768 | Q_COUNT(AQ_SIZE), inst_size: sizeof(struct nix_aq_inst_s), |
4769 | ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); |
4770 | if (err) |
4771 | return err; |
4772 | |
4773 | rvu_write64(rvu, block: block->addr, NIX_AF_AQ_CFG, AQ_SIZE); |
4774 | rvu_write64(rvu, block: block->addr, |
4775 | NIX_AF_AQ_BASE, val: (u64)block->aq->inst->iova); |
4776 | return 0; |
4777 | } |
4778 | |
4779 | static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) |
4780 | { |
4781 | struct rvu_hwinfo *hw = rvu->hw; |
4782 | u64 hw_const; |
4783 | |
4784 | hw_const = rvu_read64(rvu, block: blkaddr, NIX_AF_CONST1); |
4785 | |
/* On OcteonTx2 DWRR quantum is directly configured into each of
* the transmit scheduler queues. And PF/VF drivers were free to
* config any value up to 2^24.
* On CN10K the HW is modified: the quantum configuration at scheduler
* queues is in terms of weight. And SW needs to set up a base DWRR MTU
* at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
* 'DWRR MTU * weight' to get the quantum.
*
* Check if HW uses a common MTU for all DWRR quantum configs.
* On OcteonTx2 this register field is '0'.
*/
4797 | if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) |
4798 | hw->cap.nix_common_dwrr_mtu = true; |
4799 | |
4800 | if (hw_const & BIT_ULL(61)) |
4801 | hw->cap.nix_multiple_dwrr_mtu = true; |
4802 | } |
4803 | |
4804 | static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) |
4805 | { |
4806 | const struct npc_lt_def_cfg *ltdefs; |
4807 | struct rvu_hwinfo *hw = rvu->hw; |
4808 | int blkaddr = nix_hw->blkaddr; |
4809 | struct rvu_block *block; |
4810 | int err; |
4811 | u64 cfg; |
4812 | |
4813 | block = &hw->block[blkaddr]; |
4814 | |
4815 | if (is_rvu_96xx_B0(rvu)) { |
/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
4817 | * internal state when conditional clocks are turned off. |
4818 | * Hence enable them. |
4819 | */ |
4820 | rvu_write64(rvu, block: blkaddr, NIX_AF_CFG, |
4821 | val: rvu_read64(rvu, block: blkaddr, NIX_AF_CFG) | 0x40ULL); |
4822 | } |
4823 | |
4824 | /* Set chan/link to backpressure TL3 instead of TL2 */ |
4825 | rvu_write64(rvu, block: blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, val: 0x01); |
4826 | |
/* Disable SQ manager's sticky mode operation (set TM6 = 0).
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to the same SMQ and are transmitting pkts at the
* same time.
*/
4831 | cfg = rvu_read64(rvu, block: blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); |
4832 | cfg &= ~BIT_ULL(15); |
4833 | rvu_write64(rvu, block: blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, val: cfg); |
4834 | |
4835 | ltdefs = rvu->kpu.lt_def; |
4836 | /* Calibrate X2P bus to check if CGX/LBK links are fine */ |
4837 | err = nix_calibrate_x2p(rvu, blkaddr); |
4838 | if (err) |
4839 | return err; |
4840 | |
4841 | /* Setup capabilities of the NIX block */ |
4842 | rvu_nix_setup_capabilities(rvu, blkaddr); |
4843 | |
4844 | /* Initialize admin queue */ |
4845 | err = nix_aq_init(rvu, block); |
4846 | if (err) |
4847 | return err; |
4848 | |
4849 | /* Restore CINT timer delay to HW reset values */ |
4850 | rvu_write64(rvu, block: blkaddr, NIX_AF_CINT_DELAY, val: 0x0ULL); |
4851 | |
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);

	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
	cfg |= 1ULL;
	if (!is_rvu_otx2(rvu))
		cfg |= NIX_PTP_1STEP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4860 | |
4861 | if (!is_rvu_otx2(rvu)) |
4862 | rvu_nix_block_cn10k_init(rvu, nix_hw); |
4863 | |
4864 | if (is_block_implemented(hw, blkaddr)) { |
4865 | err = nix_setup_txschq(rvu, nix_hw, blkaddr); |
4866 | if (err) |
4867 | return err; |
4868 | |
4869 | err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); |
4870 | if (err) |
4871 | return err; |
4872 | |
4873 | err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); |
4874 | if (err) |
4875 | return err; |
4876 | |
4877 | err = nix_setup_mcast(rvu, nix_hw, blkaddr); |
4878 | if (err) |
4879 | return err; |
4880 | |
4881 | err = nix_setup_txvlan(rvu, nix_hw); |
4882 | if (err) |
4883 | return err; |
4884 | |
		err = nix_setup_bpids(rvu, nix_hw, blkaddr);
4886 | if (err) |
4887 | return err; |
4888 | |
4889 | /* Configure segmentation offload formats */ |
4890 | nix_setup_lso(rvu, nix_hw, blkaddr); |
4891 | |
4892 | /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. |
4893 | * This helps HW protocol checker to identify headers |
4894 | * and validate length and checksums. |
4895 | */ |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);
4929 | |
4930 | if (!is_rvu_otx2(rvu)) { |
4931 | /* Enable APAD calculation for other protocols |
4932 | * matching APAD0 and APAD1 lt def registers. |
4933 | */ |
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
				    (ltdefs->rx_apad0.valid << 11) |
				    (ltdefs->rx_apad0.lid << 8) |
				    (ltdefs->rx_apad0.ltype_match << 4) |
				    ltdefs->rx_apad0.ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
				    (ltdefs->rx_apad1.valid << 11) |
				    (ltdefs->rx_apad1.lid << 8) |
				    (ltdefs->rx_apad1.ltype_match << 4) |
				    ltdefs->rx_apad1.ltype_mask);
4944 | |
			/* Receive ethertype definition register defines layer
			 * information in NPC_RESULT_S to identify the Ethertype
			 * location in L2 header. Used for Ethertype overwriting
			 * in inline IPsec flow.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
				    (ltdefs->rx_et[0].offset << 12) |
				    (ltdefs->rx_et[0].valid << 11) |
				    (ltdefs->rx_et[0].lid << 8) |
				    (ltdefs->rx_et[0].ltype_match << 4) |
				    ltdefs->rx_et[0].ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
				    (ltdefs->rx_et[1].offset << 12) |
				    (ltdefs->rx_et[1].valid << 11) |
				    (ltdefs->rx_et[1].lid << 8) |
				    (ltdefs->rx_et[1].ltype_match << 4) |
				    ltdefs->rx_et[1].ltype_mask);
4962 | } |
4963 | |
4964 | err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); |
4965 | if (err) |
4966 | return err; |
4967 | |
		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
4970 | if (!nix_hw->tx_credits) |
4971 | return -ENOMEM; |
4972 | |
4973 | /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ |
4974 | nix_link_config(rvu, blkaddr, nix_hw); |
4975 | |
4976 | /* Enable Channel backpressure */ |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4978 | } |
4979 | return 0; |
4980 | } |
4981 | |
4982 | int rvu_nix_init(struct rvu *rvu) |
4983 | { |
4984 | struct rvu_hwinfo *hw = rvu->hw; |
4985 | struct nix_hw *nix_hw; |
4986 | int blkaddr = 0, err; |
4987 | int i = 0; |
4988 | |
	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
4991 | if (!hw->nix) |
4992 | return -ENOMEM; |
4993 | |
4994 | blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); |
4995 | while (blkaddr) { |
4996 | nix_hw = &hw->nix[i]; |
4997 | nix_hw->rvu = rvu; |
4998 | nix_hw->blkaddr = blkaddr; |
4999 | err = rvu_nix_block_init(rvu, nix_hw); |
5000 | if (err) |
5001 | return err; |
5002 | blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); |
5003 | i++; |
5004 | } |
5005 | |
5006 | return 0; |
5007 | } |
5008 | |
5009 | static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, |
5010 | struct rvu_block *block) |
5011 | { |
5012 | struct nix_txsch *txsch; |
5013 | struct nix_mcast *mcast; |
5014 | struct nix_txvlan *vlan; |
5015 | struct nix_hw *nix_hw; |
5016 | int lvl; |
5017 | |
	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
5042 | } |
5043 | } |
5044 | |
5045 | void rvu_nix_freemem(struct rvu *rvu) |
5046 | { |
5047 | struct rvu_hwinfo *hw = rvu->hw; |
5048 | struct rvu_block *block; |
5049 | int blkaddr = 0; |
5050 | |
5051 | blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); |
5052 | while (blkaddr) { |
5053 | block = &hw->block[blkaddr]; |
5054 | rvu_nix_block_freemem(rvu, blkaddr, block); |
5055 | blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); |
5056 | } |
5057 | } |
5058 | |
5059 | static void nix_mcast_update_action(struct rvu *rvu, |
5060 | struct nix_mcast_grp_elem *elem) |
5061 | { |
5062 | struct npc_mcam *mcam = &rvu->hw->mcam; |
5063 | struct nix_rx_action rx_action = { 0 }; |
5064 | struct nix_tx_action tx_action = { 0 }; |
5065 | int npc_blkaddr; |
5066 | |
	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (elem->dir == NIX_MCAST_INGRESS) {
		*(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
							 npc_blkaddr,
							 elem->mcam_index);
		rx_action.index = elem->mce_start_index;
		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
				    *(u64 *)&rx_action);
	} else {
		*(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
							 npc_blkaddr,
							 elem->mcam_index);
		tx_action.index = elem->mce_start_index;
		npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
				    *(u64 *)&tx_action);
	}
5082 | } |
5083 | } |
5084 | |
5085 | static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) |
5086 | { |
5087 | struct nix_mcast_grp_elem *elem; |
5088 | struct nix_mcast_grp *mcast_grp; |
5089 | struct nix_hw *nix_hw; |
5090 | int blkaddr; |
5091 | |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5094 | if (!nix_hw) |
5095 | return; |
5096 | |
5097 | mcast_grp = &nix_hw->mcast_grp; |
5098 | |
5099 | mutex_lock(&mcast_grp->mcast_grp_lock); |
5100 | list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { |
5101 | struct nix_mce_list *mce_list; |
5102 | struct mce *mce; |
5103 | |
5104 | /* Iterate the group elements and disable the element which |
5105 | * received the disable request. |
5106 | */ |
5107 | mce_list = &elem->mcast_mce_list; |
5108 | hlist_for_each_entry(mce, &mce_list->head, node) { |
5109 | if (mce->pcifunc == pcifunc) { |
5110 | mce->is_active = is_active; |
5111 | break; |
5112 | } |
5113 | } |
5114 | |
5115 | /* Dump the updated list to HW */ |
5116 | if (elem->dir == NIX_MCAST_INGRESS) |
5117 | nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); |
5118 | else |
5119 | nix_update_egress_mce_list_hw(rvu, nix_hw, elem); |
5120 | |
5121 | /* Update the multicast index in NPC rule */ |
5122 | nix_mcast_update_action(rvu, elem); |
5123 | } |
	mutex_unlock(&mcast_grp->mcast_grp_lock);
5125 | } |
5126 | |
5127 | int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, |
5128 | struct msg_rsp *rsp) |
5129 | { |
5130 | u16 pcifunc = req->hdr.pcifunc; |
5131 | struct rvu_pfvf *pfvf; |
5132 | int nixlf, err; |
5133 | |
	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	/* Enable the interface if it is in any multicast list */
	nix_mcast_update_mce_entry(rvu, pcifunc, 1);

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_switch_update_rules(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
5151 | } |
5152 | |
5153 | int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, |
5154 | struct msg_rsp *rsp) |
5155 | { |
5156 | u16 pcifunc = req->hdr.pcifunc; |
5157 | struct rvu_pfvf *pfvf; |
5158 | int nixlf, err; |
5159 | |
	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	/* Disable the interface if it is in any multicast list */
	nix_mcast_update_mce_entry(rvu, pcifunc, 0);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
	if (err)
		return err;

	rvu_cgx_tx_enable(rvu, pcifunc, true);
5177 | |
5178 | return 0; |
5179 | } |
5180 | |
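/* SA base address field within NIX_AF_LFX_RX_IPSEC_SA_BASE */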
5181 | #define RX_SA_BASE GENMASK_ULL(52, 7) |
5182 | |
5183 | void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) |
5184 | { |
5185 | struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); |
5186 | struct hwctx_disable_req ctx_req; |
5187 | int pf = rvu_get_pf(pcifunc); |
5188 | struct mac_ops *mac_ops; |
5189 | u8 cgx_id, lmac_id; |
5190 | u64 sa_base; |
5191 | void *cgxd; |
5192 | int err; |
5193 | |
5194 | ctx_req.hdr.pcifunc = pcifunc; |
5195 | |
5196 | /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ |
5197 | rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); |
5198 | rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); |
5199 | nix_interface_deinit(rvu, pcifunc, nixlf); |
5200 | nix_rx_sync(rvu, blkaddr); |
5201 | nix_txschq_free(rvu, pcifunc); |
5202 | |
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);
5206 | |
	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}
5227 | |
5228 | /* reset HW config done for Switch headers */ |
	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
5231 | |
5232 | /* Disabling CGX and NPC config done for PTP */ |
5233 | if (pfvf->hw_rx_tstamp_en) { |
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		cgxd = rvu_cgx_pdata(cgx_id, rvu);
		mac_ops = get_mac_ops(cgxd);
		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
		/* Undo NPC config done for PTP */
		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
			dev_err(rvu->dev, "NPC config for PTP failed\n");
5241 | pfvf->hw_rx_tstamp_en = false; |
5242 | } |
5243 | |
5244 | /* reset priority flow control config */ |
	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
5246 | |
5247 | /* reset 802.3x flow control config */ |
	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
5249 | |
5250 | nix_ctx_free(rvu, pfvf); |
5251 | |
5252 | nix_free_all_bandprof(rvu, pcifunc); |
5253 | |
	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
	if (FIELD_GET(RX_SA_BASE, sa_base)) {
		err = rvu_cpt_ctx_flush(rvu, pcifunc);
		if (err)
			dev_err(rvu->dev,
				"CPT ctx flush failed with error: %d\n", err);
	}
5260 | } |
5261 | } |
5262 | |
5263 | #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) |
5264 | |
5265 | static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) |
5266 | { |
5267 | struct rvu_hwinfo *hw = rvu->hw; |
5268 | struct rvu_block *block; |
5269 | int blkaddr, pf; |
5270 | int nixlf; |
5271 | u64 cfg; |
5272 | |
5273 | pf = rvu_get_pf(pcifunc); |
5274 | if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) |
5275 | return 0; |
5276 | |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
5294 | |
5295 | return 0; |
5296 | } |
5297 | |
5298 | int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, |
5299 | struct msg_rsp *rsp) |
5300 | { |
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
5302 | } |
5303 | |
5304 | int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, |
5305 | struct msg_rsp *rsp) |
5306 | { |
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
5308 | } |
5309 | |
5310 | int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, |
5311 | struct nix_lso_format_cfg *req, |
5312 | struct nix_lso_format_cfg_rsp *rsp) |
5313 | { |
5314 | u16 pcifunc = req->hdr.pcifunc; |
5315 | struct nix_hw *nix_hw; |
5316 | struct rvu_pfvf *pfvf; |
5317 | int blkaddr, idx, f; |
5318 | u64 reg; |
5319 | |
5320 | pfvf = rvu_get_pfvf(rvu, pcifunc); |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
5326 | if (!nix_hw) |
5327 | return NIX_AF_ERR_INVALID_NIXBLK; |
5328 | |
5329 | /* Find existing matching LSO format, if any */ |
5330 | for (idx = 0; idx < nix_hw->lso.in_use; idx++) { |
5331 | for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { |
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5334 | if (req->fields[f] != (reg & req->field_mask)) |
5335 | break; |
5336 | } |
5337 | |
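		/* All fields matched: reuse this existing LSO format */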
5338 | if (f == NIX_LSO_FIELD_MAX) |
5339 | break; |
5340 | } |
5341 | |
5342 | if (idx < nix_hw->lso.in_use) { |
5343 | /* Match found */ |
5344 | rsp->lso_format_idx = idx; |
5345 | return 0; |
5346 | } |
5347 | |
5348 | if (nix_hw->lso.in_use == nix_hw->lso.total) |
5349 | return NIX_AF_ERR_LSO_CFG_FAIL; |
5350 | |
5351 | rsp->lso_format_idx = nix_hw->lso.in_use++; |
5352 | |
5353 | for (f = 0; f < NIX_LSO_FIELD_MAX; f++) |
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);
5357 | |
5358 | return 0; |
5359 | } |
5360 | |
5361 | #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) |
5362 | #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) |
5363 | #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) |
5364 | #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) |
5365 | |
5366 | #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) |
5367 | #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) |
5368 | #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) |
5369 | |
5370 | #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) |
5371 | #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) |
5372 | #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) |
5373 | |
5374 | static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, |
5375 | int blkaddr) |
5376 | { |
5377 | u8 cpt_idx, cpt_blkaddr; |
5378 | u64 val; |
5379 | |
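	/* NIX0 pairs with CPT0 and NIX1 with CPT1 for inline IPsec */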
5380 | cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; |
5381 | if (req->enable) { |
5382 | val = 0; |
5383 | /* Enable context prefetching */ |
5384 | if (!is_rvu_otx2(rvu)) |
5385 | val |= BIT_ULL(51); |
5386 | |
5387 | /* Set OPCODE and EGRP */ |
5388 | val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); |
5389 | val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); |
5390 | val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); |
5391 | val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); |
5392 | |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5394 | |
5395 | /* Set CPT queue for inline IPSec */ |
5396 | val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); |
5397 | val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, |
5398 | req->inst_qsel.cpt_pf_func); |
5399 | |
5400 | if (!is_rvu_otx2(rvu)) { |
5401 | cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : |
5402 | BLKADDR_CPT1; |
5403 | val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); |
5404 | } |
5405 | |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    val);
5408 | |
5409 | /* Set CPT credit */ |
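		/* Top the credit counter back up to its 0x3FFFFF maximum
		 * before programming the new value; the written value is
		 * presumably added to the current HW count.
		 */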
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);
5414 | |
5415 | val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); |
5416 | val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); |
5417 | val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5419 | } else { |
		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
			    0x0);
		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
		if ((val & 0x3FFFFF) != 0x3FFFFF)
			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
				    0x3FFFFF - val);
5427 | } |
5428 | } |
5429 | |
5430 | int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, |
5431 | struct nix_inline_ipsec_cfg *req, |
5432 | struct msg_rsp *rsp) |
5433 | { |
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5435 | return 0; |
5436 | |
	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5440 | |
5441 | return 0; |
5442 | } |
5443 | |
5444 | int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, |
5445 | struct msg_req *req, |
5446 | struct nix_inline_ipsec_cfg *rsp) |
5447 | |
5448 | { |
5449 | u64 val; |
5450 | |
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5452 | return 0; |
5453 | |
	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);

	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5461 | rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); |
5462 | rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); |
5463 | rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); |
5464 | |
5465 | return 0; |
5466 | } |
5467 | |
5468 | int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, |
5469 | struct nix_inline_ipsec_lf_cfg *req, |
5470 | struct msg_rsp *rsp) |
5471 | { |
5472 | int lf, blkaddr, err; |
5473 | u64 val; |
5474 | |
	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5476 | return 0; |
5477 | |
	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5479 | if (err) |
5480 | return err; |
5481 | |
5482 | if (req->enable) { |
5483 | /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ |
5484 | val = (u64)req->ipsec_cfg0.tt << 44 | |
5485 | (u64)req->ipsec_cfg0.tag_const << 20 | |
5486 | (u64)req->ipsec_cfg0.sa_pow2_size << 16 | |
5487 | req->ipsec_cfg0.lenm1_max; |
5488 | |
5489 | if (blkaddr == BLKADDR_NIX1) |
5490 | val |= BIT_ULL(46); |
5491 | |
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);

		/* Set SA_IDX_W and SA_IDX_MAX */
		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
		      req->ipsec_cfg1.sa_idx_max;
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);

		/* Set SA base address */
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    req->sa_base_addr);
	} else {
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
			    0x0);
5507 | } |
5508 | |
5509 | return 0; |
5510 | } |
5511 | |
5512 | void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) |
5513 | { |
5514 | bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); |
5515 | |
5516 | /* overwrite vf mac address with default_mac */ |
5517 | if (from_vf) |
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5519 | } |
5520 | |
5521 | /* NIX ingress policers or bandwidth profiles APIs */ |
5522 | static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) |
5523 | { |
5524 | struct npc_lt_def_cfg defs, *ltdefs; |
5525 | |
5526 | ltdefs = &defs; |
5527 | memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); |
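	/* Work on a local copy of the KPU layer type definitions */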
5528 | |
5529 | /* Extract PCP and DEI fields from outer VLAN from byte offset |
5530 | * 2 from the start of LB_PTR (ie TAG). |
5531 | * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN |
5532 | * fields are considered when 'Tunnel enable' is set in profile. |
5533 | */ |
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
5562 | } |
5563 | |
5564 | static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, |
5565 | int layer, int prof_idx) |
5566 | { |
5567 | struct nix_cn10k_aq_enq_req aq_req; |
5568 | int rc; |
5569 | |
5570 | memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); |
5571 | |
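	/* qidx encodes the target profile: bits [13:0] are the profile
	 * index and bits [15:14] the layer.
	 */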
5572 | aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); |
5573 | aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; |
5574 | aq_req.op = NIX_AQ_INSTOP_INIT; |
5575 | |
5576 | /* Context is all zeros, submit to AQ */ |
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);
5582 | return rc; |
5583 | } |
5584 | |
5585 | static int nix_setup_ipolicers(struct rvu *rvu, |
5586 | struct nix_hw *nix_hw, int blkaddr) |
5587 | { |
5588 | struct rvu_hwinfo *hw = rvu->hw; |
5589 | struct nix_ipolicer *ipolicer; |
5590 | int err, layer, prof_idx; |
5591 | u64 cfg; |
5592 | |
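	/* NIX_AF_CONST bit 61 indicates whether ingress policing
	 * (bandwidth profiles) is supported.
	 */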
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5594 | if (!(cfg & BIT_ULL(61))) { |
5595 | hw->cap.ipolicer = false; |
5596 | return 0; |
5597 | } |
5598 | |
5599 | hw->cap.ipolicer = true; |
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
5602 | if (!nix_hw->ipolicer) |
5603 | return -ENOMEM; |
5604 | |
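	/* NIX_AF_PL_CONST reports the profile count at each layer:
	 * leaf in bits [15:0], mid in [31:16] and top in [47:32].
	 */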
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5606 | |
5607 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
5608 | ipolicer = &nix_hw->ipolicer[layer]; |
5609 | switch (layer) { |
5610 | case BAND_PROF_LEAF_LAYER: |
			ipolicer->band_prof.max = cfg & 0xFFFF;
5612 | break; |
5613 | case BAND_PROF_MID_LAYER: |
			ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
5615 | break; |
5616 | case BAND_PROF_TOP_LAYER: |
			ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
5618 | break; |
5619 | } |
5620 | |
5621 | if (!ipolicer->band_prof.max) |
5622 | continue; |
5623 | |
		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5625 | if (err) |
5626 | return err; |
5627 | |
		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
5631 | if (!ipolicer->pfvf_map) |
5632 | return -ENOMEM; |
5633 | |
		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
5637 | if (!ipolicer->match_id) |
5638 | return -ENOMEM; |
5639 | |
5640 | for (prof_idx = 0; |
5641 | prof_idx < ipolicer->band_prof.max; prof_idx++) { |
5642 | /* Set AF as current owner for INIT ops to succeed */ |
5643 | ipolicer->pfvf_map[prof_idx] = 0x00; |
5644 | |
			/* There is no enable bit in the profile context, and
			 * hence no context disable either. INIT the contexts
			 * here so that PF/VF later need only do a WRITE to
			 * set up policer rates and config.
			 */
5650 | err = nix_init_policer_context(rvu, nix_hw, |
5651 | layer, prof_idx); |
5652 | if (err) |
5653 | return err; |
5654 | } |
5655 | |
		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles; this is needed for aggregating leaf layer
		 * profiles.
		 */
5660 | if (layer != BAND_PROF_MID_LAYER) |
5661 | continue; |
5662 | |
		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
5666 | if (!ipolicer->ref_count) |
5667 | return -ENOMEM; |
5668 | } |
5669 | |
5670 | /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ |
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5672 | |
5673 | nix_config_rx_pkt_policer_precolor(rvu, blkaddr); |
5674 | |
5675 | return 0; |
5676 | } |
5677 | |
5678 | static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) |
5679 | { |
5680 | struct nix_ipolicer *ipolicer; |
5681 | int layer; |
5682 | |
5683 | if (!rvu->hw->cap.ipolicer) |
5684 | return; |
5685 | |
5686 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
5687 | ipolicer = &nix_hw->ipolicer[layer]; |
5688 | |
5689 | if (!ipolicer->band_prof.max) |
5690 | continue; |
5691 | |
		kfree(ipolicer->band_prof.bmap);
5693 | } |
5694 | } |
5695 | |
5696 | static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, |
5697 | struct nix_hw *nix_hw, u16 pcifunc) |
5698 | { |
5699 | struct nix_ipolicer *ipolicer; |
5700 | int layer, hi_layer, prof_idx; |
5701 | |
5702 | /* Bits [15:14] in profile index represent layer */ |
5703 | layer = (req->qidx >> 14) & 0x03; |
5704 | prof_idx = req->qidx & 0x3FFF; |
5705 | |
5706 | ipolicer = &nix_hw->ipolicer[layer]; |
5707 | if (prof_idx >= ipolicer->band_prof.max) |
5708 | return -EINVAL; |
5709 | |
5710 | /* Check if the profile is allocated to the requesting PCIFUNC or not |
5711 | * with the exception of AF. AF is allowed to read and update contexts. |
5712 | */ |
5713 | if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) |
5714 | return -EINVAL; |
5715 | |
5716 | /* If this profile is linked to higher layer profile then check |
5717 | * if that profile is also allocated to the requesting PCIFUNC |
5718 | * or not. |
5719 | */ |
5720 | if (!req->prof.hl_en) |
5721 | return 0; |
5722 | |
5723 | /* Leaf layer profile can link only to mid layer and |
5724 | * mid layer to top layer. |
5725 | */ |
5726 | if (layer == BAND_PROF_LEAF_LAYER) |
5727 | hi_layer = BAND_PROF_MID_LAYER; |
5728 | else if (layer == BAND_PROF_MID_LAYER) |
5729 | hi_layer = BAND_PROF_TOP_LAYER; |
5730 | else |
5731 | return -EINVAL; |
5732 | |
5733 | ipolicer = &nix_hw->ipolicer[hi_layer]; |
5734 | prof_idx = req->prof.band_prof_id; |
5735 | if (prof_idx >= ipolicer->band_prof.max || |
5736 | ipolicer->pfvf_map[prof_idx] != pcifunc) |
5737 | return -EINVAL; |
5738 | |
5739 | return 0; |
5740 | } |
5741 | |
5742 | int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, |
5743 | struct nix_bandprof_alloc_req *req, |
5744 | struct nix_bandprof_alloc_rsp *rsp) |
5745 | { |
5746 | int blkaddr, layer, prof, idx, err; |
5747 | u16 pcifunc = req->hdr.pcifunc; |
5748 | struct nix_ipolicer *ipolicer; |
5749 | struct nix_hw *nix_hw; |
5750 | |
5751 | if (!rvu->hw->cap.ipolicer) |
5752 | return NIX_AF_ERR_IPOLICER_NOTSUPP; |
5753 | |
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5755 | if (err) |
5756 | return err; |
5757 | |
5758 | mutex_lock(&rvu->rsrc_lock); |
5759 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
5760 | if (layer == BAND_PROF_INVAL_LAYER) |
5761 | continue; |
5762 | if (!req->prof_count[layer]) |
5763 | continue; |
5764 | |
5765 | ipolicer = &nix_hw->ipolicer[layer]; |
5766 | for (idx = 0; idx < req->prof_count[layer]; idx++) { |
5767 | /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ |
5768 | if (idx == MAX_BANDPROF_PER_PFFUNC) |
5769 | break; |
5770 | |
			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5772 | if (prof < 0) |
5773 | break; |
5774 | rsp->prof_count[layer]++; |
5775 | rsp->prof_idx[layer][idx] = prof; |
5776 | ipolicer->pfvf_map[prof] = pcifunc; |
5777 | } |
5778 | } |
	mutex_unlock(&rvu->rsrc_lock);
5780 | return 0; |
5781 | } |
5782 | |
5783 | static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) |
5784 | { |
5785 | int blkaddr, layer, prof_idx, err; |
5786 | struct nix_ipolicer *ipolicer; |
5787 | struct nix_hw *nix_hw; |
5788 | |
5789 | if (!rvu->hw->cap.ipolicer) |
5790 | return NIX_AF_ERR_IPOLICER_NOTSUPP; |
5791 | |
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5793 | if (err) |
5794 | return err; |
5795 | |
5796 | mutex_lock(&rvu->rsrc_lock); |
5797 | /* Free all the profiles allocated to the PCIFUNC */ |
5798 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
5799 | if (layer == BAND_PROF_INVAL_LAYER) |
5800 | continue; |
5801 | ipolicer = &nix_hw->ipolicer[layer]; |
5802 | |
5803 | for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { |
5804 | if (ipolicer->pfvf_map[prof_idx] != pcifunc) |
5805 | continue; |
5806 | |
5807 | /* Clear ratelimit aggregation, if any */ |
5808 | if (layer == BAND_PROF_LEAF_LAYER && |
5809 | ipolicer->match_id[prof_idx]) |
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5811 | |
5812 | ipolicer->pfvf_map[prof_idx] = 0x00; |
5813 | ipolicer->match_id[prof_idx] = 0; |
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5815 | } |
5816 | } |
	mutex_unlock(&rvu->rsrc_lock);
5818 | return 0; |
5819 | } |
5820 | |
5821 | int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, |
5822 | struct nix_bandprof_free_req *req, |
5823 | struct msg_rsp *rsp) |
5824 | { |
5825 | int blkaddr, layer, prof_idx, idx, err; |
5826 | u16 pcifunc = req->hdr.pcifunc; |
5827 | struct nix_ipolicer *ipolicer; |
5828 | struct nix_hw *nix_hw; |
5829 | |
5830 | if (req->free_all) |
5831 | return nix_free_all_bandprof(rvu, pcifunc); |
5832 | |
5833 | if (!rvu->hw->cap.ipolicer) |
5834 | return NIX_AF_ERR_IPOLICER_NOTSUPP; |
5835 | |
	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5837 | if (err) |
5838 | return err; |
5839 | |
5840 | mutex_lock(&rvu->rsrc_lock); |
5841 | /* Free the requested profile indices */ |
5842 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
5843 | if (layer == BAND_PROF_INVAL_LAYER) |
5844 | continue; |
5845 | if (!req->prof_count[layer]) |
5846 | continue; |
5847 | |
5848 | ipolicer = &nix_hw->ipolicer[layer]; |
5849 | for (idx = 0; idx < req->prof_count[layer]; idx++) { |
5850 | if (idx == MAX_BANDPROF_PER_PFFUNC) |
5851 | break; |
5852 | prof_idx = req->prof_idx[layer][idx]; |
5853 | if (prof_idx >= ipolicer->band_prof.max || |
5854 | ipolicer->pfvf_map[prof_idx] != pcifunc) |
5855 | continue; |
5856 | |
5857 | /* Clear ratelimit aggregation, if any */ |
5858 | if (layer == BAND_PROF_LEAF_LAYER && |
5859 | ipolicer->match_id[prof_idx]) |
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5861 | |
5862 | ipolicer->pfvf_map[prof_idx] = 0x00; |
5863 | ipolicer->match_id[prof_idx] = 0; |
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5865 | } |
5866 | } |
	mutex_unlock(&rvu->rsrc_lock);
5868 | return 0; |
5869 | } |
5870 | |
5871 | int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, |
5872 | struct nix_cn10k_aq_enq_req *aq_req, |
5873 | struct nix_cn10k_aq_enq_rsp *aq_rsp, |
5874 | u16 pcifunc, u8 ctype, u32 qidx) |
5875 | { |
5876 | memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); |
5877 | aq_req->hdr.pcifunc = pcifunc; |
5878 | aq_req->ctype = ctype; |
5879 | aq_req->op = NIX_AQ_INSTOP_READ; |
5880 | aq_req->qidx = qidx; |
5881 | |
	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
5885 | } |
5886 | |
5887 | static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, |
5888 | struct nix_hw *nix_hw, |
5889 | struct nix_cn10k_aq_enq_req *aq_req, |
5890 | struct nix_cn10k_aq_enq_rsp *aq_rsp, |
5891 | u32 leaf_prof, u16 mid_prof) |
5892 | { |
5893 | memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); |
5894 | aq_req->hdr.pcifunc = 0x00; |
5895 | aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; |
5896 | aq_req->op = NIX_AQ_INSTOP_WRITE; |
5897 | aq_req->qidx = leaf_prof; |
5898 | |
5899 | aq_req->prof.band_prof_id = mid_prof; |
5900 | aq_req->prof_mask.band_prof_id = GENMASK(6, 0); |
5901 | aq_req->prof.hl_en = 1; |
5902 | aq_req->prof_mask.hl_en = 1; |
5903 | |
	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
5907 | } |
5908 | |
5909 | int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, |
5910 | u16 rq_idx, u16 match_id) |
5911 | { |
5912 | int leaf_prof, mid_prof, leaf_match; |
5913 | struct nix_cn10k_aq_enq_req aq_req; |
5914 | struct nix_cn10k_aq_enq_rsp aq_rsp; |
5915 | struct nix_ipolicer *ipolicer; |
5916 | struct nix_hw *nix_hw; |
5917 | int blkaddr, idx, rc; |
5918 | |
5919 | if (!rvu->hw->cap.ipolicer) |
5920 | return 0; |
5921 | |
	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
5933 | return rc; |
5934 | } |
5935 | |
5936 | if (!aq_rsp.rq.policer_ena) |
5937 | return 0; |
5938 | |
5939 | /* Get the bandwidth profile ID mapped to this RQ */ |
5940 | leaf_prof = aq_rsp.rq.band_prof_id; |
5941 | |
5942 | ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; |
5943 | ipolicer->match_id[leaf_prof] = match_id; |
5944 | |
5945 | /* Check if any other leaf profile is marked with same match_id */ |
5946 | for (idx = 0; idx < ipolicer->band_prof.max; idx++) { |
5947 | if (idx == leaf_prof) |
5948 | continue; |
5949 | if (ipolicer->match_id[idx] != match_id) |
5950 | continue; |
5951 | |
5952 | leaf_match = idx; |
5953 | break; |
5954 | } |
5955 | |
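	/* No other leaf profile shares this match_id, so there is
	 * nothing to aggregate.
	 */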
5956 | if (idx == ipolicer->band_prof.max) |
5957 | return 0; |
5958 | |
5959 | /* Fetch the matching profile's context to check if it's already |
5960 | * mapped to a mid level profile. |
5961 | */ |
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
5968 | return rc; |
5969 | } |
5970 | |
5971 | ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; |
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and also map the
		 * leaf_prof index to it, so that flows steered to
		 * different RQs but marked with the same match_id are
		 * rate limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
5986 | goto exit; |
5987 | } |
5988 | |
5989 | mutex_lock(&rvu->rsrc_lock); |
5990 | ipolicer->ref_count[mid_prof]++; |
		mutex_unlock(&rvu->rsrc_lock);
5992 | goto exit; |
5993 | } |
5994 | |
5995 | /* Allocate a mid layer profile and |
5996 | * map both 'leaf_prof' and 'leaf_match' profiles to it. |
5997 | */ |
5998 | mutex_lock(&rvu->rsrc_lock); |
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
6007 | ipolicer->pfvf_map[mid_prof] = 0x00; |
6008 | ipolicer->ref_count[mid_prof] = 0; |
6009 | |
6010 | /* Initialize mid layer profile same as 'leaf_prof' */ |
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
6017 | goto exit; |
6018 | } |
6019 | |
6020 | memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); |
6021 | aq_req.hdr.pcifunc = 0x00; |
6022 | aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); |
6023 | aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; |
6024 | aq_req.op = NIX_AQ_INSTOP_WRITE; |
6025 | memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); |
6026 | memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); |
6027 | /* Clear higher layer enable bit in the mid profile, just in case */ |
6028 | aq_req.prof.hl_en = 0; |
6029 | aq_req.prof_mask.hl_en = 1; |
6030 | |
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
6037 | goto exit; |
6038 | } |
6039 | |
6040 | /* Map both leaf profiles to this mid layer profile */ |
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
6048 | goto exit; |
6049 | } |
6050 | |
6051 | mutex_lock(&rvu->rsrc_lock); |
6052 | ipolicer->ref_count[mid_prof]++; |
	mutex_unlock(&rvu->rsrc_lock);
6054 | |
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
6062 | ipolicer->ref_count[mid_prof]--; |
6063 | goto exit; |
6064 | } |
6065 | |
6066 | mutex_lock(&rvu->rsrc_lock); |
6067 | ipolicer->ref_count[mid_prof]++; |
	mutex_unlock(&rvu->rsrc_lock);
6069 | |
6070 | exit: |
6071 | return rc; |
6072 | } |
6073 | |
6074 | /* Called with mutex rsrc_lock */ |
6075 | static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, |
6076 | u32 leaf_prof) |
6077 | { |
6078 | struct nix_cn10k_aq_enq_req aq_req; |
6079 | struct nix_cn10k_aq_enq_rsp aq_rsp; |
6080 | struct nix_ipolicer *ipolicer; |
6081 | u16 mid_prof; |
6082 | int rc; |
6083 | |
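	/* The caller holds rsrc_lock; drop it across the AQ context read
	 * and reacquire it before updating the policer bookkeeping.
	 */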
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
6088 | |
6089 | mutex_lock(&rvu->rsrc_lock); |
6090 | if (rc) { |
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
6094 | return; |
6095 | } |
6096 | |
6097 | if (!aq_rsp.prof.hl_en) |
6098 | return; |
6099 | |
6100 | mid_prof = aq_rsp.prof.band_prof_id; |
6101 | ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; |
6102 | ipolicer->ref_count[mid_prof]--; |
6103 | /* If ref_count is zero, free mid layer profile */ |
6104 | if (!ipolicer->ref_count[mid_prof]) { |
6105 | ipolicer->pfvf_map[mid_prof] = 0x00; |
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
6107 | } |
6108 | } |
6109 | |
6110 | int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, |
6111 | struct nix_bandprof_get_hwinfo_rsp *rsp) |
6112 | { |
6113 | struct nix_ipolicer *ipolicer; |
6114 | int blkaddr, layer, err; |
6115 | struct nix_hw *nix_hw; |
6116 | u64 tu; |
6117 | |
6118 | if (!rvu->hw->cap.ipolicer) |
6119 | return NIX_AF_ERR_IPOLICER_NOTSUPP; |
6120 | |
	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
6122 | if (err) |
6123 | return err; |
6124 | |
6125 | /* Return number of bandwidth profiles free at each layer */ |
6126 | mutex_lock(&rvu->rsrc_lock); |
6127 | for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { |
6128 | if (layer == BAND_PROF_INVAL_LAYER) |
6129 | continue; |
6130 | |
6131 | ipolicer = &nix_hw->ipolicer[layer]; |
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
6133 | } |
	mutex_unlock(&rvu->rsrc_lock);
6135 | |
6136 | /* Set the policer timeunit in nanosec */ |
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
6138 | rsp->policer_timeunit = (tu + 1) * 100; |
6139 | |
6140 | return 0; |
6141 | } |
6142 | |
6143 | static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp, |
6144 | u32 mcast_grp_idx) |
6145 | { |
6146 | struct nix_mcast_grp_elem *iter; |
6147 | bool is_found = false; |
6148 | |
6149 | list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) { |
6150 | if (iter->mcast_grp_idx == mcast_grp_idx) { |
6151 | is_found = true; |
6152 | break; |
6153 | } |
6154 | } |
6155 | |
6156 | if (is_found) |
6157 | return iter; |
6158 | |
6159 | return NULL; |
6160 | } |
6161 | |
6162 | int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx) |
6163 | { |
6164 | struct nix_mcast_grp_elem *elem; |
6165 | struct nix_mcast_grp *mcast_grp; |
6166 | struct nix_hw *nix_hw; |
6167 | int blkaddr, ret; |
6168 | |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		ret = elem->mce_start_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
6183 | return ret; |
6184 | } |
6185 | |
6186 | void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc) |
6187 | { |
6188 | struct nix_mcast_grp_destroy_req dreq = { 0 }; |
6189 | struct nix_mcast_grp_update_req ureq = { 0 }; |
6190 | struct nix_mcast_grp_update_rsp ursp = { 0 }; |
6191 | struct nix_mcast_grp_elem *elem, *tmp; |
6192 | struct nix_mcast_grp *mcast_grp; |
6193 | struct nix_hw *nix_hw; |
6194 | int blkaddr; |
6195 | |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
6198 | if (!nix_hw) |
6199 | return; |
6200 | |
6201 | mcast_grp = &nix_hw->mcast_grp; |
6202 | |
6203 | mutex_lock(&mcast_grp->mcast_grp_lock); |
6204 | list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) { |
6205 | struct nix_mce_list *mce_list; |
6206 | struct hlist_node *tmp; |
6207 | struct mce *mce; |
6208 | |
6209 | /* If the pcifunc which created the multicast/mirror |
6210 | * group received an FLR, then delete the entire group. |
6211 | */ |
6212 | if (elem->pcifunc == pcifunc) { |
6213 | /* Delete group */ |
6214 | dreq.hdr.pcifunc = elem->pcifunc; |
6215 | dreq.mcast_grp_idx = elem->mcast_grp_idx; |
6216 | dreq.is_af = 1; |
6217 | rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); |
6218 | continue; |
6219 | } |
6220 | |
6221 | /* Iterate the group elements and delete the element which |
6222 | * received the FLR. |
6223 | */ |
6224 | mce_list = &elem->mcast_mce_list; |
6225 | hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { |
6226 | if (mce->pcifunc == pcifunc) { |
6227 | ureq.hdr.pcifunc = pcifunc; |
6228 | ureq.num_mce_entry = 1; |
6229 | ureq.mcast_grp_idx = elem->mcast_grp_idx; |
6230 | ureq.op = NIX_MCAST_OP_DEL_ENTRY; |
6231 | ureq.pcifunc[0] = pcifunc; |
6232 | ureq.is_af = 1; |
6233 | rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp); |
6234 | break; |
6235 | } |
6236 | } |
6237 | } |
	mutex_unlock(&mcast_grp->mcast_grp_lock);
6239 | } |
6240 | |
6241 | int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, |
6242 | u32 mcast_grp_idx, u16 mcam_index) |
6243 | { |
6244 | struct nix_mcast_grp_elem *elem; |
6245 | struct nix_mcast_grp *mcast_grp; |
6246 | struct nix_hw *nix_hw; |
6247 | int blkaddr, ret = 0; |
6248 | |
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mcast_grp = &nix_hw->mcast_grp;
	mutex_lock(&mcast_grp->mcast_grp_lock);
	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
	if (!elem)
		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
	else
		elem->mcam_index = mcam_index;

	mutex_unlock(&mcast_grp->mcast_grp_lock);
6263 | return ret; |
6264 | } |
6265 | |
6266 | int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu, |
6267 | struct nix_mcast_grp_create_req *req, |
6268 | struct nix_mcast_grp_create_rsp *rsp) |
6269 | { |
6270 | struct nix_mcast_grp_elem *elem; |
6271 | struct nix_mcast_grp *mcast_grp; |
6272 | struct nix_hw *nix_hw; |
6273 | int blkaddr, err; |
6274 | |
	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;
	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
6281 | if (!elem) |
6282 | return -ENOMEM; |
6283 | |
6284 | INIT_HLIST_HEAD(&elem->mcast_mce_list.head); |
6285 | elem->mcam_index = -1; |
6286 | elem->mce_start_index = -1; |
6287 | elem->pcifunc = req->hdr.pcifunc; |
6288 | elem->dir = req->dir; |
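	/* Group indices are handed out from a monotonically increasing
	 * counter.
	 */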
6289 | elem->mcast_grp_idx = mcast_grp->next_grp_index++; |
6290 | |
6291 | mutex_lock(&mcast_grp->mcast_grp_lock); |
	list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
	mcast_grp->count++;
	mutex_unlock(&mcast_grp->mcast_grp_lock);
6295 | |
6296 | rsp->mcast_grp_idx = elem->mcast_grp_idx; |
6297 | return 0; |
6298 | } |
6299 | |
6300 | int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu, |
6301 | struct nix_mcast_grp_destroy_req *req, |
6302 | struct msg_rsp *rsp) |
6303 | { |
6304 | struct npc_delete_flow_req uninstall_req = { 0 }; |
6305 | struct npc_delete_flow_rsp uninstall_rsp = { 0 }; |
6306 | struct nix_mcast_grp_elem *elem; |
6307 | struct nix_mcast_grp *mcast_grp; |
6308 | int blkaddr, err, ret = 0; |
6309 | struct nix_mcast *mcast; |
6310 | struct nix_hw *nix_hw; |
6311 | |
	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the deletion,
	 * then AF already holds the lock.
	 */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6325 | if (!elem) { |
6326 | ret = NIX_AF_ERR_INVALID_MCAST_GRP; |
6327 | goto unlock_grp; |
6328 | } |
6329 | |
6330 | /* If no mce entries are associated with the group |
6331 | * then just remove it from the global list. |
6332 | */ |
6333 | if (!elem->mcast_mce_list.count) |
6334 | goto delete_grp; |
6335 | |
6336 | /* Delete the associated mcam entry and |
6337 | * remove all mce entries from the group |
6338 | */ |
6339 | mcast = &nix_hw->mcast; |
6340 | mutex_lock(&mcast->mce_lock); |
6341 | if (elem->mcam_index != -1) { |
6342 | uninstall_req.hdr.pcifunc = req->hdr.pcifunc; |
6343 | uninstall_req.entry = elem->mcam_index; |
6344 | rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); |
6345 | } |
6346 | |
	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
			  elem->mce_start_index, elem->dir);
	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
	mutex_unlock(&mcast->mce_lock);

delete_grp:
	list_del(&elem->list);
	kfree(elem);
6355 | mcast_grp->count--; |
6356 | |
6357 | unlock_grp: |
6358 | if (!req->is_af) |
		mutex_unlock(&mcast_grp->mcast_grp_lock);
6360 | |
6361 | return ret; |
6362 | } |
6363 | |
6364 | int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu, |
6365 | struct nix_mcast_grp_update_req *req, |
6366 | struct nix_mcast_grp_update_rsp *rsp) |
6367 | { |
6368 | struct nix_mcast_grp_destroy_req dreq = { 0 }; |
6369 | struct npc_mcam *mcam = &rvu->hw->mcam; |
6370 | struct nix_mcast_grp_elem *elem; |
6371 | struct nix_mcast_grp *mcast_grp; |
6372 | int blkaddr, err, npc_blkaddr; |
6373 | u16 prev_count, new_count; |
6374 | struct nix_mcast *mcast; |
6375 | struct nix_hw *nix_hw; |
6376 | int i, ret; |
6377 | |
6378 | if (!req->num_mce_entry) |
6379 | return 0; |
6380 | |
	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mcast_grp = &nix_hw->mcast_grp;

	/* If AF is requesting the update,
	 * then AF already holds the lock.
	 */
	if (!req->is_af)
		mutex_lock(&mcast_grp->mcast_grp_lock);

	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
6394 | if (!elem) { |
6395 | ret = NIX_AF_ERR_INVALID_MCAST_GRP; |
6396 | goto unlock_grp; |
6397 | } |
6398 | |
6399 | /* If any pcifunc matches the group's pcifunc, then we can |
6400 | * delete the entire group. |
6401 | */ |
6402 | if (req->op == NIX_MCAST_OP_DEL_ENTRY) { |
6403 | for (i = 0; i < req->num_mce_entry; i++) { |
6404 | if (elem->pcifunc == req->pcifunc[i]) { |
6405 | /* Delete group */ |
6406 | dreq.hdr.pcifunc = elem->pcifunc; |
6407 | dreq.mcast_grp_idx = elem->mcast_grp_idx; |
6408 | dreq.is_af = 1; |
				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
6410 | ret = 0; |
6411 | goto unlock_grp; |
6412 | } |
6413 | } |
6414 | } |
6415 | |
6416 | mcast = &nix_hw->mcast; |
6417 | mutex_lock(&mcast->mce_lock); |
	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (elem->mcam_index != -1)
		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
6421 | |
6422 | prev_count = elem->mcast_mce_list.count; |
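	/* The MCE list must be contiguous, so resizing it means freeing
	 * the current allocation and allocating a new one of the required
	 * size.
	 */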
6423 | if (req->op == NIX_MCAST_OP_ADD_ENTRY) { |
6424 | new_count = prev_count + req->num_mce_entry; |
6425 | if (prev_count) |
			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);

		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
6429 | |
6430 | /* It is possible not to get contiguous memory */ |
6431 | if (elem->mce_start_index < 0) { |
6432 | if (elem->mcam_index != -1) { |
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
6435 | ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST; |
6436 | goto unlock_mce; |
6437 | } |
6438 | } |
6439 | |
6440 | ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req); |
6441 | if (ret) { |
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			if (prev_count)
				elem->mce_start_index = nix_alloc_mce_list(mcast,
									   prev_count,
									   elem->dir);

			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
6451 | |
6452 | goto unlock_mce; |
6453 | } |
6454 | } else { |
6455 | if (!prev_count || prev_count < req->num_mce_entry) { |
6456 | if (elem->mcam_index != -1) |
				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
						      elem->mcam_index, true);
6459 | ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ; |
6460 | goto unlock_mce; |
6461 | } |
6462 | |
		nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
		new_count = prev_count - req->num_mce_entry;
		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
		if (ret) {
			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
			if (elem->mcam_index != -1)
				npc_enable_mcam_entry(rvu, mcam,
						      npc_blkaddr,
						      elem->mcam_index,
						      true);
6475 | |
6476 | goto unlock_mce; |
6477 | } |
6478 | } |
6479 | |
6480 | if (elem->mcam_index == -1) { |
6481 | rsp->mce_start_index = elem->mce_start_index; |
6482 | ret = 0; |
6483 | goto unlock_mce; |
6484 | } |
6485 | |
6486 | nix_mcast_update_action(rvu, elem); |
	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
	rsp->mce_start_index = elem->mce_start_index;
	ret = 0;

unlock_mce:
	mutex_unlock(&mcast->mce_lock);

unlock_grp:
	if (!req->is_af)
		mutex_unlock(&mcast_grp->mcast_grp_lock);
6497 | |
6498 | return ret; |
6499 | } |
6500 | |