// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

/* RVU LMTST */
#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16

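/* Note: the 128 KB map table provides 8192 entries of LMT_MAPTBL_ENTRY_SIZE
 * (16) bytes each; entries are indexed per PF/VF function, see
 * rvu_get_lmtst_tbl_index() below.
 */
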
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq((*val), (lmt_map_base + index));
		/* Flush the AP interceptor cache so that APR_LMT_MAP_ENTRY_S
		 * changes take effect: write 1 to trigger the flush, read the
		 * register back as a barrier that sets up a data dependency,
		 * then write 0 to complete the flush.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

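/* Each map table entry is LMT_MAPTBL_ENTRY_SIZE (16) bytes: word0 holds the
 * LMT base address and word1, at byte offset LMT_MAP_TBL_W1_OFF, holds the
 * APR_LMT_MAP_ENTRY_S control bits updated by the lmtst_tbl_setup mailbox.
 * rvu_get_lmtst_tbl_index() converts a pcifunc into the byte offset of its
 * entry, e.g. PF 2, FUNC 3 with a total_vfs of 128 (illustrative value) maps
 * to (2 * 128 + 3) * 16 = 4144.
 */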
#define LMT_MAP_TBL_W1_OFF  8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}

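/* Translate the LMTLINE IOVA of @pcifunc to a physical address via the RVU
 * SMMU translation registers: post the IOVA and transaction attributes,
 * poll RVU_AF_SMMU_ADDR_RSP_STS for completion, then build the PA from
 * RVU_AF_SMMU_TLN_FLIT0 and the page offset of the IOVA.
 */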
static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err = 0;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&rvu->rsrc_lock);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		goto exit;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		err = -EIO;
		goto exit;
	}
	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
	 * PA[11:0] = IOVA[11:0]
	 */
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

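/* Repoint the LMT map table entry of @pcifunc at @lmt_addr. The entry's
 * original base address is saved once in the pfvf struct so that it can be
 * restored on FLR by rvu_reset_lmt_map_tbl().
 */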
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err = 0;
	u64 val;

	/* Read the current lmt addr of pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the secondary's lmt base address as this needs to be
	 * reverted in FLR. Also make sure this default value doesn't
	 * get overwritten on multiple calls to this mailbox.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with the new addr */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err) {
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}
	return 0;
}

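/* Mailbox handler for LMTST table setup requests from a PF/VF. Depending on
 * the request it can: map the caller's own memory (an IOVA) as its LMTLINE
 * region, share the LMT base address of a primary/base pcifunc with the
 * caller, and/or update word1 of the caller's APR_LMT_MAP_ENTRY_S (scheduled
 * LMTST, early-completion and line-prefetch controls).
 */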
int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	u32 pri_tbl_idx, tbl_idx;
	u64 lmt_addr;
	int err = 0;
	u64 val;

	/* Check if PF_FUNC wants to use its own local memory as LMTLINE
	 * region, if so, convert that IOVA to physical address and
	 * populate LMT table with that address
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the lmt addr for this PFFUNC in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the lmtst map table for lmt region shared mode, i.e.
	 * multiple PF_FUNCs share an LMTLINE region. The primary/base
	 * pcifunc (which is passed as an argument to the mailbox) is the one
	 * whose lmt base address will be shared with the secondary pcifunc
	 * (the one calling this mailbox).
	 */
	if (req->base_pcifunc) {
		/* Calculate the LMT table index corresponding to the primary
		 * pcifunc.
		 */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base lmt addr of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			goto error;
		}

		/* Update the base lmt addr of the secondary with the
		 * primary's base lmt addr.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
	 * e.g. enabling scheduled LMTST, disabling LMTLINE prefetch or
	 * disabling early completion for ordered LMTST.
	 */
	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}

		/* Store the lmt map table entry word1 default value as this
		 * needs to be reverted in FLR. Also make sure this default
		 * value doesn't get overwritten on multiple calls to this
		 * mailbox.
		 */
		if (!pfvf->lmt_map_ent_w1)
			pfvf->lmt_map_ent_w1 = val;

		/* Disable early completion for Ordered LMTSTs. */
		if (req->dis_sched_early_comp)
			val |= (req->dis_sched_early_comp <<
				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
		/* Enable scheduled LMTST */
		if (req->sch_ena)
			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
				req->ssow_pf_func;
		/* Disable LMTLINE prefetch before receiving store data. */
		if (req->dis_line_pref)
			val |= (req->dis_line_pref <<
				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_WRITE);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}
	}

error:
	return err;
}

/* Reset the lmtst map table entry to its original base address and word1 */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
		/* This corresponds to the lmt map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
		/* Revert to the original lmt base addr of the respective
		 * pcifunc.
		 */
		if (pfvf->lmt_base_addr) {
			err = lmtst_map_table_ops(rvu, tbl_idx,
						  &pfvf->lmt_base_addr,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx, err);
			pfvf->lmt_base_addr = 0;
		}
		/* Revert to the original word1 value of the lmtst map table
		 * entry which underwent changes.
		 */
		if (pfvf->lmt_map_ent_w1) {
			err = lmtst_map_table_ops(rvu,
						  tbl_idx + LMT_MAP_TBL_W1_OFF,
						  &pfvf->lmt_map_ent_w1,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			pfvf->lmt_map_ent_w1 = 0;
		}
	}
}

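/* Discover link counts from NIX_AF_CONST/CONST1 and pick the base channel
 * number for each link type. When the silicon supports programmable
 * channels (NIX_AF_CONST bit 60), the bases are packed contiguously in
 * LBK, SDP, CGX, CPT order, each aligned to that block's channel count,
 * and the CPT base is pinned at 2048 so the channel MSB stays set.
 */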
int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present then configure the
	 * channels such that all channel numbers are contiguous,
	 * leaving no holes. This way the new CPT channels can be
	 * accommodated. The order of channel numbers assigned is
	 * LBK, SDP, CGX and CPT. Also the base channel number
	 * of a block must be a multiple of the number of channels
	 * of that block.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up the base channel to a multiple of the number of channels */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);

	/* Out of the 4096 channels start CPT from 2048 so
	 * that the MSB for CPT channels is always set
	 */
	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)	(0x0 + (a))

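/* Program the base channel, link ID and channel range (log2 of the channel
 * count) into one LBK link config register (X2P or P2X).
 */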
static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* To loopback packets between multiple NIX blocks,
	 * multiple LBK blocks are needed. With two NIX blocks,
	 * four LBK blocks are needed and each LBK block's
	 * source and destination are as follows:
	 * LBK0 - source NIX0 and destination NIX0
	 * LBK1 - source NIX0 and destination NIX1
	 * LBK2 - source NIX1 and destination NIX0
	 * LBK3 - source NIX1 and destination NIX1
	 * As per the HRM, channel numbers should be programmed as:
	 * P2X and X2P of LBK0 the same
	 * P2X and X2P of LBK3 the same
	 * P2X of LBK1 and X2P of LBK2 the same
	 * P2X of LBK2 and X2P of LBK1 the same
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

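/* Program the base channel and channel range (log2 of the per-link channel
 * count) into NIX_AF_LINKX_CFG for every CGX, LBK, SDP and CPT link, walking
 * the contiguous channel map computed in rvu_set_channels_base().
 */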
static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

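/* Program the channel base for one RPM LMAC via its CMR link config
 * register; the channel range is fixed at 16 (see the comment below).
 */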
static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register from which to read the
	 * number of channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

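/* Program channel numbers on the NIX, LBK and RPM sides. This is a no-op
 * unless the silicon reports programmable channels in NIX_AF_CONST (see
 * rvu_set_channels_base()).
 */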
void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}

void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	int blkaddr = nix_hw->blkaddr;
	u64 cfg;

	/* Set AF vWQE timer interval to a LF configurable range of
	 * 6.4us to 1.632ms.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);

	/* Enable NIX RX stream and global conditional clock to
	 * avoid multiple frees of NPA buffers.
	 */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
	cfg |= BIT_ULL(1) | BIT_ULL(2);
	rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}

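/* Limit the number of outstanding LMTST writes by programming the throttle
 * field of APR_AF_LMT_CFG to LMTST_WR_PEND_MAX.
 */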
void rvu_apr_block_cn10k_init(struct rvu *rvu)
{
	u64 reg;

	reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
	reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
	rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
}

