// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function Devlink
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>

#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"
#include "rvu_npc_hash.h"

#define DRV_NAME "octeontx2-af"
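
/* Each AF block (NPA/NIX) gets a set of devlink health reporters that share a
 * common flow: the AF interrupt handler latches the cause register into a
 * per-block event context, clears and masks the interrupt, and queues a work
 * item; the work item calls devlink_health_report(), whose dump callback
 * decodes the latched bits and whose recover callback re-enables the
 * interrupt.
 */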

static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}

static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}

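/* Request an AF MSI-X vector and name it; returns true if the IRQ was
 * successfully registered, false otherwise. The rvu_devlink instance is
 * passed as the interrupt cookie so the handlers can reach the reporters.
 */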
static bool rvu_common_request_irq(struct rvu *rvu, int offset,
				   const char *name, irq_handler_t fn)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int rc;

	sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
	if (rc)
		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
	else
		rvu->irq_allocated[offset] = true;

	return rvu->irq_allocated[offset];
}

static void rvu_nix_intr_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
			      "NIX_AF_RVU Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}

static void rvu_nix_gen_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
			      "NIX_AF_GEN Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}

static void rvu_nix_err_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
			      "NIX_AF_ERR Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}

static void rvu_nix_ras_work(struct work_struct *work)
{
	struct rvu_nix_health_reporters *rvu_nix_health_reporter;

	rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
	devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
			      "NIX_AF_RAS Error",
			      rvu_nix_health_reporter->nix_event_ctx);
}

static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
	nix_event_context->nix_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);

	return IRQ_HANDLED;
}

static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}

static int rvu_nix_register_interrupts(struct rvu *rvu)
{
	int blkaddr, base;
	bool rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	/* Get NIX AF MSIX vectors offset. */
	base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!base) {
		dev_warn(rvu->dev,
			 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
			 blkaddr - BLKADDR_NIX0);
		return 0;
	}
	/* Register and enable NIX_AF_RVU_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
				    "NIX_AF_RVU_INT",
				    rvu_nix_af_rvu_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_GEN_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
				    "NIX_AF_GEN_INT",
				    rvu_nix_af_rvu_gen_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_ERR_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
				    "NIX_AF_ERR_INT",
				    rvu_nix_af_rvu_err_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	/* Register and enable NIX_AF_RAS interrupt */
	rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
				    "NIX_AF_RAS",
				    rvu_nix_af_rvu_ras_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
err:
	rvu_nix_unregister_interrupts(rvu);
	return rc;
}

static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum nix_af_rvu_health health_reporter)
{
	struct rvu_nix_event_ctx *nix_event_context;
	u64 intr_val;

	nix_event_context = ctx;
	switch (health_reporter) {
	case NIX_AF_RVU_INTR:
		intr_val = nix_event_context->nix_af_rvu_int;
		rvu_report_pair_start(fmsg, "NIX_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
					  nix_event_context->nix_af_rvu_int);
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_GEN:
		intr_val = nix_event_context->nix_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
					  nix_event_context->nix_af_rvu_gen);
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_ERR:
		intr_val = nix_event_context->nix_af_rvu_err;
		rvu_report_pair_start(fmsg, "NIX_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
					  nix_event_context->nix_af_rvu_err);
		if (intr_val & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
		if (intr_val & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
		if (intr_val & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		if (intr_val & BIT_ULL(6))
			devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
		if (intr_val & BIT_ULL(5))
			devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
		if (intr_val & BIT_ULL(3))
			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
		if (intr_val & BIT_ULL(2))
			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
		rvu_report_pair_end(fmsg);
		break;
	case NIX_AF_RVU_RAS:
		intr_val = nix_event_context->nix_af_rvu_ras;
		rvu_report_pair_start(fmsg, "NIX_AF_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
					  nix_event_context->nix_af_rvu_ras);
		devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
		if (intr_val & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
		if (intr_val & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
		if (intr_val & BIT_ULL(4))
			devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
		if (intr_val & BIT_ULL(3))
			devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
		if (intr_val & BIT_ULL(2))
			devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
		if (intr_val & BIT_ULL(1))
			devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
		if (intr_val & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
				struct devlink_fmsg *fmsg, void *ctx,
				struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
}

static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_int)
		rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
}

static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
}

static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_err)
		rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_nix_event_ctx *nix_ctx;

	nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;

	return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
		     rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
}

static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_nix_event_ctx *nix_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (nix_event_ctx->nix_af_rvu_ras)
		rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
}

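/* RVU_REPORTERS() (see rvu_devlink.h) generates the devlink_health_reporter_ops
 * for each reporter name, pairing the *_dump and *_recover callbacks above.
 */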
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);

static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}

static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_nix_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
			 err);
		return err;
	}
	rvu_nix_register_interrupts(rvu);

	return 0;
}

static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}

static void rvu_npa_intr_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
			      "NPA_AF_RVU Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}

static void rvu_npa_gen_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
			      "NPA_AF_GEN Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}

static void rvu_npa_err_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
			      "NPA_AF_ERR Error",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}

static void rvu_npa_ras_work(struct work_struct *work)
{
	struct rvu_npa_health_reporters *rvu_npa_health_reporter;

	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
			      "HW NPA_AF_RAS Error reported",
			      rvu_npa_health_reporter->npa_event_ctx);
}

static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}

static void rvu_npa_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int i, offs, blkaddr;
	u64 reg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
	offs = reg & 0x3FF;

	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);

	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}

static int rvu_npa_register_interrupts(struct rvu *rvu)
{
	int blkaddr, base;
	bool rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	/* Get NPA AF MSIX vectors offset. */
	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
	if (!base) {
		dev_warn(rvu->dev,
			 "Failed to get NPA_AF_INT vector offsets\n");
		return 0;
	}

	/* Register and enable NPA_AF_RVU_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
				    "NPA_AF_RVU_INT",
				    rvu_npa_af_rvu_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_GEN_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
				    "NPA_AF_RVU_GEN",
				    rvu_npa_af_gen_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_ERR_INT interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
				    "NPA_AF_ERR_INT",
				    rvu_npa_af_err_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);

	/* Register and enable NPA_AF_RAS interrupt */
	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
				    "NPA_AF_RAS",
				    rvu_npa_af_ras_intr_handler);
	if (!rc)
		goto err;
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
err:
	rvu_npa_unregister_interrupts(rvu);
	return rc;
}

static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
				struct devlink_fmsg *fmsg, void *ctx,
				struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
}

static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
				   void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_int)
		rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
}

static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_gen)
		rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
}

static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_err)
		rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);

	return 0;
}

static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg, void *ctx,
			       struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct rvu_npa_event_ctx *npa_ctx;

	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;

	return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
		     rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
}

static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
				  void *ctx, struct netlink_ext_ack *netlink_extack)
{
	struct rvu *rvu = devlink_health_reporter_priv(reporter);
	struct rvu_npa_event_ctx *npa_event_ctx = ctx;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	if (npa_event_ctx->npa_af_rvu_ras)
		rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);

	return 0;
}

RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);

static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);

static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}

static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
{
	struct rvu *rvu = rvu_dl->rvu;
	int err;

	err = rvu_npa_register_reporters(rvu_dl);
	if (err) {
		dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
			 err);
		return err;
	}
	rvu_npa_register_interrupts(rvu);

	return 0;
}

static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *npa_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	npa_reporters = rvu_dl->rvu_npa_health_reporter;

	if (!npa_reporters->rvu_hw_npa_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);

	if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
		devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);

	rvu_npa_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
	kfree(rvu_dl->rvu_npa_health_reporter);
}

static int rvu_health_reporters_create(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	int err;

	rvu_dl = rvu->rvu_dl;
	err = rvu_npa_health_reporters_create(rvu_dl);
	if (err)
		return err;

	return rvu_nix_health_reporters_create(rvu_dl);
}

static void rvu_health_reporters_destroy(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;

	if (!rvu->rvu_dl)
		return;

	rvu_dl = rvu->rvu_dl;
	rvu_npa_health_reporters_destroy(rvu_dl);
	rvu_nix_health_reporters_destroy(rvu_dl);
}

/* Devlink Params APIs */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);

	return 0;
}

static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 dwrr_mtu;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);

	return 0;
}

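/* Driver-specific devlink parameter IDs; numbering starts right after the
 * generic devlink parameter ID space.
 */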
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};

static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	bool enabled;

	enabled = rvu_npc_exact_has_match_table(rvu);

	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
		 enabled ? "enabled" : "disabled");

	return 0;
}

static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	rvu_npc_exact_disable_feature(rvu);

	return 0;
}

static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 enable;

	if (kstrtoull(val.vstr, 10, &enable)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (enable != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (rvu_npc_exact_can_disable_feature(rvu))
		return 0;

	NL_SET_ERR_MSG_MOD(extack,
			   "Can't disable exact match feature; Please try before any configuration");
	return -EFAULT;
}

static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;
	u32 percent;

	mcam = &rvu->hw->mcam;
	percent = (mcam->hprio_count * 100) / mcam->bmap_entries;
	ctx->val.vu8 = (u8)percent;

	return 0;
}

static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;
	u32 percent;

	percent = ctx->val.vu8;
	mcam = &rvu->hw->mcam;
	mcam->hprio_count = (mcam->bmap_entries * percent) / 100;
	mcam->hprio_end = mcam->hprio_count;
	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}

static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct npc_mcam *mcam;

	/* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */
	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* Do not allow user to modify the high priority zone entries while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}

static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu);

	return 0;
}

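/* Applying a new NIX LF limit: tear down the NPC MCAM bookkeeping, update the
 * LF limit on every NIX block, then rebuild the MCAM state, whose sizing is
 * derived from the configured LF count.
 */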
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_block *block;
	int blkaddr = 0;

	npc_mcam_rsrcs_deinit(rvu);
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		block->lf.max = ctx->val.vu16;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}

static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
					union devlink_param_value val,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u16 max_nix0_lf, max_nix1_lf;
	struct npc_mcam *mcam;
	u64 cfg;

	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	max_nix0_lf = cfg & 0xFFF;
	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
	max_nix1_lf = cfg & 0xFFF;

	/* Do not allow user to modify maximum NIX LFs while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix0_lf");
		return -EPERM;
	}

	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix1_lf");
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};

static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};

/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch;

	rswitch = &rvu->rswitch;
	*mode = rswitch->mode;

	return 0;
}

static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch;

	rswitch = &rvu->rswitch;
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		if (rswitch->mode == mode)
			return 0;
		rswitch->mode = mode;
		if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
			rvu_switch_enable(rvu);
		else
			rvu_switch_disable(rvu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};

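/* Allocate the devlink instance, create the NPA/NIX health reporters and the
 * common AF parameters, register the exact match parameter only when the
 * silicon exposes the NPC exact match table, and finally publish the devlink
 * instance to user space.
 */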
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}

void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	struct devlink *dl = rvu_dl->dl;

	devlink_unregister(dl);

	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

	/* Unregister exact match devlink only for CN10K-B */
	if (rvu_npc_exact_has_match_table(rvu))
		devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
					  ARRAY_SIZE(rvu_af_dl_param_exact_match));

	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
}