// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M
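
/* Illustration only (not part of the driver): for an MBOX_UP_CGX_MESSAGES
 * table entry such as
 *	M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
 * the M() macro above expands to an allocator roughly equivalent to
 *
 *	static struct cgx_link_info_msg *
 *	otx2_mbox_alloc_msg_cgx_link_event(struct rvu *rvu, int devid)
 *	{ ...alloc from the AF->PF "up" mailbox, stamp hdr.sig/hdr.id... }
 *
 * The message ID shown is indicative; the authoritative list is the
 * MBOX_UP_CGX_MESSAGES table in mbox.h.
 */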

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return false;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
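
/* Illustration: cgx_id 1, lmac_id 2 packs to ((1 & 0xF) << 4) | (2 & 0xF)
 * = 0x12. rvu_get_cgx_lmac_id() performs the inverse unpacking of this
 * one-byte encoding, and 0xFF serves as the "unmapped" sentinel value in
 * pf2cgxlmac_map.
 */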

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Returns the first enabled CGX instance; if none are enabled, returns NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table.
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid CGX and LMAC id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			if (iter >= MAX_LMAC_COUNT)
				continue;
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}
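
/* Illustrative example under a hypothetical config: with a single CGX block
 * exposing two enabled LMACs, the mapping built above would be
 *
 *	pf2cgxlmac_map[0] = 0xFF	(PF0 is the AF, never CGX mapped)
 *	pf2cgxlmac_map[1] = 0x00	(cgx0/lmac0)
 *	pf2cgxlmac_map[2] = 0x01	(cgx0/lmac1)
 *	cgxlmac2pf_map[0] = BIT(1)	(cgx0/lmac0 -> PF1)
 *	cgxlmac2pf_map[1] = BIT(2)	(cgx0/lmac1 -> PF2)
 */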

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
	if (!pfmap) {
		dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
			event->cgx_id, event->lmac_id);
		return;
	}

	do {
		pfid = find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		mutex_lock(&rvu->mbox_lock);

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg) {
			mutex_unlock(&rvu->mbox_lock);
			continue;
		}

		msg->link_info = *linfo;

		otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);

		otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);

		mutex_unlock(&rvu->mbox_lock);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port id starts from 0 and is not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}
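
/* Note (illustrative): pcifunc encodes the PF number in its upper bits and
 * the function number in RVU_PFVF_FUNC_MASK, where FUNC 0 is the PF itself
 * and FUNC n (n > 0) is VF (n - 1). Hence any VF request carries a non-zero
 * FUNC field and fails the check above, while a CGX-mapped PF's own request
 * is permitted.
 */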

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so it needs to be done explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If msg is received from PFs (which are not mapped to CGX LMACs)
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So returning zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit MAC address to rsp->mac_addr */
	u64_to_ether_addr(cfg, rsp->mac_addr);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non-hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Enable drop on non-hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX/RPM
	 * LMACs; if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes,
	 * and if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	/* Inform MCS about 8B RX header */
	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Cannot configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
 * from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VFs belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
								    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}
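
/* Illustrative flow (hypothetical): when a PF starts its first NIXLF,
 * cgx_users is still 0, so CGX Rx/Tx gets enabled; subsequent NIXLF starts
 * from that PF or its VFs only increment the counter. On stop, the counter
 * is decremented and CGX Rx/Tx is disabled only once it reaches 0 again.
 */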

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));
	else
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));

	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Cannot configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}

void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	struct cgx *cgxd;
	u8 cgx, lmac;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
	cgxd = rvu_cgx_pdata(cgx, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
		dev_err(rvu->dev, "Failed to reset MAC\n");
}