1/**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2016 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18#include <linux/pci.h>
19#include <linux/if_vlan.h>
20#include "liquidio_common.h"
21#include "octeon_droq.h"
22#include "octeon_iq.h"
23#include "response_manager.h"
24#include "octeon_device.h"
25#include "octeon_nic.h"
26#include "octeon_main.h"
27#include "octeon_network.h"
28
29MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
30MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
31MODULE_LICENSE("GPL");
32
33/* OOM task polling interval */
34#define LIO_OOM_POLL_INTERVAL_MS 250
35
36#define OCTNIC_MAX_SG MAX_SKB_FRAGS
37
38/**
39 * lio_delete_glists - Delete gather lists
40 * @lio: per-network private data
41 */
42void lio_delete_glists(struct lio *lio)
43{
44 struct octnic_gather *g;
45 int i;
46
47 kfree(lio->glist_lock);
48 lio->glist_lock = NULL;
49
50 if (!lio->glist)
51 return;
52
53 for (i = 0; i < lio->oct_dev->num_iqs; i++) {
54 do {
55 g = (struct octnic_gather *)
56 lio_list_delete_head(&lio->glist[i]);
57 kfree(g);
58 } while (g);
59
60 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
61 lio->glists_dma_base && lio->glists_dma_base[i]) {
62 lio_dma_free(lio->oct_dev,
63 lio->glist_entry_size * lio->tx_qsize,
64 lio->glists_virt_base[i],
65 lio->glists_dma_base[i]);
66 }
67 }
68
69 kfree(lio->glists_virt_base);
70 lio->glists_virt_base = NULL;
71
72 kfree(lio->glists_dma_base);
73 lio->glists_dma_base = NULL;
74
75 kfree(lio->glist);
76 lio->glist = NULL;
77}
78EXPORT_SYMBOL_GPL(lio_delete_glists);
79
80/**
81 * lio_setup_glists - Setup gather lists
82 * @oct: octeon_device
83 * @lio: per-network private data
84 * @num_iqs: count of iqs to allocate
85 */
86int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
87{
88 struct octnic_gather *g;
89 int i, j;
90
91 lio->glist_lock =
92 kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
93 if (!lio->glist_lock)
94 return -ENOMEM;
95
96 lio->glist =
97 kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
98 if (!lio->glist) {
99 kfree(lio->glist_lock);
100 lio->glist_lock = NULL;
101 return -ENOMEM;
102 }
103
104 lio->glist_entry_size =
105 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
106
107 /* allocate memory to store virtual and dma base address of
108 * per glist consistent memory
109 */
110 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
111 GFP_KERNEL);
112 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
113 GFP_KERNEL);
114
115 if (!lio->glists_virt_base || !lio->glists_dma_base) {
116 lio_delete_glists(lio);
117 return -ENOMEM;
118 }
119
120 for (i = 0; i < num_iqs; i++) {
121 int numa_node = dev_to_node(&oct->pci_dev->dev);
122
123 spin_lock_init(&lio->glist_lock[i]);
124
125 INIT_LIST_HEAD(&lio->glist[i]);
126
127 lio->glists_virt_base[i] =
128 lio_dma_alloc(oct,
129 lio->glist_entry_size * lio->tx_qsize,
130 &lio->glists_dma_base[i]);
131
132 if (!lio->glists_virt_base[i]) {
133 lio_delete_glists(lio);
134 return -ENOMEM;
135 }
136
137 for (j = 0; j < lio->tx_qsize; j++) {
138 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
139 numa_node);
140 if (!g)
141 g = kzalloc(sizeof(*g), GFP_KERNEL);
142 if (!g)
143 break;
144
145 g->sg = lio->glists_virt_base[i] +
146 (j * lio->glist_entry_size);
147
148 g->sg_dma_ptr = lio->glists_dma_base[i] +
149 (j * lio->glist_entry_size);
150
151 list_add_tail(&g->list, &lio->glist[i]);
152 }
153
154 if (j != lio->tx_qsize) {
155 lio_delete_glists(lio);
156 return -ENOMEM;
157 }
158 }
159
160 return 0;
161}
162EXPORT_SYMBOL_GPL(lio_setup_glists);
163
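/**
 * liquidio_set_feature - send a feature control command to the firmware
 * @netdev: network device
 * @cmd: feature command (OCTNET_CMD_*)
 * @param1: command parameter
 *
 * Return: 0 on success, negative error code on failure.
 */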
164int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
165{
166 struct lio *lio = GET_LIO(netdev);
167 struct octeon_device *oct = lio->oct_dev;
168 struct octnic_ctrl_pkt nctrl;
169 int ret = 0;
170
171 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
172
173 nctrl.ncmd.u64 = 0;
174 nctrl.ncmd.s.cmd = cmd;
175 nctrl.ncmd.s.param1 = param1;
176 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
177 nctrl.netpndev = (u64)netdev;
178 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
179
180 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
181 if (ret) {
182 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
183 ret);
184 if (ret > 0)
185 ret = -EIO;
186 }
187 return ret;
188}
189EXPORT_SYMBOL_GPL(liquidio_set_feature);
190
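/**
 * octeon_report_tx_completion_to_bql - report completed Tx work to BQL
 * @txq: netdev_queue the packets were sent on
 * @pkts_compl: number of completed packets
 * @bytes_compl: number of completed bytes
 */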
191void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
192 unsigned int bytes_compl)
193{
194 struct netdev_queue *netdev_queue = txq;
195
196 netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
197}
198
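/**
 * octeon_update_tx_completion_counters - tally a completed Tx buffer
 * @buf: completed buffer (free info or soft command, depending on reqtype)
 * @reqtype: request type of the completed buffer
 * @pkts_compl: incremented by one for the completed skb
 * @bytes_compl: incremented by the length of the completed skb
 */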
199void octeon_update_tx_completion_counters(void *buf, int reqtype,
200 unsigned int *pkts_compl,
201 unsigned int *bytes_compl)
202{
203 struct octnet_buf_free_info *finfo;
204 struct sk_buff *skb = NULL;
205 struct octeon_soft_command *sc;
206
207 switch (reqtype) {
208 case REQTYPE_NORESP_NET:
209 case REQTYPE_NORESP_NET_SG:
210 finfo = buf;
211 skb = finfo->skb;
212 break;
213
214 case REQTYPE_RESP_NET_SG:
215 case REQTYPE_RESP_NET:
216 sc = buf;
217 skb = sc->callback_arg;
218 break;
219
220 default:
221 return;
222 }
223
224 (*pkts_compl)++;
225 *bytes_compl += skb->len;
226}
227
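/**
 * octeon_report_sent_bytes_to_bql - report queued Tx bytes to BQL
 * @buf: buffer being queued (free info or soft command, depending on reqtype)
 * @reqtype: request type of the buffer
 *
 * Return: non-zero if the Tx queue is stopped, 0 otherwise.
 */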
228int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
229{
230 struct octnet_buf_free_info *finfo;
231 struct sk_buff *skb;
232 struct octeon_soft_command *sc;
233 struct netdev_queue *txq;
234
235 switch (reqtype) {
236 case REQTYPE_NORESP_NET:
237 case REQTYPE_NORESP_NET_SG:
238 finfo = buf;
239 skb = finfo->skb;
240 break;
241
242 case REQTYPE_RESP_NET_SG:
243 case REQTYPE_RESP_NET:
244 sc = buf;
245 skb = sc->callback_arg;
246 break;
247
248 default:
249 return 0;
250 }
251
252 txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
253 netdev_tx_sent_queue(txq, skb->len);
254
255 return netif_xmit_stopped(txq);
256}
257
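/**
 * liquidio_link_ctrl_cmd_completion - callback run when a link control command completes
 * @nctrl_ptr: pointer to the control packet that was sent
 *
 * Logs the outcome of the command; returns early if the command failed.
 */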
258void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
259{
260 struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
261 struct net_device *netdev = (struct net_device *)nctrl->netpndev;
262 struct lio *lio = GET_LIO(netdev);
263 struct octeon_device *oct = lio->oct_dev;
264 u8 *mac;
265
266 if (nctrl->sc_status)
267 return;
268
269 switch (nctrl->ncmd.s.cmd) {
270 case OCTNET_CMD_CHANGE_DEVFLAGS:
271 case OCTNET_CMD_SET_MULTI_LIST:
272 case OCTNET_CMD_SET_UC_LIST:
273 break;
274
275 case OCTNET_CMD_CHANGE_MACADDR:
276 mac = ((u8 *)&nctrl->udd[0]) + 2;
277 if (nctrl->ncmd.s.param1) {
278 /* vfidx is 0 based, but vf_num (param1) is 1 based */
279 int vfidx = nctrl->ncmd.s.param1 - 1;
280 bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
281
282 if (mac_is_admin_assigned)
283 netif_info(lio, probe, lio->netdev,
284 "MAC Address %pM is configured for VF %d\n",
285 mac, vfidx);
286 } else {
287 netif_info(lio, probe, lio->netdev,
288 " MACAddr changed to %pM\n",
289 mac);
290 }
291 break;
292
293 case OCTNET_CMD_GPIO_ACCESS:
294 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
295
296 break;
297
298 case OCTNET_CMD_ID_ACTIVE:
299 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
300
301 break;
302
303 case OCTNET_CMD_LRO_ENABLE:
304 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
305 break;
306
307 case OCTNET_CMD_LRO_DISABLE:
308 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
309 netdev->name);
310 break;
311
312 case OCTNET_CMD_VERBOSE_ENABLE:
313 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
314 netdev->name);
315 break;
316
317 case OCTNET_CMD_VERBOSE_DISABLE:
318 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
319 netdev->name);
320 break;
321
322 case OCTNET_CMD_VLAN_FILTER_CTL:
323 if (nctrl->ncmd.s.param1)
324 dev_info(&oct->pci_dev->dev,
325 "%s VLAN filter enabled\n", netdev->name);
326 else
327 dev_info(&oct->pci_dev->dev,
328 "%s VLAN filter disabled\n", netdev->name);
329 break;
330
331 case OCTNET_CMD_ADD_VLAN_FILTER:
332 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
333 netdev->name, nctrl->ncmd.s.param1);
334 break;
335
336 case OCTNET_CMD_DEL_VLAN_FILTER:
337 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
338 netdev->name, nctrl->ncmd.s.param1);
339 break;
340
341 case OCTNET_CMD_SET_SETTINGS:
342 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
343 netdev->name);
344
345 break;
346
347 /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
348 * Command passed by NIC driver
349 */
350 case OCTNET_CMD_TNL_RX_CSUM_CTL:
351 if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
352 netif_info(lio, probe, lio->netdev,
353 "RX Checksum Offload Enabled\n");
354 } else if (nctrl->ncmd.s.param1 ==
355 OCTNET_CMD_RXCSUM_DISABLE) {
356 netif_info(lio, probe, lio->netdev,
357 "RX Checksum Offload Disabled\n");
358 }
359 break;
360
361 /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
362 * Command passed by NIC driver
363 */
364 case OCTNET_CMD_TNL_TX_CSUM_CTL:
365 if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
366 netif_info(lio, probe, lio->netdev,
367 "TX Checksum Offload Enabled\n");
368 } else if (nctrl->ncmd.s.param1 ==
369 OCTNET_CMD_TXCSUM_DISABLE) {
370 netif_info(lio, probe, lio->netdev,
371 "TX Checksum Offload Disabled\n");
372 }
373 break;
374
375 /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
376 * Command passed by NIC driver
377 */
378 case OCTNET_CMD_VXLAN_PORT_CONFIG:
379 if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
380 netif_info(lio, probe, lio->netdev,
381 "VxLAN Destination UDP PORT:%d ADDED\n",
382 nctrl->ncmd.s.param1);
383 } else if (nctrl->ncmd.s.more ==
384 OCTNET_CMD_VXLAN_PORT_DEL) {
385 netif_info(lio, probe, lio->netdev,
386 "VxLAN Destination UDP PORT:%d DELETED\n",
387 nctrl->ncmd.s.param1);
388 }
389 break;
390
391 case OCTNET_CMD_SET_FLOW_CTL:
392 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
393 break;
394
395 case OCTNET_CMD_QUEUE_COUNT_CTL:
396 netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
397 nctrl->ncmd.s.param1);
398 break;
399
400 default:
401 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
402 nctrl->ncmd.s.cmd);
403 }
404}
405EXPORT_SYMBOL_GPL(liquidio_link_ctrl_cmd_completion);
406
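/**
 * octeon_pf_changed_vf_macaddr - handle a PF-initiated VF MAC address change
 * @oct: octeon device
 * @mac: new MAC address assigned by the PF
 */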
407void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
408{
409 bool macaddr_changed = false;
410 struct net_device *netdev;
411 struct lio *lio;
412
413 rtnl_lock();
414
415 netdev = oct->props[0].netdev;
416 lio = GET_LIO(netdev);
417
418 lio->linfo.macaddr_is_admin_asgnd = true;
419
420 if (!ether_addr_equal(netdev->dev_addr, mac)) {
421 macaddr_changed = true;
422 eth_hw_addr_set(netdev, mac);
423 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
424 call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
425 }
426
427 rtnl_unlock();
428
429 if (macaddr_changed)
430 dev_info(&oct->pci_dev->dev,
431 "PF changed VF's MAC address to %pM\n", mac);
432
433 /* no need to notify the firmware of the macaddr change because
434 * the PF did that already
435 */
436}
437
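/**
 * octeon_schedule_rxq_oom_work - schedule the out-of-memory refill poll for a DROQ
 * @oct: octeon device
 * @droq: output queue that failed to refill its receive buffers
 */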
438void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
439 struct octeon_droq *droq)
440{
441 struct net_device *netdev = oct->props[0].netdev;
442 struct lio *lio = GET_LIO(netdev);
443 struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
444
445 queue_delayed_work(wq->wq, &wq->wk.work,
446 msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
447}
448
449static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
450{
451 struct cavium_wk *wk = (struct cavium_wk *)work;
452 struct lio *lio = (struct lio *)wk->ctxptr;
453 struct octeon_device *oct = lio->oct_dev;
454 int q_no = wk->ctxul;
455 struct octeon_droq *droq = oct->droq[q_no];
456
457 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
458 return;
459
460 if (octeon_retry_droq_refill(droq))
461 octeon_schedule_rxq_oom_work(oct, droq);
462}
463
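/**
 * setup_rx_oom_poll_fn - create the per-rxq out-of-memory poll workqueues
 * @netdev: network device
 *
 * Return: 0 on success, -ENOMEM if a workqueue could not be allocated.
 */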
464int setup_rx_oom_poll_fn(struct net_device *netdev)
465{
466 struct lio *lio = GET_LIO(netdev);
467 struct octeon_device *oct = lio->oct_dev;
468 struct cavium_wq *wq;
469 int q, q_no;
470
471 for (q = 0; q < oct->num_oqs; q++) {
472 q_no = lio->linfo.rxpciq[q].s.q_no;
473 wq = &lio->rxq_status_wq[q_no];
474 wq->wq = alloc_workqueue("rxq-oom-status",
475 WQ_MEM_RECLAIM, 0);
476 if (!wq->wq) {
477 dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
478 return -ENOMEM;
479 }
480
481 INIT_DELAYED_WORK(&wq->wk.work,
482 octnet_poll_check_rxq_oom_status);
483 wq->wk.ctxptr = lio;
484 wq->wk.ctxul = q_no;
485 }
486
487 return 0;
488}
489EXPORT_SYMBOL_GPL(setup_rx_oom_poll_fn);
490
491void cleanup_rx_oom_poll_fn(struct net_device *netdev)
492{
493 struct lio *lio = GET_LIO(netdev);
494 struct octeon_device *oct = lio->oct_dev;
495 struct cavium_wq *wq;
496 int q_no;
497
498 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
499 wq = &lio->rxq_status_wq[q_no];
500 if (wq->wq) {
501 cancel_delayed_work_sync(&wq->wk.work);
502 destroy_workqueue(wq->wq);
503 wq->wq = NULL;
504 }
505 }
506}
507EXPORT_SYMBOL_GPL(cleanup_rx_oom_poll_fn);
508
509/* Runs in interrupt context. */
510static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
511{
512 struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
513 struct net_device *netdev;
514 struct lio *lio;
515
516 netdev = oct->props[iq->ifidx].netdev;
517
518 /* This is needed because the first IQ does not have
519 * a netdev associated with it.
520 */
521 if (!netdev)
522 return;
523
524 lio = GET_LIO(netdev);
525 if (__netif_subqueue_stopped(netdev, iq->q_index) &&
526 lio->linfo.link.s.link_up &&
527 (!octnet_iq_is_full(oct, iq_num))) {
528 netif_wake_subqueue(netdev, iq->q_index);
529 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
530 tx_restart, 1);
531 }
532}
533
534/**
535 * octeon_setup_droq - Setup output queue
536 * @oct: octeon device
537 * @q_no: which queue
538 * @num_descs: how many descriptors
539 * @desc_size: size of each descriptor
540 * @app_ctx: application context
541 */
542static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
543 int desc_size, void *app_ctx)
544{
545 int ret_val;
546
547 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
548 /* droq creation and local register settings. */
549 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
550 if (ret_val < 0)
551 return ret_val;
552
553 if (ret_val == 1) {
554 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
555 return 0;
556 }
557
558 /* Enable the droq queues */
559 octeon_set_droq_pkt_op(oct, q_no, 1);
560
561 /* Send Credit for Octeon Output queues. Credits are always
562 * sent after the output queue is enabled.
563 */
564 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
565
566 return ret_val;
567}
568
569/**
570 * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
571 * @octeon_id: octeon device id.
572 * @skbuff: skbuff struct to be passed to network layer.
573 * @len: size of total data received.
574 * @rh: Control header associated with the packet
575 * @param: additional control data with the packet
576 * @arg: farg registered in droq_ops
577 */
578static void
579liquidio_push_packet(u32 __maybe_unused octeon_id,
580 void *skbuff,
581 u32 len,
582 union octeon_rh *rh,
583 void *param,
584 void *arg)
585{
586 struct net_device *netdev = (struct net_device *)arg;
587 struct octeon_droq *droq =
588 container_of(param, struct octeon_droq, napi);
589 struct sk_buff *skb = (struct sk_buff *)skbuff;
590 struct skb_shared_hwtstamps *shhwtstamps;
591 struct napi_struct *napi = param;
592 u16 vtag = 0;
593 u32 r_dh_off;
594 u64 ns;
595
596 if (netdev) {
597 struct lio *lio = GET_LIO(netdev);
598 struct octeon_device *oct = lio->oct_dev;
599
600 /* Do not proceed if the interface is not in RUNNING state. */
601 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
602 recv_buffer_free(skb);
603 droq->stats.rx_dropped++;
604 return;
605 }
606
607 skb->dev = netdev;
608
609 skb_record_rx_queue(skb, droq->q_no);
610 if (likely(len > MIN_SKB_SIZE)) {
611 struct octeon_skb_page_info *pg_info;
612 unsigned char *va;
613
614 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
615 if (pg_info->page) {
616 /* For Paged allocation use the frags */
617 va = page_address(pg_info->page) +
618 pg_info->page_offset;
619 memcpy(skb->data, va, MIN_SKB_SIZE);
620 skb_put(skb, MIN_SKB_SIZE);
621 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
622 pg_info->page,
623 pg_info->page_offset +
624 MIN_SKB_SIZE,
625 len - MIN_SKB_SIZE,
626 LIO_RXBUFFER_SZ);
627 }
628 } else {
629 struct octeon_skb_page_info *pg_info =
630 ((struct octeon_skb_page_info *)(skb->cb));
631 skb_copy_to_linear_data(skb, page_address(pg_info->page)
632 + pg_info->page_offset, len);
633 skb_put(skb, len);
634 put_page(pg_info->page);
635 }
636
637 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
638
639 if (oct->ptp_enable) {
640 if (rh->r_dh.has_hwtstamp) {
641 /* timestamp is included from the hardware at
642 * the beginning of the packet.
643 */
644 if (ifstate_check
645 (lio,
646 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
647 /* Nanoseconds are in the first 64-bits
648 * of the packet.
649 */
650 memcpy(&ns, (skb->data + r_dh_off),
651 sizeof(ns));
652 r_dh_off -= BYTES_PER_DHLEN_UNIT;
653 shhwtstamps = skb_hwtstamps(skb);
654 shhwtstamps->hwtstamp =
655 ns_to_ktime(ns +
656 lio->ptp_adjust);
657 }
658 }
659 }
660
661 if (rh->r_dh.has_hash) {
662 __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
663 u32 hash = be32_to_cpu(*hash_be);
664
665 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
666 r_dh_off -= BYTES_PER_DHLEN_UNIT;
667 }
668
669 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
670 skb->protocol = eth_type_trans(skb, skb->dev);
671
672 if ((netdev->features & NETIF_F_RXCSUM) &&
673 (((rh->r_dh.encap_on) &&
674 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
675 (!(rh->r_dh.encap_on) &&
676 ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
677 CNNIC_CSUM_VERIFIED))))
678 /* checksum has already been verified */
679 skb->ip_summed = CHECKSUM_UNNECESSARY;
680 else
681 skb->ip_summed = CHECKSUM_NONE;
682
683 /* Setting Encapsulation field on basis of status received
684 * from the firmware
685 */
686 if (rh->r_dh.encap_on) {
687 skb->encapsulation = 1;
688 skb->csum_level = 1;
689 droq->stats.rx_vxlan++;
690 }
691
692 /* inbound VLAN tag */
693 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
694 rh->r_dh.vlan) {
695 u16 priority = rh->r_dh.priority;
696 u16 vid = rh->r_dh.vlan;
697
698 vtag = (priority << VLAN_PRIO_SHIFT) | vid;
699 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
700 }
701
702 napi_gro_receive(napi, skb);
703
704 droq->stats.rx_bytes_received += len -
705 rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
706 droq->stats.rx_pkts_received++;
707 } else {
708 recv_buffer_free(skb);
709 }
710}
711
712/**
713 * napi_schedule_wrapper - wrapper for calling napi_schedule
714 * @param: parameters to pass to napi_schedule
715 *
716 * Used when scheduling on different CPUs
717 */
718static void napi_schedule_wrapper(void *param)
719{
720 struct napi_struct *napi = param;
721
722 napi_schedule(napi);
723}
724
725/**
726 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
727 * @arg: pointer to octeon output queue
728 */
729static void liquidio_napi_drv_callback(void *arg)
730{
731 struct octeon_device *oct;
732 struct octeon_droq *droq = arg;
733 int this_cpu = smp_processor_id();
734
735 oct = droq->oct_dev;
736
737 if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
738 droq->cpu_id == this_cpu) {
739 napi_schedule_irqoff(&droq->napi);
740 } else {
741 INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
742 smp_call_function_single_async(droq->cpu_id, &droq->csd);
743 }
744}
745
746/**
747 * liquidio_napi_poll - Entry point for NAPI polling
748 * @napi: NAPI structure
749 * @budget: maximum number of items to process
750 */
751static int liquidio_napi_poll(struct napi_struct *napi, int budget)
752{
753 struct octeon_instr_queue *iq;
754 struct octeon_device *oct;
755 struct octeon_droq *droq;
756 int tx_done = 0, iq_no;
757 int work_done;
758
759 droq = container_of(napi, struct octeon_droq, napi);
760 oct = droq->oct_dev;
761 iq_no = droq->q_no;
762
763 /* Handle Droq descriptors */
764 work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
765
766 /* Flush the instruction queue */
767 iq = oct->instr_queue[iq_no];
768 if (iq) {
769 /* TODO: move this check to inside octeon_flush_iq,
770 * once check_db_timeout is removed
771 */
772 if (atomic_read(&iq->instr_pending))
773 /* Process iq buffers within the budget limits */
774 tx_done = octeon_flush_iq(oct, iq, budget);
775 else
776 tx_done = 1;
777 /* Update iq read-index rather than waiting for next interrupt.
778 * Return back if tx_done is false.
779 */
780 /* sub-queue status update */
781 lio_update_txq_status(oct, iq_no);
782 } else {
783 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
784 __func__, iq_no);
785 }
786
787#define MAX_REG_CNT 2000000U
788 /* force enable interrupt if reg cnts are high to avoid wraparound */
789 if ((work_done < budget && tx_done) ||
790 (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
791 (droq->pkt_count >= MAX_REG_CNT)) {
792 napi_complete_done(napi, work_done);
793
794 octeon_enable_irq(droq->oct_dev, droq->q_no);
795 return 0;
796 }
797
798 return (!tx_done) ? (budget) : (work_done);
799}
800
801/**
802 * liquidio_setup_io_queues - Setup input and output queues
803 * @octeon_dev: octeon device
804 * @ifidx: Interface index
805 * @num_iqs: input io queue count
806 * @num_oqs: output io queue count
807 *
808 * Note: Queues are with respect to the octeon device. Thus
809 * an input queue is for egress packets, and output queues
810 * are for ingress packets.
811 */
812int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
813 u32 num_iqs, u32 num_oqs)
814{
815 struct octeon_droq_ops droq_ops;
816 struct net_device *netdev;
817 struct octeon_droq *droq;
818 struct napi_struct *napi;
819 int cpu_id_modulus;
820 int num_tx_descs;
821 struct lio *lio;
822 int retval = 0;
823 int q, q_no;
824 int cpu_id;
825
826 netdev = octeon_dev->props[ifidx].netdev;
827
828 lio = GET_LIO(netdev);
829
830 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
831
832 droq_ops.fptr = liquidio_push_packet;
833 droq_ops.farg = netdev;
834
835 droq_ops.poll_mode = 1;
836 droq_ops.napi_fn = liquidio_napi_drv_callback;
837 cpu_id = 0;
838 cpu_id_modulus = num_present_cpus();
839
840 /* set up DROQs. */
841 for (q = 0; q < num_oqs; q++) {
842 q_no = lio->linfo.rxpciq[q].s.q_no;
843 dev_dbg(&octeon_dev->pci_dev->dev,
844 "%s index:%d linfo.rxpciq.s.q_no:%d\n",
845 __func__, q, q_no);
846 retval = octeon_setup_droq(
847 octeon_dev, q_no,
848 CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
849 lio->ifidx),
850 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
851 lio->ifidx),
852 NULL);
853 if (retval) {
854 dev_err(&octeon_dev->pci_dev->dev,
855 "%s : Runtime DROQ(RxQ) creation failed.\n",
856 __func__);
857 return 1;
858 }
859
860 droq = octeon_dev->droq[q_no];
861 napi = &droq->napi;
862 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
863 (u64)netdev, (u64)octeon_dev);
864 netif_napi_add(netdev, napi, liquidio_napi_poll);
865
866 /* designate a CPU for this droq */
867 droq->cpu_id = cpu_id;
868 cpu_id++;
869 if (cpu_id >= cpu_id_modulus)
870 cpu_id = 0;
871
872 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
873 }
874
875 if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
876 /* 23XX PF/VF can send/recv control messages (via the first
877 * PF/VF-owned droq) from the firmware even if the ethX
878 * interface is down, so that's why poll_mode must be off
879 * for the first droq.
880 */
881 octeon_dev->droq[0]->ops.poll_mode = 0;
882 }
883
884 /* set up IQs. */
885 for (q = 0; q < num_iqs; q++) {
886 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
887 octeon_get_conf(octeon_dev), lio->ifidx);
888 retval = octeon_setup_iq(octeon_dev, ifidx, q,
889 lio->linfo.txpciq[q], num_tx_descs,
890 netdev_get_tx_queue(netdev, q));
891 if (retval) {
892 dev_err(&octeon_dev->pci_dev->dev,
893 " %s : Runtime IQ(TxQ) creation failed.\n",
894 __func__);
895 return 1;
896 }
897
898 /* XPS */
899 if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
900 octeon_dev->ioq_vector) {
901 struct octeon_ioq_vector *ioq_vector;
902
903 ioq_vector = &octeon_dev->ioq_vector[q];
904 netif_set_xps_queue(netdev,
905 &ioq_vector->affinity_mask,
906 ioq_vector->iq_index);
907 }
908 }
909
910 return 0;
911}
912EXPORT_SYMBOL_GPL(liquidio_setup_io_queues);
913
914static
915int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
916{
917 struct octeon_device *oct = droq->oct_dev;
918 struct octeon_device_priv *oct_priv =
919 (struct octeon_device_priv *)oct->priv;
920
921 if (droq->ops.poll_mode) {
922 droq->ops.napi_fn(droq);
923 } else {
924 if (ret & MSIX_PO_INT) {
925 if (OCTEON_CN23XX_VF(oct))
926 dev_err(&oct->pci_dev->dev,
927 "should not come here should not get rx when poll mode = 0 for vf\n");
928 tasklet_schedule(&oct_priv->droq_tasklet);
929 return 1;
930 }
931 /* this will be flushed periodically by check iq db */
932 if (ret & MSIX_PI_INT)
933 return 0;
934 }
935
936 return 0;
937}
938
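/**
 * liquidio_msix_intr_handler - MSI-X interrupt handler for an IO queue vector
 * @irq: unused
 * @dev: pointer to the octeon_ioq_vector that raised the interrupt
 */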
939irqreturn_t
940liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
941{
942 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
943 struct octeon_device *oct = ioq_vector->oct_dev;
944 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
945 u64 ret;
946
947 ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
948
949 if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
950 liquidio_schedule_msix_droq_pkt_handler(droq, ret);
951
952 return IRQ_HANDLED;
953}
954
955/**
956 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
957 * @oct: octeon device
958 */
959static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
960{
961 struct octeon_device_priv *oct_priv =
962 (struct octeon_device_priv *)oct->priv;
963 struct octeon_droq *droq;
964 u64 oq_no;
965
966 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
967 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
968 oq_no++) {
969 if (!(oct->droq_intr & BIT_ULL(oq_no)))
970 continue;
971
972 droq = oct->droq[oq_no];
973
974 if (droq->ops.poll_mode) {
975 droq->ops.napi_fn(droq);
976 oct_priv->napi_mask |= BIT_ULL(oq_no);
977 } else {
978 tasklet_schedule(&oct_priv->droq_tasklet);
979 }
980 }
981 }
982}
983
984/**
985 * liquidio_legacy_intr_handler - Interrupt handler for octeon
986 * @irq: unused
987 * @dev: octeon device
988 */
989static
990irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
991{
992 struct octeon_device *oct = (struct octeon_device *)dev;
993 irqreturn_t ret;
994
995 /* Disable our interrupts for the duration of ISR */
996 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
997
998 ret = oct->fn_list.process_interrupt_regs(oct);
999
1000 if (ret == IRQ_HANDLED)
1001 liquidio_schedule_droq_pkt_handlers(oct);
1002
1003 /* Re-enable our interrupts */
1004 if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
1005 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
1006
1007 return ret;
1008}
1009
1010/**
1011 * octeon_setup_interrupt - Setup interrupt for octeon device
1012 * @oct: octeon device
1013 * @num_ioqs: number of queues
1014 *
1015 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
1016 */
1017int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
1018{
1019 struct msix_entry *msix_entries;
1020 char *queue_irq_names = NULL;
1021 int i, num_interrupts = 0;
1022 int num_alloc_ioq_vectors;
1023 char *aux_irq_name = NULL;
1024 int num_ioq_vectors;
1025 int irqret, err;
1026
1027 if (oct->msix_on) {
1028 oct->num_msix_irqs = num_ioqs;
1029 if (OCTEON_CN23XX_PF(oct)) {
1030 num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
1031
1032 /* one non ioq interrupt for handling
1033 * sli_mac_pf_int_sum
1034 */
1035 oct->num_msix_irqs += 1;
1036 } else if (OCTEON_CN23XX_VF(oct)) {
1037 num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
1038 }
1039
1040 /* allocate storage for the names assigned to each irq */
1041 oct->irq_name_storage =
1042 kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
1043 if (!oct->irq_name_storage) {
1044 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1045 return -ENOMEM;
1046 }
1047
1048 queue_irq_names = oct->irq_name_storage;
1049
1050 if (OCTEON_CN23XX_PF(oct))
1051 aux_irq_name = &queue_irq_names
1052 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1053
1054 oct->msix_entries = kcalloc(oct->num_msix_irqs,
1055 sizeof(struct msix_entry),
1056 GFP_KERNEL);
1057 if (!oct->msix_entries) {
1058 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1059 kfree(oct->irq_name_storage);
1060 oct->irq_name_storage = NULL;
1061 return -ENOMEM;
1062 }
1063
1064 msix_entries = (struct msix_entry *)oct->msix_entries;
1065
1066 /* Assumption is that the PF MSI-X vectors start at pf_srn and run
1067 * through trs, and not from 0. If that changes, update this code.
1068 */
1069 if (OCTEON_CN23XX_PF(oct)) {
1070 for (i = 0; i < oct->num_msix_irqs - 1; i++)
1071 msix_entries[i].entry =
1072 oct->sriov_info.pf_srn + i;
1073
1074 msix_entries[oct->num_msix_irqs - 1].entry =
1075 oct->sriov_info.trs;
1076 } else if (OCTEON_CN23XX_VF(oct)) {
1077 for (i = 0; i < oct->num_msix_irqs; i++)
1078 msix_entries[i].entry = i;
1079 }
1080 num_alloc_ioq_vectors = pci_enable_msix_range(
1081 oct->pci_dev, msix_entries,
1082 oct->num_msix_irqs,
1083 oct->num_msix_irqs);
1084 if (num_alloc_ioq_vectors < 0) {
1085 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1086 kfree(oct->msix_entries);
1087 oct->msix_entries = NULL;
1088 kfree(oct->irq_name_storage);
1089 oct->irq_name_storage = NULL;
1090 return num_alloc_ioq_vectors;
1091 }
1092
1093 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1094
1095 num_ioq_vectors = oct->num_msix_irqs;
1096 /* For PF, there is one non-ioq interrupt handler */
1097 if (OCTEON_CN23XX_PF(oct)) {
1098 num_ioq_vectors -= 1;
1099
1100 snprintf(aux_irq_name, INTRNAMSIZ,
1101 "LiquidIO%u-pf%u-aux", oct->octeon_id,
1102 oct->pf_num);
1103 irqret = request_irq(
1104 msix_entries[num_ioq_vectors].vector,
1105 liquidio_legacy_intr_handler, 0,
1106 aux_irq_name, oct);
1107 if (irqret) {
1108 dev_err(&oct->pci_dev->dev,
1109 "Request_irq failed for MSIX interrupt Error: %d\n",
1110 irqret);
1111 pci_disable_msix(oct->pci_dev);
1112 kfree(oct->msix_entries);
1113 kfree(oct->irq_name_storage);
1114 oct->irq_name_storage = NULL;
1115 oct->msix_entries = NULL;
1116 return irqret;
1117 }
1118 }
1119 for (i = 0 ; i < num_ioq_vectors ; i++) {
1120 if (OCTEON_CN23XX_PF(oct))
1121 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1122 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1123 oct->octeon_id, oct->pf_num, i);
1124
1125 if (OCTEON_CN23XX_VF(oct))
1126 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1127 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1128 oct->octeon_id, oct->vf_num, i);
1129
1130 irqret = request_irq(msix_entries[i].vector,
1131 liquidio_msix_intr_handler, 0,
1132 &queue_irq_names[IRQ_NAME_OFF(i)],
1133 &oct->ioq_vector[i]);
1134
1135 if (irqret) {
1136 dev_err(&oct->pci_dev->dev,
1137 "Request_irq failed for MSIX interrupt Error: %d\n",
1138 irqret);
1139 /* Freeing the non-ioq irq vector here. */
1140 free_irq(msix_entries[num_ioq_vectors].vector,
1141 oct);
1142
1143 while (i) {
1144 i--;
1145 /* clearing affinity mask. */
1146 irq_set_affinity_hint(
1147 msix_entries[i].vector,
1148 NULL);
1149 free_irq(msix_entries[i].vector,
1150 &oct->ioq_vector[i]);
1151 }
1152 pci_disable_msix(oct->pci_dev);
1153 kfree(oct->msix_entries);
1154 kfree(oct->irq_name_storage);
1155 oct->irq_name_storage = NULL;
1156 oct->msix_entries = NULL;
1157 return irqret;
1158 }
1159 oct->ioq_vector[i].vector = msix_entries[i].vector;
1160 /* assign the cpu mask for this msix interrupt vector */
1161 irq_set_affinity_hint(msix_entries[i].vector,
1162 &oct->ioq_vector[i].affinity_mask
1163 );
1164 }
1165 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1166 oct->octeon_id);
1167 } else {
1168 err = pci_enable_msi(oct->pci_dev);
1169 if (err)
1170 dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1171 err);
1172 else
1173 oct->flags |= LIO_FLAG_MSI_ENABLED;
1174
1175 /* allocate storage for the names assigned to the irq */
1176 oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
1177 if (!oct->irq_name_storage)
1178 return -ENOMEM;
1179
1180 queue_irq_names = oct->irq_name_storage;
1181
1182 if (OCTEON_CN23XX_PF(oct))
1183 snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1184 "LiquidIO%u-pf%u-rxtx-%u",
1185 oct->octeon_id, oct->pf_num, 0);
1186
1187 if (OCTEON_CN23XX_VF(oct))
1188 snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1189 "LiquidIO%u-vf%u-rxtx-%u",
1190 oct->octeon_id, oct->vf_num, 0);
1191
1192 irqret = request_irq(oct->pci_dev->irq,
1193 liquidio_legacy_intr_handler,
1194 IRQF_SHARED,
1195 &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1196 if (irqret) {
1197 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1198 pci_disable_msi(oct->pci_dev);
1199 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1200 irqret);
1201 kfree(oct->irq_name_storage);
1202 oct->irq_name_storage = NULL;
1203 return irqret;
1204 }
1205 }
1206 return 0;
1207}
1208EXPORT_SYMBOL_GPL(octeon_setup_interrupt);
1209
1210/**
1211 * liquidio_change_mtu - Net device change_mtu
1212 * @netdev: network device
1213 * @new_mtu: the new max transmit unit size
1214 */
1215int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
1216{
1217 struct lio *lio = GET_LIO(netdev);
1218 struct octeon_device *oct = lio->oct_dev;
1219 struct octeon_soft_command *sc;
1220 union octnet_cmd *ncmd;
1221 int ret = 0;
1222
1223 sc = (struct octeon_soft_command *)
1224 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
1225 if (!sc) {
1226 netif_info(lio, rx_err, lio->netdev,
1227 "Failed to allocate soft command\n");
1228 return -ENOMEM;
1229 }
1230
1231 ncmd = (union octnet_cmd *)sc->virtdptr;
1232
1233 init_completion(&sc->complete);
1234 sc->sc_status = OCTEON_REQUEST_PENDING;
1235
1236 ncmd->u64 = 0;
1237 ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
1238 ncmd->s.param1 = new_mtu;
1239
1240 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1241
1242 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1243
1244 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1245 OPCODE_NIC_CMD, 0, 0, 0);
1246
1247 ret = octeon_send_soft_command(oct, sc);
1248 if (ret == IQ_SEND_FAILED) {
1249 netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
1250 octeon_free_soft_command(oct, sc);
1251 return -EINVAL;
1252 }
1253 /* Sleep on a wait queue till the cond flag indicates that the
1254 * response arrived or timed-out.
1255 */
1256 ret = wait_for_sc_completion_timeout(oct, sc, 0);
1257 if (ret)
1258 return ret;
1259
1260 if (sc->sc_status) {
1261 WRITE_ONCE(sc->caller_is_done, true);
1262 return -EINVAL;
1263 }
1264
1265 netdev->mtu = new_mtu;
1266 lio->mtu = new_mtu;
1267
1268 WRITE_ONCE(sc->caller_is_done, true);
1269 return 0;
1270}
1271EXPORT_SYMBOL_GPL(liquidio_change_mtu);
1272
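/**
 * lio_wait_for_clean_oq - wait for all active output queues to drain
 * @oct: octeon device
 *
 * Return: number of packets still pending after the retries are exhausted.
 */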
1273int lio_wait_for_clean_oq(struct octeon_device *oct)
1274{
1275 int retry = 100, pending_pkts = 0;
1276 int idx;
1277
1278 do {
1279 pending_pkts = 0;
1280
1281 for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
1282 if (!(oct->io_qmask.oq & BIT_ULL(idx)))
1283 continue;
1284 pending_pkts +=
1285 atomic_read(&oct->droq[idx]->pkts_pending);
1286 }
1287
1288 if (pending_pkts > 0)
1289 schedule_timeout_uninterruptible(1);
1290
1291 } while (retry-- && pending_pkts);
1292
1293 return pending_pkts;
1294}
1295EXPORT_SYMBOL_GPL(lio_wait_for_clean_oq);
1296
1297static void
1298octnet_nic_stats_callback(struct octeon_device *oct_dev,
1299 u32 status, void *ptr)
1300{
1301 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1302 struct oct_nic_stats_resp *resp =
1303 (struct oct_nic_stats_resp *)sc->virtrptr;
1304 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1305 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1306 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1307 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1308
1309 if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1310 octeon_swap_8B_data((u64 *)&resp->stats,
1311 (sizeof(struct oct_link_stats)) >> 3);
1312
1313 /* RX link-level stats */
1314 rstats->total_rcvd = rsp_rstats->total_rcvd;
1315 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1316 rstats->total_bcst = rsp_rstats->total_bcst;
1317 rstats->total_mcst = rsp_rstats->total_mcst;
1318 rstats->runts = rsp_rstats->runts;
1319 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1320 /* Accounts for over/under-run of buffers */
1321 rstats->fifo_err = rsp_rstats->fifo_err;
1322 rstats->dmac_drop = rsp_rstats->dmac_drop;
1323 rstats->fcs_err = rsp_rstats->fcs_err;
1324 rstats->jabber_err = rsp_rstats->jabber_err;
1325 rstats->l2_err = rsp_rstats->l2_err;
1326 rstats->frame_err = rsp_rstats->frame_err;
1327 rstats->red_drops = rsp_rstats->red_drops;
1328
1329 /* RX firmware stats */
1330 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1331 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1332 rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1333 rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1334 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1335 rstats->fw_err_link = rsp_rstats->fw_err_link;
1336 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1337 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1338 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1339
1340 /* Number of packets that are LROed */
1341 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1342 /* Number of octets that are LROed */
1343 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1344 /* Number of LRO packets formed */
1345 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1346 /* Number of times LRO of packet aborted */
1347 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1348 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1349 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1350 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1351 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1352 /* intrmod: packet forward rate */
1353 rstats->fwd_rate = rsp_rstats->fwd_rate;
1354
1355 /* TX link-level stats */
1356 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1357 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1358 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1359 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1360 tstats->ctl_sent = rsp_tstats->ctl_sent;
1361 /* Packets sent after one collision*/
1362 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1363 /* Packets sent after multiple collision*/
1364 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1365 /* Packets not sent due to max collisions */
1366 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1367 /* Packets not sent due to max deferrals */
1368 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1369 /* Accounts for over/under-run of buffers */
1370 tstats->fifo_err = rsp_tstats->fifo_err;
1371 tstats->runts = rsp_tstats->runts;
1372 /* Total number of collisions detected */
1373 tstats->total_collisions = rsp_tstats->total_collisions;
1374
1375 /* firmware stats */
1376 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1377 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1378 tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1379 tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1380 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1381 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1382 tstats->fw_err_link = rsp_tstats->fw_err_link;
1383 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1384 tstats->fw_tso = rsp_tstats->fw_tso;
1385 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1386 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1387 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1388
1389 resp->status = 1;
1390 } else {
1391 dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1392 resp->status = -1;
1393 }
1394}
1395
1396static int lio_fetch_vf_stats(struct lio *lio)
1397{
1398 struct octeon_device *oct_dev = lio->oct_dev;
1399 struct octeon_soft_command *sc;
1400 struct oct_nic_vf_stats_resp *resp;
1401
1402 int retval;
1403
1404 /* Alloc soft command */
1405 sc = (struct octeon_soft_command *)
1406 octeon_alloc_soft_command(oct_dev,
1407 0,
1408 sizeof(struct oct_nic_vf_stats_resp),
1409 0);
1410
1411 if (!sc) {
1412 dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1413 retval = -ENOMEM;
1414 goto lio_fetch_vf_stats_exit;
1415 }
1416
1417 resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
1418 memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
1419
1420 init_completion(&sc->complete);
1421 sc->sc_status = OCTEON_REQUEST_PENDING;
1422
1423 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1424
1425 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1426 OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
1427
1428 retval = octeon_send_soft_command(oct_dev, sc);
1429 if (retval == IQ_SEND_FAILED) {
1430 octeon_free_soft_command(oct_dev, sc);
1431 goto lio_fetch_vf_stats_exit;
1432 }
1433
1434 retval =
1435 wait_for_sc_completion_timeout(oct_dev, sc,
1436 (2 * LIO_SC_MAX_TMO_MS));
1437 if (retval) {
1438 dev_err(&oct_dev->pci_dev->dev,
1439 "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
1440 goto lio_fetch_vf_stats_exit;
1441 }
1442
1443 if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1444 octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
1445 (sizeof(u64)) >> 3);
1446
1447 if (resp->spoofmac_cnt != 0) {
1448 dev_warn(&oct_dev->pci_dev->dev,
1449 "%llu Spoofed packets detected\n",
1450 resp->spoofmac_cnt);
1451 }
1452 }
1453 WRITE_ONCE(sc->caller_is_done, 1);
1454
1455lio_fetch_vf_stats_exit:
1456 return retval;
1457}
1458
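/**
 * lio_fetch_stats - periodic work that fetches port statistics from firmware
 * @work: work_struct embedded in the interface's stats work context
 *
 * Reschedules itself while the interface is in the RUNNING state.
 */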
1459void lio_fetch_stats(struct work_struct *work)
1460{
1461 struct cavium_wk *wk = (struct cavium_wk *)work;
1462 struct lio *lio = wk->ctxptr;
1463 struct octeon_device *oct_dev = lio->oct_dev;
1464 struct octeon_soft_command *sc;
1465 struct oct_nic_stats_resp *resp;
1466 unsigned long time_in_jiffies;
1467 int retval;
1468
1469 if (OCTEON_CN23XX_PF(oct_dev)) {
1470 /* report spoofchk every 2 seconds */
1471 if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
1472 (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
1473 oct_dev->sriov_info.num_vfs_alloced) {
1474 lio_fetch_vf_stats(lio);
1475 }
1476
1477 oct_dev->vfstats_poll++;
1478 }
1479
1480 /* Alloc soft command */
1481 sc = (struct octeon_soft_command *)
1482 octeon_alloc_soft_command(oct_dev,
1483 0,
1484 sizeof(struct oct_nic_stats_resp),
1485 0);
1486
1487 if (!sc) {
1488 dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1489 goto lio_fetch_stats_exit;
1490 }
1491
1492 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1493 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1494
1495 init_completion(&sc->complete);
1496 sc->sc_status = OCTEON_REQUEST_PENDING;
1497
1498 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1499
1500 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1501 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1502
1503 retval = octeon_send_soft_command(oct_dev, sc);
1504 if (retval == IQ_SEND_FAILED) {
1505 octeon_free_soft_command(oct_dev, sc);
1506 goto lio_fetch_stats_exit;
1507 }
1508
1509 retval = wait_for_sc_completion_timeout(oct_dev, sc,
1510 (2 * LIO_SC_MAX_TMO_MS));
1511 if (retval) {
1512 dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1513 goto lio_fetch_stats_exit;
1514 }
1515
1516 octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
1517 WRITE_ONCE(sc->caller_is_done, true);
1518
1519lio_fetch_stats_exit:
1520 time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
1521 if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
1522 schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
1523
1524 return;
1525}
1526EXPORT_SYMBOL_GPL(lio_fetch_stats);
1527
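/**
 * liquidio_set_speed - request a new link speed from the firmware (CN23XX PF only)
 * @lio: per-network private data
 * @speed: requested link speed
 *
 * Return: 0 on success, negative error code on failure.
 */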
1528int liquidio_set_speed(struct lio *lio, int speed)
1529{
1530 struct octeon_device *oct = lio->oct_dev;
1531 struct oct_nic_seapi_resp *resp;
1532 struct octeon_soft_command *sc;
1533 union octnet_cmd *ncmd;
1534 int retval;
1535 u32 var;
1536
1537 if (oct->speed_setting == speed)
1538 return 0;
1539
1540 if (!OCTEON_CN23XX_PF(oct)) {
1541 dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1542 __func__);
1543 return -EOPNOTSUPP;
1544 }
1545
1546 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1547 sizeof(struct oct_nic_seapi_resp),
1548 0);
1549 if (!sc)
1550 return -ENOMEM;
1551
1552 ncmd = sc->virtdptr;
1553 resp = sc->virtrptr;
1554 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1555
1556 init_completion(&sc->complete);
1557 sc->sc_status = OCTEON_REQUEST_PENDING;
1558
1559 ncmd->u64 = 0;
1560 ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1561 ncmd->s.param1 = speed;
1562
1563 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1564
1565 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1566
1567 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1568 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1569
1570 retval = octeon_send_soft_command(oct, sc);
1571 if (retval == IQ_SEND_FAILED) {
1572 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1573 octeon_free_soft_command(oct, sc);
1574 retval = -EBUSY;
1575 } else {
1576 /* Wait for response or timeout */
1577 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1578 if (retval)
1579 return retval;
1580
1581 retval = resp->status;
1582
1583 if (retval) {
1584 dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1585 __func__, retval);
1586 WRITE_ONCE(sc->caller_is_done, true);
1587
1588 return -EIO;
1589 }
1590
1591 var = be32_to_cpu((__force __be32)resp->speed);
1592 if (var != speed) {
1593 dev_err(&oct->pci_dev->dev,
1594 "%s: setting failed speed= %x, expect %x\n",
1595 __func__, var, speed);
1596 }
1597
1598 oct->speed_setting = var;
1599 WRITE_ONCE(sc->caller_is_done, true);
1600 }
1601
1602 return retval;
1603}
1604
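/**
 * liquidio_get_speed - read the current link speed setting from the firmware
 * @lio: per-network private data
 *
 * Updates oct->speed_setting. Return: 0 on success, negative error code on failure.
 */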
1605int liquidio_get_speed(struct lio *lio)
1606{
1607 struct octeon_device *oct = lio->oct_dev;
1608 struct oct_nic_seapi_resp *resp;
1609 struct octeon_soft_command *sc;
1610 union octnet_cmd *ncmd;
1611 int retval;
1612
1613 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1614 sizeof(struct oct_nic_seapi_resp),
1615 0);
1616 if (!sc)
1617 return -ENOMEM;
1618
1619 ncmd = sc->virtdptr;
1620 resp = sc->virtrptr;
1621 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1622
1623 init_completion(&sc->complete);
1624 sc->sc_status = OCTEON_REQUEST_PENDING;
1625
1626 ncmd->u64 = 0;
1627 ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1628
1629 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1630
1631 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1632
1633 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1634 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1635
1636 retval = octeon_send_soft_command(oct, sc);
1637 if (retval == IQ_SEND_FAILED) {
1638 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1639 octeon_free_soft_command(oct, sc);
1640 retval = -EIO;
1641 } else {
1642 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1643 if (retval)
1644 return retval;
1645
1646 retval = resp->status;
1647 if (retval) {
1648 dev_err(&oct->pci_dev->dev,
1649 "%s failed retval=%d\n", __func__, retval);
1650 retval = -EIO;
1651 } else {
1652 u32 var;
1653
1654 var = be32_to_cpu((__force __be32)resp->speed);
1655 oct->speed_setting = var;
1656 if (var == 0xffff) {
1657 /* unable to access boot variables
1658 * get the default value based on the NIC type
1659 */
1660 if (oct->subsystem_id ==
1661 OCTEON_CN2350_25GB_SUBSYS_ID ||
1662 oct->subsystem_id ==
1663 OCTEON_CN2360_25GB_SUBSYS_ID) {
1664 oct->no_speed_setting = 1;
1665 oct->speed_setting = 25;
1666 } else {
1667 oct->speed_setting = 10;
1668 }
1669 }
1670
1671 }
1672 WRITE_ONCE(sc->caller_is_done, true);
1673 }
1674
1675 return retval;
1676}
1677EXPORT_SYMBOL_GPL(liquidio_get_speed);
1678
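/**
 * liquidio_set_fec - enable or disable RS-FEC (CN23XX PF, 25G boot speed only)
 * @lio: per-network private data
 * @on_off: SEAPI_CMD_FEC_DISABLE (0) or SEAPI_CMD_FEC_RS (1)
 *
 * Return: 0 on success, negative value on failure.
 */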
1679int liquidio_set_fec(struct lio *lio, int on_off)
1680{
1681 struct oct_nic_seapi_resp *resp;
1682 struct octeon_soft_command *sc;
1683 struct octeon_device *oct;
1684 union octnet_cmd *ncmd;
1685 int retval;
1686 u32 var;
1687
1688 oct = lio->oct_dev;
1689
1690 if (oct->props[lio->ifidx].fec == on_off)
1691 return 0;
1692
1693 if (!OCTEON_CN23XX_PF(oct)) {
1694 dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
1695 __func__);
1696 return -1;
1697 }
1698
1699 if (oct->speed_boot != 25) {
1700 dev_err(&oct->pci_dev->dev,
1701 "Set FEC only when link speed is 25G during insmod\n");
1702 return -1;
1703 }
1704
1705 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1706 sizeof(struct oct_nic_seapi_resp), 0);
1707 if (!sc) {
1708 dev_err(&oct->pci_dev->dev,
1709 "Failed to allocate soft command\n");
1710 return -ENOMEM;
1711 }
1712
1713 ncmd = sc->virtdptr;
1714 resp = sc->virtrptr;
1715 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1716
1717 init_completion(&sc->complete);
1718 sc->sc_status = OCTEON_REQUEST_PENDING;
1719
1720 ncmd->u64 = 0;
1721 ncmd->s.cmd = SEAPI_CMD_FEC_SET;
1722 ncmd->s.param1 = on_off;
1723 /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
1724
1725 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1726
1727 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1728
1729 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1730 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1731
1732 retval = octeon_send_soft_command(oct, sc);
1733 if (retval == IQ_SEND_FAILED) {
1734 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1735 octeon_free_soft_command(oct, sc);
1736 return -EIO;
1737 }
1738
1739 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1740 if (retval)
1741 return (-EIO);
1742
1743 var = be32_to_cpu(resp->fec_setting);
1744 resp->fec_setting = var;
1745 if (var != on_off) {
1746 dev_err(&oct->pci_dev->dev,
1747 "Setting failed fec= %x, expect %x\n",
1748 var, on_off);
1749 oct->props[lio->ifidx].fec = var;
1750 if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1751 oct->props[lio->ifidx].fec = 1;
1752 else
1753 oct->props[lio->ifidx].fec = 0;
1754 }
1755
1756 WRITE_ONCE(sc->caller_is_done, true);
1757
1758 if (oct->props[lio->ifidx].fec !=
1759 oct->props[lio->ifidx].fec_boot) {
1760 dev_dbg(&oct->pci_dev->dev,
1761 "Reload driver to change fec to %s\n",
1762 oct->props[lio->ifidx].fec ? "on" : "off");
1763 }
1764
1765 return retval;
1766}
1767
1768int liquidio_get_fec(struct lio *lio)
1769{
1770 struct oct_nic_seapi_resp *resp;
1771 struct octeon_soft_command *sc;
1772 struct octeon_device *oct;
1773 union octnet_cmd *ncmd;
1774 int retval;
1775 u32 var;
1776
1777 oct = lio->oct_dev;
1778
1779 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1780 sizeof(struct oct_nic_seapi_resp), 0);
1781 if (!sc)
1782 return -ENOMEM;
1783
1784 ncmd = sc->virtdptr;
1785 resp = sc->virtrptr;
1786 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1787
1788 init_completion(&sc->complete);
1789 sc->sc_status = OCTEON_REQUEST_PENDING;
1790
1791 ncmd->u64 = 0;
1792 ncmd->s.cmd = SEAPI_CMD_FEC_GET;
1793
1794 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1795
1796 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1797
1798 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1799 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1800
1801 retval = octeon_send_soft_command(oct, sc);
1802 if (retval == IQ_SEND_FAILED) {
1803 dev_info(&oct->pci_dev->dev,
1804 "%s: Failed to send soft command\n", __func__);
1805 octeon_free_soft_command(oct, sc);
1806 return -EIO;
1807 }
1808
1809 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1810 if (retval)
1811 return retval;
1812
1813 var = be32_to_cpu(resp->fec_setting);
1814 resp->fec_setting = var;
1815 if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
1816 oct->props[lio->ifidx].fec = 1;
1817 else
1818 oct->props[lio->ifidx].fec = 0;
1819
1820 WRITE_ONCE(sc->caller_is_done, true);
1821
1822 if (oct->props[lio->ifidx].fec !=
1823 oct->props[lio->ifidx].fec_boot) {
1824 dev_dbg(&oct->pci_dev->dev,
1825 "Reload driver to change fec to %s\n",
1826 oct->props[lio->ifidx].fec ? "on" : "off");
1827 }
1828
1829 return retval;
1830}
1831EXPORT_SYMBOL_GPL(liquidio_get_fec);
1832
