// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define DRV_NAME	"rvu_nicvf"
#define DRV_STRING	"Marvell RVU NIC Virtual Function Driver"

static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);

/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,
};

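/* Validate a PF/AF => VF mailbox response (ID, signature, return code)
 * and dispatch it to the matching mbox_handler_*() by message ID.
 */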
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
	}
}

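/* Work handler for PF/AF => VF responses: walk every message queued in
 * the mailbox region, dispatch it, and reset the mailbox once the last
 * response has been acked.
 */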
static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;
	u16 num_msgs;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	if (num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		/* Compare against the count read from the response header
		 * (the loop bound), not a possibly stale cached value, and
		 * reset the mailbox only after the last message.
		 */
		if (mdev->msgs_acked == (num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

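/* Handle a single PF => VF "up" request: only CGX link events are
 * expected; anything else is answered with an invalid-message reply.
 */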
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
						&vf->mbox.mbox_up, 0,
						sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

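/* Work handler for PF => VF "up" notifications: process each queued
 * request, then send all prepared responses back in one shot.
 */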
static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;
	u16 num_msgs;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	if (num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}

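/* Hard IRQ handler for the VF <=> PF mailbox vector: ack the interrupt,
 * inspect the shared MBOX0 register for pending DOWN replies and/or UP
 * notifications, and queue the corresponding work item for each.
 */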
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 mbox_data;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);

	/* Read latest mbox data */
	smp_rmb();

	if (mbox_data & MBOX_DOWN_MSG) {
		mbox_data &= ~MBOX_DOWN_MSG;
		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);

		/* Check for PF => VF response messages */
		mbox = &vf->mbox.mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
					 BIT_ULL(0));
	}

	if (mbox_data & MBOX_UP_MSG) {
		mbox_data &= ~MBOX_UP_MSG;
		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);

		/* Check for PF => VF notification messages */
		mbox = &vf->mbox.mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
					 BIT_ULL(0));
	}

	return IRQ_HANDLED;
}

static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable VF => PF mailbox IRQ */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, vf);
}

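/* Register the mailbox IRQ and, on first probe (probe_pf), verify mailbox
 * communication by sending a READY message and waiting for the AF/PF side
 * to answer before continuing.
 */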
static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUVF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

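/* Set up the VF <=> PF mailbox: an ordered workqueue, a mapping of the
 * shared mailbox region (BAR2 register space on cn10k, a reserved RAM
 * region elsewhere), and the down/up mailbox contexts.
 */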
static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!vf->mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
	} else {
		/* Mailbox is a reserved memory (in RAM) region shared between
		 * admin function (i.e. PF0) and this VF, shouldn't be mapped as
		 * device memory to allow unaligned accesses.
		 */
		hwbase = ioremap_wc(pci_resource_start(vf->pdev,
						       PCI_MBOX_BAR_NUM),
				    pci_resource_len(vf->pdev,
						     PCI_MBOX_BAR_NUM));
		if (!hwbase) {
			dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
			err = -ENOMEM;
			goto exit;
		}
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap(hwbase);
	destroy_workqueue(vf->mbox_wq);
	return err;
}

static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf;
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	/* LBKs do not receive link events so tell everyone we are up here */
	vf = netdev_priv(netdev);
	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}

static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}

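/* Transmit path: if the SQ has no room, stop the queue, then re-check
 * under a memory barrier in case SQBs were freed concurrently, and only
 * then report NETDEV_TX_BUSY back to the stack.
 */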
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

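/* .ndo_set_rx_mode runs in atomic context while mailbox requests can
 * sleep (mutex plus response wait), so the actual NIX_RX_MODE update is
 * deferred to a workqueue.
 */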
static void otx2vf_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	queue_work(vf->otx2_wq, &vf->rx_mode_work);
}

static void otx2vf_do_set_rx_mode(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
	struct net_device *netdev = vf->netdev;
	unsigned int flags = netdev->flags;
	struct nix_rx_mode *req;

	mutex_lock(&vf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
	if (!req) {
		mutex_unlock(&vf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (flags & IFF_PROMISC)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&vf->mbox);

	mutex_unlock(&vf->mbox.lock);
}

static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2vf_open(netdev);

	return err;
}

static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}

static int otx2vf_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	return otx2_handle_ntuple_tc_features(netdev, features);
}

static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_select_queue = otx2_select_queue,
	.ndo_set_rx_mode = otx2vf_set_rx_mode,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_set_features = otx2vf_set_features,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
	.ndo_eth_ioctl = otx2_ioctl,
	.ndo_setup_tc = otx2_setup_tc,
};

static int otx2_wq_init(struct otx2_nic *vf)
{
	vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
	if (!vf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
	INIT_WORK(&vf->reset_task, otx2vf_reset_task);
	return 0;
}

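/* The required MSI-X count (mailbox vector plus NIX completion vectors)
 * is only known once the AF has reported the NIX MSI-X offset, so free
 * the vectors allocated at probe time and re-allocate the exact number.
 */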
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2vf_register_mbox_intr(vf, false);
}

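/* Probe: map the VF CSR BAR, bring up the VF <=> PF mailbox, have the AF
 * attach NPA/NIX LFs, then set up and register the net device.
 */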
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	int err, qcount, qos_txqs;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	qcount = num_online_cpus();
	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;
	hw->non_qos_queues = qcount;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	/* Use CQE of 128 byte descriptor size by default */
	hw->xqe_size = 128;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	otx2_setup_dev_hw_settings(vf);
	/* Init VF <=> PF mailbox stuff */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(vf);
	if (err)
		goto err_detach_rsrc;

	/* Don't check for error. Proceed without PTP */
	otx2_ptp_init(vf);

	/* Assign default MAC address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_L4;
	netdev->features = netdev->hw_features;
	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_RXALL;
	netdev->hw_features |= NETIF_F_HW_TC;

	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(vf);

	/* To distinguish, for LBK VFs set netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_ptp_destroy;
	}

	err = otx2_wq_init(vf);
	if (err)
		goto err_unreg_netdev;

	otx2vf_set_ethtool_ops(netdev);

	err = otx2vf_mcam_flow_init(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_init_tc(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_register_dl(vf);
	if (err)
		goto err_shutdown_tc;

#ifdef CONFIG_DCB
	err = otx2_dcbnl_set_ops(netdev);
	if (err)
		goto err_shutdown_tc;
#endif
	otx2_qos_init(vf, qos_txqs);

	return 0;

err_shutdown_tc:
	otx2_shutdown_tc(vf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_ptp_destroy:
	otx2_ptp_destroy(vf);
err_detach_rsrc:
	free_percpu(vf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

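/* Teardown largely mirrors probe in reverse: quiesce pause/PFC settings,
 * unregister the netdev and its helpers, detach LF resources, and tear
 * down the mailbox before releasing PCI resources.
 */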
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	/* Disable 802.3x pause frames */
	if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(vf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (vf->pfc_en) {
		vf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(vf);
	}
#endif

	cancel_work_sync(&vf->reset_task);
	otx2_unregister_dl(vf);
	unregister_netdev(netdev);
	if (vf->otx2_wq)
		destroy_workqueue(vf->otx2_wq);
	otx2_ptp_destroy(vf);
	otx2_mcam_flow_del(vf);
	otx2_shutdown_tc(vf);
	otx2_shutdown_qos(vf);
	otx2_detach_resources(&vf->mbox);
	otx2vf_disable_mbox_intr(vf);
	free_percpu(vf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);