1 | /********************************************************************** |
2 | * Author: Cavium, Inc. |
3 | * |
4 | * Contact: support@cavium.com |
5 | * Please include "LiquidIO" in the subject. |
6 | * |
7 | * Copyright (c) 2003-2016 Cavium, Inc. |
8 | * |
9 | * This file is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License, Version 2, as |
11 | * published by the Free Software Foundation. |
12 | * |
13 | * This file is distributed in the hope that it will be useful, but |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or |
16 | * NONINFRINGEMENT. See the GNU General Public License for more details. |
17 | ***********************************************************************/ |
18 | #include <linux/module.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/pci.h> |
21 | #include <net/vxlan.h> |
22 | #include "liquidio_common.h" |
23 | #include "octeon_droq.h" |
24 | #include "octeon_iq.h" |
25 | #include "response_manager.h" |
26 | #include "octeon_device.h" |
27 | #include "octeon_nic.h" |
28 | #include "octeon_main.h" |
29 | #include "octeon_network.h" |
30 | #include "cn23xx_vf_device.h" |
31 | |
32 | MODULE_AUTHOR("Cavium Networks, <support@cavium.com>" ); |
33 | MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver" ); |
34 | MODULE_LICENSE("GPL" ); |
35 | |
36 | static int debug = -1; |
37 | module_param(debug, int, 0644); |
38 | MODULE_PARM_DESC(debug, "NETIF_MSG debug bits" ); |
39 | |
40 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) |
41 | |
/* Response returned by the core for a timestamp request.
 * NOTE(review): field semantics inferred from names — confirm against the
 * firmware response format.
 */
struct oct_timestamp_resp {
	u64 rh;		/* response header word */
	u64 timestamp;	/* hardware timestamp value */
	u64 status;	/* completion status */
};
47 | |
/* Per-packet TX metadata packed into a single 64-bit word.  The bitfield
 * halves are mirrored so the packed layout matches on both big- and
 * little-endian hosts.
 */
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;	/* GSO segment size */
		u16 gso_segs;	/* number of GSO segments */
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;	/* number of GSO segments */
		u16 gso_size;	/* GSO segment size */
#endif
	} s;
};
62 | |
/* Maximum header bytes reserved for a GSO packet.  The original line read
 * "#define 128" — the macro name was missing, which broke preprocessing
 * and left OCTNIC_GSO_MAX_SIZE below referencing an undefined symbol.
 */
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
/* Largest GSO payload accepted after reserving header room. */
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
66 | |
/* Forward declarations for routines referenced before their definitions. */
static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);
static int liquidio_stop(struct net_device *netdev);
72 | |
73 | static int lio_wait_for_oq_pkts(struct octeon_device *oct) |
74 | { |
75 | struct octeon_device_priv *oct_priv = oct->priv; |
76 | int retry = MAX_IO_PENDING_PKT_COUNT; |
77 | int pkt_cnt = 0, pending_pkts; |
78 | int i; |
79 | |
80 | do { |
81 | pending_pkts = 0; |
82 | |
83 | for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { |
84 | if (!(oct->io_qmask.oq & BIT_ULL(i))) |
85 | continue; |
86 | pkt_cnt += octeon_droq_check_hw_for_pkts(droq: oct->droq[i]); |
87 | } |
88 | if (pkt_cnt > 0) { |
89 | pending_pkts += pkt_cnt; |
90 | tasklet_schedule(t: &oct_priv->droq_tasklet); |
91 | } |
92 | pkt_cnt = 0; |
93 | schedule_timeout_uninterruptible(timeout: 1); |
94 | |
95 | } while (retry-- && pending_pkts); |
96 | |
97 | return pkt_cnt; |
98 | } |
99 | |
100 | /** |
101 | * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc |
102 | * @oct: Pointer to Octeon device |
103 | */ |
104 | static void pcierror_quiesce_device(struct octeon_device *oct) |
105 | { |
106 | int i; |
107 | |
108 | /* Disable the input and output queues now. No more packets will |
109 | * arrive from Octeon, but we should wait for all packet processing |
110 | * to finish. |
111 | */ |
112 | |
113 | /* To allow for in-flight requests */ |
114 | schedule_timeout_uninterruptible(timeout: 100); |
115 | |
116 | if (wait_for_pending_requests(oct)) |
117 | dev_err(&oct->pci_dev->dev, "There were pending requests\n" ); |
118 | |
119 | /* Force all requests waiting to be fetched by OCTEON to complete. */ |
120 | for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { |
121 | struct octeon_instr_queue *iq; |
122 | |
123 | if (!(oct->io_qmask.iq & BIT_ULL(i))) |
124 | continue; |
125 | iq = oct->instr_queue[i]; |
126 | |
127 | if (atomic_read(v: &iq->instr_pending)) { |
128 | spin_lock_bh(lock: &iq->lock); |
129 | iq->fill_cnt = 0; |
130 | iq->octeon_read_index = iq->host_write_index; |
131 | iq->stats.instr_processed += |
132 | atomic_read(v: &iq->instr_pending); |
133 | lio_process_iq_request_list(oct, iq, napi_budget: 0); |
134 | spin_unlock_bh(lock: &iq->lock); |
135 | } |
136 | } |
137 | |
138 | /* Force all pending ordered list requests to time out. */ |
139 | lio_process_ordered_list(octeon_dev: oct, force_quit: 1); |
140 | |
141 | /* We do not need to wait for output queue packets to be processed. */ |
142 | } |
143 | |
144 | /** |
145 | * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status |
146 | * @dev: Pointer to PCI device |
147 | */ |
148 | static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) |
149 | { |
150 | u32 status, mask; |
151 | int pos = 0x100; |
152 | |
153 | pr_info("%s :\n" , __func__); |
154 | |
155 | pci_read_config_dword(dev, where: pos + PCI_ERR_UNCOR_STATUS, val: &status); |
156 | pci_read_config_dword(dev, where: pos + PCI_ERR_UNCOR_SEVER, val: &mask); |
157 | if (dev->error_state == pci_channel_io_normal) |
158 | status &= ~mask; /* Clear corresponding nonfatal bits */ |
159 | else |
160 | status &= mask; /* Clear corresponding fatal bits */ |
161 | pci_write_config_dword(dev, where: pos + PCI_ERR_UNCOR_STATUS, val: status); |
162 | } |
163 | |
164 | /** |
165 | * stop_pci_io - Stop all PCI IO to a given device |
166 | * @oct: Pointer to Octeon device |
167 | */ |
168 | static void stop_pci_io(struct octeon_device *oct) |
169 | { |
170 | struct msix_entry *msix_entries; |
171 | int i; |
172 | |
173 | /* No more instructions will be forwarded. */ |
174 | atomic_set(v: &oct->status, OCT_DEV_IN_RESET); |
175 | |
176 | for (i = 0; i < oct->ifcount; i++) |
177 | netif_device_detach(dev: oct->props[i].netdev); |
178 | |
179 | /* Disable interrupts */ |
180 | oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); |
181 | |
182 | pcierror_quiesce_device(oct); |
183 | if (oct->msix_on) { |
184 | msix_entries = (struct msix_entry *)oct->msix_entries; |
185 | for (i = 0; i < oct->num_msix_irqs; i++) { |
186 | /* clear the affinity_cpumask */ |
187 | irq_set_affinity_hint(irq: msix_entries[i].vector, |
188 | NULL); |
189 | free_irq(msix_entries[i].vector, |
190 | &oct->ioq_vector[i]); |
191 | } |
192 | pci_disable_msix(dev: oct->pci_dev); |
193 | kfree(objp: oct->msix_entries); |
194 | oct->msix_entries = NULL; |
195 | octeon_free_ioq_vector(oct); |
196 | } |
197 | dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n" , |
198 | lio_get_state_string(&oct->status)); |
199 | |
200 | /* making it a common function for all OCTEON models */ |
201 | cleanup_aer_uncorrect_error_status(dev: oct->pci_dev); |
202 | |
203 | pci_disable_device(dev: oct->pci_dev); |
204 | } |
205 | |
206 | /** |
207 | * liquidio_pcie_error_detected - called when PCI error is detected |
208 | * @pdev: Pointer to PCI device |
209 | * @state: The current pci connection state |
210 | * |
211 | * This function is called after a PCI bus error affecting |
212 | * this device has been detected. |
213 | */ |
214 | static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, |
215 | pci_channel_state_t state) |
216 | { |
217 | struct octeon_device *oct = pci_get_drvdata(pdev); |
218 | |
219 | /* Non-correctable Non-fatal errors */ |
220 | if (state == pci_channel_io_normal) { |
221 | dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n" ); |
222 | cleanup_aer_uncorrect_error_status(dev: oct->pci_dev); |
223 | return PCI_ERS_RESULT_CAN_RECOVER; |
224 | } |
225 | |
226 | /* Non-correctable Fatal errors */ |
227 | dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n" ); |
228 | stop_pci_io(oct); |
229 | |
230 | return PCI_ERS_RESULT_DISCONNECT; |
231 | } |
232 | |
/* For PCI-E Advanced Error Recovery (AER) Interface.  Only error
 * detection is implemented; no slot-reset or resume hooks are provided.
 */
static const struct pci_error_handlers liquidio_vf_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
};
237 | |
/* PCI IDs claimed by this driver: the CN23xx virtual function. */
static const struct pci_device_id liquidio_vf_pci_tbl[] = {
	{
		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0	/* sentinel */
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
248 | |
/* PCI driver glue tying together the probe/remove entry points and the
 * AER error handlers declared above.
 */
static struct pci_driver liquidio_vf_pci_driver = {
	.name = "LiquidIO_VF" ,
	.id_table = liquidio_vf_pci_tbl,
	.probe = liquidio_vf_probe,
	.remove = liquidio_vf_remove,
	.err_handler = &liquidio_vf_err_handler, /* For AER */
};
256 | |
257 | /** |
258 | * print_link_info - Print link information |
259 | * @netdev: network device |
260 | */ |
261 | static void print_link_info(struct net_device *netdev) |
262 | { |
263 | struct lio *lio = GET_LIO(netdev); |
264 | |
265 | if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && |
266 | ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { |
267 | struct oct_link_info *linfo = &lio->linfo; |
268 | |
269 | if (linfo->link.s.link_up) { |
270 | netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n" , |
271 | linfo->link.s.speed, |
272 | (linfo->link.s.duplex) ? "Full" : "Half" ); |
273 | } else { |
274 | netif_info(lio, link, lio->netdev, "Link Down\n" ); |
275 | } |
276 | } |
277 | } |
278 | |
279 | /** |
280 | * octnet_link_status_change - Routine to notify MTU change |
281 | * @work: work_struct data structure |
282 | */ |
283 | static void octnet_link_status_change(struct work_struct *work) |
284 | { |
285 | struct cavium_wk *wk = (struct cavium_wk *)work; |
286 | struct lio *lio = (struct lio *)wk->ctxptr; |
287 | |
288 | /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. |
289 | * this API is invoked only when new max-MTU of the interface is |
290 | * less than current MTU. |
291 | */ |
292 | rtnl_lock(); |
293 | dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); |
294 | rtnl_unlock(); |
295 | } |
296 | |
297 | /** |
298 | * setup_link_status_change_wq - Sets up the mtu status change work |
299 | * @netdev: network device |
300 | */ |
301 | static int setup_link_status_change_wq(struct net_device *netdev) |
302 | { |
303 | struct lio *lio = GET_LIO(netdev); |
304 | struct octeon_device *oct = lio->oct_dev; |
305 | |
306 | lio->link_status_wq.wq = alloc_workqueue(fmt: "link-status" , |
307 | flags: WQ_MEM_RECLAIM, max_active: 0); |
308 | if (!lio->link_status_wq.wq) { |
309 | dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n" ); |
310 | return -1; |
311 | } |
312 | INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, |
313 | octnet_link_status_change); |
314 | lio->link_status_wq.wk.ctxptr = lio; |
315 | |
316 | return 0; |
317 | } |
318 | |
319 | static void cleanup_link_status_change_wq(struct net_device *netdev) |
320 | { |
321 | struct lio *lio = GET_LIO(netdev); |
322 | |
323 | if (lio->link_status_wq.wq) { |
324 | cancel_delayed_work_sync(dwork: &lio->link_status_wq.wk.work); |
325 | destroy_workqueue(wq: lio->link_status_wq.wq); |
326 | } |
327 | } |
328 | |
329 | /** |
330 | * update_link_status - Update link status |
331 | * @netdev: network device |
332 | * @ls: link status structure |
333 | * |
334 | * Called on receipt of a link status response from the core application to |
335 | * update each interface's link status. |
336 | */ |
337 | static void update_link_status(struct net_device *netdev, |
338 | union oct_link_status *ls) |
339 | { |
340 | struct lio *lio = GET_LIO(netdev); |
341 | int current_max_mtu = lio->linfo.link.s.mtu; |
342 | struct octeon_device *oct = lio->oct_dev; |
343 | |
344 | if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { |
345 | lio->linfo.link.u64 = ls->u64; |
346 | |
347 | print_link_info(netdev); |
348 | lio->link_changes++; |
349 | |
350 | if (lio->linfo.link.s.link_up) { |
351 | netif_carrier_on(dev: netdev); |
352 | wake_txqs(netdev); |
353 | } else { |
354 | netif_carrier_off(dev: netdev); |
355 | stop_txqs(netdev); |
356 | } |
357 | |
358 | if (lio->linfo.link.s.mtu != current_max_mtu) { |
359 | dev_info(&oct->pci_dev->dev, |
360 | "Max MTU Changed from %d to %d\n" , |
361 | current_max_mtu, lio->linfo.link.s.mtu); |
362 | netdev->max_mtu = lio->linfo.link.s.mtu; |
363 | } |
364 | |
365 | if (lio->linfo.link.s.mtu < netdev->mtu) { |
366 | dev_warn(&oct->pci_dev->dev, |
367 | "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n" , |
368 | netdev->mtu, lio->linfo.link.s.mtu); |
369 | queue_delayed_work(wq: lio->link_status_wq.wq, |
370 | dwork: &lio->link_status_wq.wk.work, delay: 0); |
371 | } |
372 | } |
373 | } |
374 | |
375 | /** |
376 | * liquidio_vf_probe - PCI probe handler |
377 | * @pdev: PCI device structure |
378 | * @ent: unused |
379 | */ |
380 | static int |
381 | liquidio_vf_probe(struct pci_dev *pdev, |
382 | const struct pci_device_id __maybe_unused *ent) |
383 | { |
384 | struct octeon_device *oct_dev = NULL; |
385 | |
386 | oct_dev = octeon_allocate_device(pci_id: pdev->device, |
387 | priv_size: sizeof(struct octeon_device_priv)); |
388 | |
389 | if (!oct_dev) { |
390 | dev_err(&pdev->dev, "Unable to allocate device\n" ); |
391 | return -ENOMEM; |
392 | } |
393 | oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; |
394 | |
395 | dev_info(&pdev->dev, "Initializing device %x:%x.\n" , |
396 | (u32)pdev->vendor, (u32)pdev->device); |
397 | |
398 | /* Assign octeon_device for this device to the private data area. */ |
399 | pci_set_drvdata(pdev, data: oct_dev); |
400 | |
401 | /* set linux specific device pointer */ |
402 | oct_dev->pci_dev = pdev; |
403 | |
404 | oct_dev->subsystem_id = pdev->subsystem_vendor | |
405 | (pdev->subsystem_device << 16); |
406 | |
407 | if (octeon_device_init(oct: oct_dev)) { |
408 | liquidio_vf_remove(pdev); |
409 | return -ENOMEM; |
410 | } |
411 | |
412 | dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n" ); |
413 | |
414 | return 0; |
415 | } |
416 | |
417 | /** |
418 | * octeon_pci_flr - PCI FLR for each Octeon device. |
419 | * @oct: octeon device |
420 | */ |
421 | static void octeon_pci_flr(struct octeon_device *oct) |
422 | { |
423 | pci_save_state(dev: oct->pci_dev); |
424 | |
425 | pci_cfg_access_lock(dev: oct->pci_dev); |
426 | |
427 | /* Quiesce the device completely */ |
428 | pci_write_config_word(dev: oct->pci_dev, PCI_COMMAND, |
429 | PCI_COMMAND_INTX_DISABLE); |
430 | |
431 | pcie_flr(dev: oct->pci_dev); |
432 | |
433 | pci_cfg_access_unlock(dev: oct->pci_dev); |
434 | |
435 | pci_restore_state(dev: oct->pci_dev); |
436 | } |
437 | |
/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 *
 * Tears the device down in reverse order of initialization.  The switch
 * cases are ordered from most- to least-initialized state and deliberately
 * fall through: teardown starts at the device's current state and runs
 * every remaining step down to OCT_DEV_BEGIN_STATE.
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv = oct->priv;
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(v: &oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(v: &oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n" ,
			lio_get_state_string(&oct->status));

		/* Brief pause so in-flight work can drain. */
		schedule_timeout_uninterruptible(HZ / 10);

		fallthrough;
	case OCT_DEV_HOST_OK:
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n" );

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n" );

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n" );

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(v: &iq->instr_pending)) {
				spin_lock_bh(lock: &iq->lock);
				iq->fill_cnt = 0;
				/* Pretend the hardware consumed everything. */
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(v: &iq->instr_pending);
				lio_process_iq_request_list(oct, iq, napi_budget: 0);
				spin_unlock_bh(lock: &iq->lock);
			}
		}

		/* Time out ordered-list requests, then free soft commands. */
		lio_process_ordered_list(octeon_dev: oct, force_quit: 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		fallthrough;
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				/* Only release vectors still registered. */
				if (oct->ioq_vector[i].vector) {
					irq_set_affinity_hint(
						irq: msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			pci_disable_msix(dev: oct->pci_dev);
			kfree(objp: oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(objp: oct->irq_name_storage);
			oct->irq_name_storage = NULL;
		}
		/* Soft reset the octeon device before exiting */
		if (!pcie_reset_flr(dev: oct->pci_dev, PCI_RESET_PROBE))
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		fallthrough;
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		fallthrough;
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Settle time before deleting the output queues. */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct_dev: oct, q_no: i);
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(octeon_dev: oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(octeon_dev: oct, iq_no: i);
		}

		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(octeon_dev: oct);
		cancel_delayed_work_sync(dwork: &oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, baridx: 0);
		octeon_unmap_pci_barx(oct, baridx: 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(dev: oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}

	/* Kill the DROQ tasklet regardless of how far teardown went. */
	tasklet_kill(t: &oct_priv->droq_tasklet);
}
589 | |
/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop (1 starts Rx, 0 stops it)
 *
 * Sends an OCTNET_CMD_RX_CTL soft command to the device and waits for the
 * response.  On success the new Rx state is recorded in the interface
 * properties.
 *
 * Return: 0 on success or when already in the requested state; negative
 * value on allocation, send, or completion-wait failure.
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	/* Nothing to do if Rx is already in the requested state. */
	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  rdatasize: 16, ctxsize: 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n" );
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	/* Byte-swap each 64-bit command word for the device. */
	octeon_swap_8B_data(data: (u64 *)ncmd, blocks: (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, irh_ossp: 0, ossp0: 0, ossp1: 0);

	init_completion(x: &sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n" );
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct_dev: oct, sc, timeout: 0);
		if (retval)
			/* NOTE(review): on timeout, sc is not freed here and
			 * caller_is_done is not set — presumably reclaimed by
			 * the zombie-list machinery; confirm.
			 */
			return retval;

		oct->props[lio->ifidx].rx_on = start_stop;
		/* Hand the soft command back to the response machinery. */
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}
648 | |
/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 * Ordering matters: the interface is stopped first, NAPI disabled and
 * deleted, then the netdev unregistered and freed.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n" ,
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n" );

	/* Bring the interface down if it is still running. */
	if (atomic_read(v: &lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(n: napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		/* Fall back from NAPI poll mode on queue 0. */
		oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	/* Hand DROQ processing back to the tasklet now that NAPI is gone. */
	tasklet_enable(t: &oct_priv->droq_tasklet);

	if (atomic_read(v: &lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(dev: netdev);

	cleanup_rx_oom_poll_fn(netdev);

	cleanup_link_status_change_wq(netdev);

	lio_delete_glists(lio);

	free_netdev(dev: netdev);

	/* Mark the slot unused. */
	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
707 | |
708 | /** |
709 | * liquidio_stop_nic_module - Stop complete NIC functionality |
710 | * @oct: octeon device |
711 | */ |
712 | static int liquidio_stop_nic_module(struct octeon_device *oct) |
713 | { |
714 | struct lio *lio; |
715 | int i, j; |
716 | |
717 | dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n" ); |
718 | if (!oct->ifcount) { |
719 | dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n" ); |
720 | return 1; |
721 | } |
722 | |
723 | spin_lock_bh(lock: &oct->cmd_resp_wqlock); |
724 | oct->cmd_resp_state = OCT_DRV_OFFLINE; |
725 | spin_unlock_bh(lock: &oct->cmd_resp_wqlock); |
726 | |
727 | for (i = 0; i < oct->ifcount; i++) { |
728 | lio = GET_LIO(oct->props[i].netdev); |
729 | for (j = 0; j < oct->num_oqs; j++) |
730 | octeon_unregister_droq_ops(oct, |
731 | q_no: lio->linfo.rxpciq[j].s.q_no); |
732 | } |
733 | |
734 | for (i = 0; i < oct->ifcount; i++) |
735 | liquidio_destroy_nic_device(oct, ifidx: i); |
736 | |
737 | dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n" ); |
738 | return 0; |
739 | } |
740 | |
741 | /** |
742 | * liquidio_vf_remove - Cleans up resources at unload time |
743 | * @pdev: PCI device structure |
744 | */ |
745 | static void liquidio_vf_remove(struct pci_dev *pdev) |
746 | { |
747 | struct octeon_device *oct_dev = pci_get_drvdata(pdev); |
748 | |
749 | dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n" ); |
750 | |
751 | if (oct_dev->app_mode == CVM_DRV_NIC_APP) |
752 | liquidio_stop_nic_module(oct: oct_dev); |
753 | |
754 | /* Reset the octeon device and cleanup all memory allocated for |
755 | * the octeon device by driver. |
756 | */ |
757 | octeon_destroy_resources(oct: oct_dev); |
758 | |
759 | dev_info(&oct_dev->pci_dev->dev, "Device removed\n" ); |
760 | |
761 | /* This octeon device has been removed. Update the global |
762 | * data structure to reflect this. Free the device structure. |
763 | */ |
764 | octeon_free_device_mem(oct: oct_dev); |
765 | } |
766 | |
767 | /** |
768 | * octeon_pci_os_setup - PCI initialization for each Octeon device. |
769 | * @oct: octeon device |
770 | */ |
771 | static int octeon_pci_os_setup(struct octeon_device *oct) |
772 | { |
773 | #ifdef CONFIG_PCI_IOV |
774 | /* setup PCI stuff first */ |
775 | if (!oct->pci_dev->physfn) |
776 | octeon_pci_flr(oct); |
777 | #endif |
778 | |
779 | if (pci_enable_device(dev: oct->pci_dev)) { |
780 | dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n" ); |
781 | return 1; |
782 | } |
783 | |
784 | if (dma_set_mask_and_coherent(dev: &oct->pci_dev->dev, DMA_BIT_MASK(64))) { |
785 | dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n" ); |
786 | pci_disable_device(dev: oct->pci_dev); |
787 | return 1; |
788 | } |
789 | |
790 | /* Enable PCI DMA Master. */ |
791 | pci_set_master(dev: oct->pci_dev); |
792 | |
793 | return 0; |
794 | } |
795 | |
796 | /** |
797 | * free_netbuf - Unmap and free network buffer |
798 | * @buf: buffer |
799 | */ |
800 | static void free_netbuf(void *buf) |
801 | { |
802 | struct octnet_buf_free_info *finfo; |
803 | struct sk_buff *skb; |
804 | struct lio *lio; |
805 | |
806 | finfo = (struct octnet_buf_free_info *)buf; |
807 | skb = finfo->skb; |
808 | lio = finfo->lio; |
809 | |
810 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, |
811 | DMA_TO_DEVICE); |
812 | |
813 | tx_buffer_free(buffer: skb); |
814 | } |
815 | |
816 | /** |
817 | * free_netsgbuf - Unmap and free gather buffer |
818 | * @buf: buffer |
819 | */ |
820 | static void free_netsgbuf(void *buf) |
821 | { |
822 | struct octnet_buf_free_info *finfo; |
823 | struct octnic_gather *g; |
824 | struct sk_buff *skb; |
825 | int i, frags, iq; |
826 | struct lio *lio; |
827 | |
828 | finfo = (struct octnet_buf_free_info *)buf; |
829 | skb = finfo->skb; |
830 | lio = finfo->lio; |
831 | g = finfo->g; |
832 | frags = skb_shinfo(skb)->nr_frags; |
833 | |
834 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, |
835 | g->sg[0].ptr[0], (skb->len - skb->data_len), |
836 | DMA_TO_DEVICE); |
837 | |
838 | i = 1; |
839 | while (frags--) { |
840 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
841 | |
842 | dma_unmap_page(&lio->oct_dev->pci_dev->dev, |
843 | g->sg[(i >> 2)].ptr[(i & 3)], |
844 | skb_frag_size(frag), DMA_TO_DEVICE); |
845 | i++; |
846 | } |
847 | |
848 | iq = skb_iq(oct: lio->oct_dev, skb); |
849 | |
850 | spin_lock(lock: &lio->glist_lock[iq]); |
851 | list_add_tail(new: &g->list, head: &lio->glist[iq]); |
852 | spin_unlock(lock: &lio->glist_lock[iq]); |
853 | |
854 | tx_buffer_free(buffer: skb); |
855 | } |
856 | |
857 | /** |
858 | * free_netsgbuf_with_resp - Unmap and free gather buffer with response |
859 | * @buf: buffer |
860 | */ |
861 | static void free_netsgbuf_with_resp(void *buf) |
862 | { |
863 | struct octnet_buf_free_info *finfo; |
864 | struct octeon_soft_command *sc; |
865 | struct octnic_gather *g; |
866 | struct sk_buff *skb; |
867 | int i, frags, iq; |
868 | struct lio *lio; |
869 | |
870 | sc = (struct octeon_soft_command *)buf; |
871 | skb = (struct sk_buff *)sc->callback_arg; |
872 | finfo = (struct octnet_buf_free_info *)&skb->cb; |
873 | |
874 | lio = finfo->lio; |
875 | g = finfo->g; |
876 | frags = skb_shinfo(skb)->nr_frags; |
877 | |
878 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, |
879 | g->sg[0].ptr[0], (skb->len - skb->data_len), |
880 | DMA_TO_DEVICE); |
881 | |
882 | i = 1; |
883 | while (frags--) { |
884 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
885 | |
886 | dma_unmap_page(&lio->oct_dev->pci_dev->dev, |
887 | g->sg[(i >> 2)].ptr[(i & 3)], |
888 | skb_frag_size(frag), DMA_TO_DEVICE); |
889 | i++; |
890 | } |
891 | |
892 | iq = skb_iq(oct: lio->oct_dev, skb); |
893 | |
894 | spin_lock(lock: &lio->glist_lock[iq]); |
895 | list_add_tail(new: &g->list, head: &lio->glist[iq]); |
896 | spin_unlock(lock: &lio->glist_lock[iq]); |
897 | |
898 | /* Don't free the skb yet */ |
899 | } |
900 | |
901 | /** |
902 | * liquidio_open - Net device open for LiquidIO |
903 | * @netdev: network device |
904 | */ |
905 | static int liquidio_open(struct net_device *netdev) |
906 | { |
907 | struct lio *lio = GET_LIO(netdev); |
908 | struct octeon_device *oct = lio->oct_dev; |
909 | struct octeon_device_priv *oct_priv = oct->priv; |
910 | struct napi_struct *napi, *n; |
911 | int ret = 0; |
912 | |
913 | if (!oct->props[lio->ifidx].napi_enabled) { |
914 | tasklet_disable(t: &oct_priv->droq_tasklet); |
915 | |
916 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
917 | napi_enable(n: napi); |
918 | |
919 | oct->props[lio->ifidx].napi_enabled = 1; |
920 | |
921 | oct->droq[0]->ops.poll_mode = 1; |
922 | } |
923 | |
924 | ifstate_set(lio, LIO_IFSTATE_RUNNING); |
925 | |
926 | /* Ready for link status updates */ |
927 | lio->intf_open = 1; |
928 | |
929 | netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n" ); |
930 | start_txqs(netdev); |
931 | |
932 | INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); |
933 | lio->stats_wk.ctxptr = lio; |
934 | schedule_delayed_work(dwork: &lio->stats_wk.work, delay: msecs_to_jiffies |
935 | (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); |
936 | |
937 | /* tell Octeon to start forwarding packets to host */ |
938 | ret = send_rx_ctrl_cmd(lio, start_stop: 1); |
939 | if (ret) |
940 | return ret; |
941 | |
942 | dev_info(&oct->pci_dev->dev, "%s interface is opened\n" , netdev->name); |
943 | |
944 | return ret; |
945 | } |
946 | |
947 | /** |
948 | * liquidio_stop - jNet device stop for LiquidIO |
949 | * @netdev: network device |
950 | */ |
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv = oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	/* tell Octeon to stop forwarding packets to host; done first so no
	 * new traffic arrives while the queues below are torn down
	 */
	ret = send_rx_ctrl_cmd(lio, start_stop: 0);
	if (ret)
		return ret;

	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
	/* Inform that netif carrier is down */
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;

	netif_carrier_off(dev: netdev);
	lio->link_changes++;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	stop_txqs(netdev);

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	/* Undo the NAPI/tasklet switch made at open time (see the matching
	 * enable path in liquidio_open)
	 */
	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(n: napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(t: &oct_priv->droq_tasklet);
	}

	/* Stop the periodic stats poller started in liquidio_open */
	cancel_delayed_work_sync(dwork: &lio->stats_wk.work);

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return ret;
}
998 | |
999 | /** |
1000 | * get_new_flags - Converts a mask based on net device flags |
1001 | * @netdev: network device |
1002 | * |
1003 | * This routine generates a octnet_ifflags mask from the net device flags |
1004 | * received from the OS. |
1005 | */ |
1006 | static enum octnet_ifflags get_new_flags(struct net_device *netdev) |
1007 | { |
1008 | enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; |
1009 | |
1010 | if (netdev->flags & IFF_PROMISC) |
1011 | f |= OCTNET_IFFLAG_PROMISC; |
1012 | |
1013 | if (netdev->flags & IFF_ALLMULTI) |
1014 | f |= OCTNET_IFFLAG_ALLMULTI; |
1015 | |
1016 | if (netdev->flags & IFF_MULTICAST) { |
1017 | f |= OCTNET_IFFLAG_MULTICAST; |
1018 | |
1019 | /* Accept all multicast addresses if there are more than we |
1020 | * can handle |
1021 | */ |
1022 | if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) |
1023 | f |= OCTNET_IFFLAG_ALLMULTI; |
1024 | } |
1025 | |
1026 | if (netdev->flags & IFF_BROADCAST) |
1027 | f |= OCTNET_IFFLAG_BROADCAST; |
1028 | |
1029 | return f; |
1030 | } |
1031 | |
/* Push the netdev's unicast (secondary MAC) address list to the firmware. */
static void liquidio_set_uc_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mac;

	/* Skip the control command when the list length is unchanged.
	 * NOTE(review): only the count is compared, not the contents — a
	 * same-size list with different addresses would be missed; confirm
	 * callers always change the count when addresses change.
	 */
	if (lio->netdev_uc_count == netdev_uc_count(netdev))
		return;

	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
		return;
	}

	lio->netdev_uc_count = netdev_uc_count(netdev);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
	nctrl.ncmd.s.more = lio->netdev_uc_count;	/* entry count rides in 'more' */
	nctrl.ncmd.s.param1 = oct->vf_num;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mac = &nctrl.udd[0];
	netdev_for_each_uc_addr(ha, netdev) {
		/* each 6-byte MAC is stored at byte offset 2 of a u64 slot */
		ether_addr_copy(dst: ((u8 *)mac) + 2, src: ha->addr);
		mac++;
	}

	/* fire-and-forget: completion is reported via nctrl.cb_fn */
	octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl);
}
1067 | |
1068 | /** |
1069 | * liquidio_set_mcast_list - Net device set_multicast_list |
1070 | * @netdev: network device |
1071 | */ |
1072 | static void liquidio_set_mcast_list(struct net_device *netdev) |
1073 | { |
1074 | int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); |
1075 | struct lio *lio = GET_LIO(netdev); |
1076 | struct octeon_device *oct = lio->oct_dev; |
1077 | struct octnic_ctrl_pkt nctrl; |
1078 | struct netdev_hw_addr *ha; |
1079 | u64 *mc; |
1080 | int ret; |
1081 | |
1082 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1083 | |
1084 | /* Create a ctrl pkt command to be sent to core app. */ |
1085 | nctrl.ncmd.u64 = 0; |
1086 | nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; |
1087 | nctrl.ncmd.s.param1 = get_new_flags(netdev); |
1088 | nctrl.ncmd.s.param2 = mc_count; |
1089 | nctrl.ncmd.s.more = mc_count; |
1090 | nctrl.netpndev = (u64)netdev; |
1091 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1092 | |
1093 | /* copy all the addresses into the udd */ |
1094 | mc = &nctrl.udd[0]; |
1095 | netdev_for_each_mc_addr(ha, netdev) { |
1096 | *mc = 0; |
1097 | ether_addr_copy(dst: ((u8 *)mc) + 2, src: ha->addr); |
1098 | /* no need to swap bytes */ |
1099 | if (++mc > &nctrl.udd[mc_count]) |
1100 | break; |
1101 | } |
1102 | |
1103 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1104 | |
1105 | /* Apparently, any activity in this call from the kernel has to |
1106 | * be atomic. So we won't wait for response. |
1107 | */ |
1108 | |
1109 | ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl); |
1110 | if (ret) { |
1111 | dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n" , |
1112 | ret); |
1113 | } |
1114 | |
1115 | liquidio_set_uc_list(netdev); |
1116 | } |
1117 | |
1118 | /** |
1119 | * liquidio_set_mac - Net device set_mac_address |
1120 | * @netdev: network device |
1121 | * @p: opaque pointer to sockaddr |
1122 | */ |
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = (struct sockaddr *)p;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	if (!is_valid_ether_addr(addr: addr->sa_data))
		return -EADDRNOTAVAIL;

	/* nothing to do when the address is unchanged */
	if (ether_addr_equal(addr1: addr->sa_data, addr2: netdev->dev_addr))
		return 0;

	/* an admin-assigned (PF-set) MAC may not be overridden by the VF */
	if (lio->linfo.macaddr_is_admin_asgnd)
		return -EPERM;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy(dst: (u8 *)&nctrl.udd[0] + 2, src: addr->sa_data);

	ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl);
	if (ret < 0) {
		/* NOTE(review): every negative send status is reported to the
		 * caller as -ENOMEM — confirm that is the intended mapping.
		 */
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	/* the firmware can veto the change even when the send succeeded */
	if (nctrl.sc_status ==
	    FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
		return -EPERM;
	}

	/* commit the new address locally only after the firmware accepted it */
	eth_hw_addr_set(dev: netdev, addr: addr->sa_data);
	ether_addr_copy(dst: ((u8 *)&lio->linfo.hw_addr) + 2, src: addr->sa_data);

	return 0;
}
1170 | |
static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	/* leave lstats untouched while the device is resetting */
	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	/* aggregate TX counters over all instruction (TX) queues */
	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	/* aggregate RX counters over all output (RX) queues */
	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	/* remaining fields come from the firmware-maintained link stats */
	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* recved pkt with crc error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* recv'd frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors;
}
1237 | |
1238 | /** |
1239 | * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl |
1240 | * @netdev: network device |
1241 | * @ifr: interface request |
1242 | */ |
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct lio *lio = GET_LIO(netdev);
	struct hwtstamp_config conf;

	if (copy_from_user(to: &conf, from: ifr->ifr_data, n: sizeof(conf)))
		return -EFAULT;

	/* only plain on/off TX timestamping is supported */
	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	/* every supported RX filter is widened to HWTSTAMP_FILTER_ALL; the
	 * coerced value is copied back to userspace at the end, as the
	 * SIOCSHWTSTAMP contract requires
	 */
	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	/* record whether RX timestamps are enabled for this interface */
	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(to: ifr->ifr_data, from: &conf, n: sizeof(conf)) ? -EFAULT : 0;
}
1291 | |
1292 | /** |
1293 | * liquidio_ioctl - ioctl handler |
1294 | * @netdev: network device |
1295 | * @ifr: interface request |
1296 | * @cmd: command |
1297 | */ |
1298 | static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
1299 | { |
1300 | switch (cmd) { |
1301 | case SIOCSHWTSTAMP: |
1302 | return hwtstamp_ioctl(netdev, ifr); |
1303 | default: |
1304 | return -EOPNOTSUPP; |
1305 | } |
1306 | } |
1307 | |
/* Completion callback for a timestamped TX soft command: pull the TX
 * timestamp out of the response buffer, deliver it to the stack via
 * skb_tstamp_tx(), then release both the soft command and the skb.
 */
static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octnet_buf_free_info *finfo;
	struct oct_timestamp_resp *resp;
	struct octeon_soft_command *sc;
	struct lio *lio;

	/* context lives in the skb's control block; note that the 'oct'
	 * parameter is overwritten from lio below
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		/* report a zero timestamp rather than stale buffer contents */
		resp->timestamp = 0;
	}

	/* the timestamp is one 8-byte word; swap it to host order */
	octeon_swap_8B_data(data: &resp->timestamp, blocks: 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		/* ptp_adjust offsets the device clock to the host PTP clock */
		ts.hwtstamp = ns_to_ktime(ns: ns + lio->ptp_adjust);
		skb_tstamp_tx(orig_skb: skb, hwtstamps: &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(buffer: skb);
}
1344 | |
1345 | /* send_nic_timestamp_pkt - Send a data packet that will be timestamped |
1346 | * @oct: octeon device |
1347 | * @ndata: pointer to network data |
1348 | * @finfo: pointer to private network data |
1349 | */ |
static int send_nic_timestamp_pkt(struct octeon_device *oct,
				  struct octnic_data_pkt *ndata,
				  struct octnet_buf_free_info *finfo,
				  int xmit_more)
{
	struct octeon_soft_command *sc;
	int ring_doorbell;
	struct lio *lio;
	int retval;
	u32 len;

	lio = finfo->lio;

	/* wrap the data command in a soft command that has room for the
	 * firmware's timestamp response
	 */
	sc = octeon_alloc_soft_command_resp(oct, cmd: &ndata->cmd,
					    rdatasize: sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	/* a response is now expected, so switch to the RESP request types */
	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	/* handle_timestamp() frees both the skb and the soft command */
	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

	/* ring the doorbell only when the stack has nothing more queued */
	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, iq_no: sc->iq_no, force_db: ring_doorbell, cmd: &sc->cmd,
				     buf: sc, datasize: len, reqtype: ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		/* the skb itself is cleaned up by the caller's error path */
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
1398 | |
1399 | /** |
1400 | * liquidio_xmit - Transmit networks packets to the Octeon interface |
1401 | * @skb: skbuff struct to be passed to network layer. |
1402 | * @netdev: pointer to network device |
1403 | * @returns whether the packet was transmitted to the device okay or not |
1404 | * (NETDEV_TX_OK or NETDEV_TX_BUSY) |
1405 | */ |
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_instr_irh *irh;
	struct oct_iq_stats *stats;
	struct octeon_device *oct;
	int q_idx = 0, iq_no = 0;
	union tx_info *tx_info;
	int xmit_more = 0;
	struct lio *lio;
	int status = 0;
	u64 dptr = 0;
	u32 tag = 0;
	int j;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	/* pick the TX queue for this skb; the device tag mirrors the index */
	q_idx = skb_iq(oct: lio->oct_dev, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(v: &lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = finfo;

	ndata.q_no = iq_no;

	if (octnet_iq_is_full(oct, q_no: ndata.q_no)) {
		/* defer sending if queue is full */
		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
			   ndata.q_no);
		stats->tx_iq_busy++;
		return NETDEV_TX_BUSY;
	}

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	/* request TX checksum offload as flagged on the skb; encapsulated
	 * (tunnel) packets use the tunnel-checksum bit instead
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (!skb_shinfo(skb)->nr_frags) {
		/* linear skb: one DMA mapping covers the whole frame */
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, cmd: &ndata.cmd, setup: &cmdsetup, tag);
		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev: &oct->pci_dev->dev, dma_addr: dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		skb_frag_t *frag;
		struct octnic_gather *g;
		int i, frags;

		/* fragmented skb: borrow a pre-allocated gather-list entry
		 * for this queue
		 */
		spin_lock(lock: &lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			lio_list_delete_head(root: &lio->glist[q_idx]);
		spin_unlock(lock: &lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, cmd: &ndata.cmd, setup: &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		/* first gather pointer covers the linear part of the skb */
		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(dev: &oct->pci_dev->dev, dma_addr: g->sg[0].ptr[0])) {
			/* NOTE(review): 'g' is not returned to lio->glist on
			 * this error path — looks like a gather-list leak;
			 * confirm.
			 */
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(sg_entry: &g->sg[0], size: (skb->len - skb->data_len), pos: 0);

		/* map each page fragment; entries pack 4 pointers per sg[] */
		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				skb_frag_dma_map(dev: &oct->pci_dev->dev,
						 frag, offset: 0, size: skb_frag_size(frag),
						 dir: DMA_TO_DEVICE);
			if (dma_mapping_error(dev: &oct->pci_dev->dev,
					      dma_addr: g->sg[i >> 2].ptr[i & 3])) {
				/* unwind every mapping made so far */
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       skb_frag_size(frag),
						       DMA_TO_DEVICE);
				}
				/* NOTE(review): 'g' also not returned here */
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(sg_entry: &g->sg[(i >> 2)], size: skb_frag_size(frag),
				    pos: (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];

	/* pass GSO parameters to the firmware for TSO */
	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
	}

	xmit_more = netdev_xmit_more();

	/* timestamped packets need a response, so they take the soft-command
	 * path; everything else is fire-and-forget
	 */
	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, ndata: &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, ndata: &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP) {
		/* queue just became full; stop it until completions drain */
		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
			iq_no);
		netif_stop_subqueue(dev: netdev, queue_index: q_idx);
	}

	netif_trans_update(dev: netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	/* NOTE(review): dptr is unmapped with ndata.datasize even in the
	 * scatter-gather case, where dptr is g->sg_dma_ptr — confirm this
	 * is the intended cleanup.
	 */
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(buffer: skb);
	return NETDEV_TX_OK;
}
1629 | |
1630 | /** |
1631 | * liquidio_tx_timeout - Network device Tx timeout |
1632 | * @netdev: pointer to network device |
1633 | * @txqueue: index of the hung transmit queue |
1634 | */ |
1635 | static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
1636 | { |
1637 | struct lio *lio; |
1638 | |
1639 | lio = GET_LIO(netdev); |
1640 | |
1641 | netif_info(lio, tx_err, lio->netdev, |
1642 | "Transmit timeout tx_dropped:%ld, waking up queues now!!\n" , |
1643 | netdev->stats.tx_dropped); |
1644 | netif_trans_update(dev: netdev); |
1645 | wake_txqs(netdev); |
1646 | } |
1647 | |
1648 | static int |
1649 | liquidio_vlan_rx_add_vid(struct net_device *netdev, |
1650 | __be16 proto __attribute__((unused)), u16 vid) |
1651 | { |
1652 | struct lio *lio = GET_LIO(netdev); |
1653 | struct octeon_device *oct = lio->oct_dev; |
1654 | struct octnic_ctrl_pkt nctrl; |
1655 | int ret = 0; |
1656 | |
1657 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1658 | |
1659 | nctrl.ncmd.u64 = 0; |
1660 | nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; |
1661 | nctrl.ncmd.s.param1 = vid; |
1662 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1663 | nctrl.netpndev = (u64)netdev; |
1664 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1665 | |
1666 | ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl); |
1667 | if (ret) { |
1668 | dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n" , |
1669 | ret); |
1670 | return -EPERM; |
1671 | } |
1672 | |
1673 | return 0; |
1674 | } |
1675 | |
1676 | static int |
1677 | liquidio_vlan_rx_kill_vid(struct net_device *netdev, |
1678 | __be16 proto __attribute__((unused)), u16 vid) |
1679 | { |
1680 | struct lio *lio = GET_LIO(netdev); |
1681 | struct octeon_device *oct = lio->oct_dev; |
1682 | struct octnic_ctrl_pkt nctrl; |
1683 | int ret = 0; |
1684 | |
1685 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1686 | |
1687 | nctrl.ncmd.u64 = 0; |
1688 | nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; |
1689 | nctrl.ncmd.s.param1 = vid; |
1690 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1691 | nctrl.netpndev = (u64)netdev; |
1692 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1693 | |
1694 | ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl); |
1695 | if (ret) { |
1696 | dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n" , |
1697 | ret); |
1698 | if (ret > 0) |
1699 | ret = -EIO; |
1700 | } |
1701 | return ret; |
1702 | } |
1703 | |
1704 | /** Sending command to enable/disable RX checksum offload |
1705 | * @param netdev pointer to network device |
1706 | * @param command OCTNET_CMD_TNL_RX_CSUM_CTL |
1707 | * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ |
1708 | * OCTNET_CMD_RXCSUM_DISABLE |
1709 | * @returns SUCCESS or FAILURE |
1710 | */ |
1711 | static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, |
1712 | u8 rx_cmd) |
1713 | { |
1714 | struct lio *lio = GET_LIO(netdev); |
1715 | struct octeon_device *oct = lio->oct_dev; |
1716 | struct octnic_ctrl_pkt nctrl; |
1717 | int ret = 0; |
1718 | |
1719 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1720 | |
1721 | nctrl.ncmd.u64 = 0; |
1722 | nctrl.ncmd.s.cmd = command; |
1723 | nctrl.ncmd.s.param1 = rx_cmd; |
1724 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1725 | nctrl.netpndev = (u64)netdev; |
1726 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1727 | |
1728 | ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl); |
1729 | if (ret) { |
1730 | dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n" , |
1731 | ret); |
1732 | if (ret > 0) |
1733 | ret = -EIO; |
1734 | } |
1735 | return ret; |
1736 | } |
1737 | |
1738 | /** Sending command to add/delete VxLAN UDP port to firmware |
1739 | * @param netdev pointer to network device |
1740 | * @param command OCTNET_CMD_VXLAN_PORT_CONFIG |
1741 | * @param vxlan_port VxLAN port to be added or deleted |
1742 | * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, |
1743 | * OCTNET_CMD_VXLAN_PORT_DEL |
1744 | * @returns SUCCESS or FAILURE |
1745 | */ |
1746 | static int liquidio_vxlan_port_command(struct net_device *netdev, int command, |
1747 | u16 vxlan_port, u8 vxlan_cmd_bit) |
1748 | { |
1749 | struct lio *lio = GET_LIO(netdev); |
1750 | struct octeon_device *oct = lio->oct_dev; |
1751 | struct octnic_ctrl_pkt nctrl; |
1752 | int ret = 0; |
1753 | |
1754 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1755 | |
1756 | nctrl.ncmd.u64 = 0; |
1757 | nctrl.ncmd.s.cmd = command; |
1758 | nctrl.ncmd.s.more = vxlan_cmd_bit; |
1759 | nctrl.ncmd.s.param1 = vxlan_port; |
1760 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1761 | nctrl.netpndev = (u64)netdev; |
1762 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1763 | |
1764 | ret = octnet_send_nic_ctrl_pkt(oct: lio->oct_dev, nctrl: &nctrl); |
1765 | if (ret) { |
1766 | dev_err(&oct->pci_dev->dev, |
1767 | "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n" , |
1768 | ret); |
1769 | if (ret > 0) |
1770 | ret = -EIO; |
1771 | } |
1772 | return ret; |
1773 | } |
1774 | |
1775 | static int liquidio_udp_tunnel_set_port(struct net_device *netdev, |
1776 | unsigned int table, unsigned int entry, |
1777 | struct udp_tunnel_info *ti) |
1778 | { |
1779 | return liquidio_vxlan_port_command(netdev, |
1780 | OCTNET_CMD_VXLAN_PORT_CONFIG, |
1781 | htons(ti->port), |
1782 | OCTNET_CMD_VXLAN_PORT_ADD); |
1783 | } |
1784 | |
1785 | static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, |
1786 | unsigned int table, |
1787 | unsigned int entry, |
1788 | struct udp_tunnel_info *ti) |
1789 | { |
1790 | return liquidio_vxlan_port_command(netdev, |
1791 | OCTNET_CMD_VXLAN_PORT_CONFIG, |
1792 | htons(ti->port), |
1793 | OCTNET_CMD_VXLAN_PORT_DEL); |
1794 | } |
1795 | |
/* UDP tunnel offload description: a single table of 1024 VXLAN port entries;
 * the udp_tunnel core invokes set_port/unset_port as ports come and go.
 */
static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
	.set_port = liquidio_udp_tunnel_set_port,
	.unset_port = liquidio_udp_tunnel_unset_port,
	.tables = {
		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};
1803 | |
1804 | /** \brief Net device fix features |
1805 | * @param netdev pointer to network device |
1806 | * @param request features requested |
1807 | * @returns updated features list |
1808 | */ |
1809 | static netdev_features_t liquidio_fix_features(struct net_device *netdev, |
1810 | netdev_features_t request) |
1811 | { |
1812 | struct lio *lio = netdev_priv(dev: netdev); |
1813 | |
1814 | if ((request & NETIF_F_RXCSUM) && |
1815 | !(lio->dev_capability & NETIF_F_RXCSUM)) |
1816 | request &= ~NETIF_F_RXCSUM; |
1817 | |
1818 | if ((request & NETIF_F_HW_CSUM) && |
1819 | !(lio->dev_capability & NETIF_F_HW_CSUM)) |
1820 | request &= ~NETIF_F_HW_CSUM; |
1821 | |
1822 | if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) |
1823 | request &= ~NETIF_F_TSO; |
1824 | |
1825 | if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) |
1826 | request &= ~NETIF_F_TSO6; |
1827 | |
1828 | if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) |
1829 | request &= ~NETIF_F_LRO; |
1830 | |
1831 | /* Disable LRO if RXCSUM is off */ |
1832 | if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && |
1833 | (lio->dev_capability & NETIF_F_LRO)) |
1834 | request &= ~NETIF_F_LRO; |
1835 | |
1836 | return request; |
1837 | } |
1838 | |
1839 | /** \brief Net device set features |
1840 | * @param netdev pointer to network device |
1841 | * @param features features to enable/disable |
1842 | */ |
1843 | static int liquidio_set_features(struct net_device *netdev, |
1844 | netdev_features_t features) |
1845 | { |
1846 | struct lio *lio = netdev_priv(dev: netdev); |
1847 | |
1848 | if (!((netdev->features ^ features) & NETIF_F_LRO)) |
1849 | return 0; |
1850 | |
1851 | if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) |
1852 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, |
1853 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
1854 | else if (!(features & NETIF_F_LRO) && |
1855 | (lio->dev_capability & NETIF_F_LRO)) |
1856 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, |
1857 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
1858 | if (!(netdev->features & NETIF_F_RXCSUM) && |
1859 | (lio->enc_dev_capability & NETIF_F_RXCSUM) && |
1860 | (features & NETIF_F_RXCSUM)) |
1861 | liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, |
1862 | OCTNET_CMD_RXCSUM_ENABLE); |
1863 | else if ((netdev->features & NETIF_F_RXCSUM) && |
1864 | (lio->enc_dev_capability & NETIF_F_RXCSUM) && |
1865 | !(features & NETIF_F_RXCSUM)) |
1866 | liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, |
1867 | OCTNET_CMD_RXCSUM_DISABLE); |
1868 | |
1869 | return 0; |
1870 | } |
1871 | |
/* net_device_ops for the LiquidIO VF network interface */
static const struct net_device_ops lionetdevops = {
	.ndo_open = liquidio_open,
	.ndo_stop = liquidio_stop,
	.ndo_start_xmit = liquidio_xmit,
	.ndo_get_stats64 = liquidio_get_stats64,
	.ndo_set_mac_address = liquidio_set_mac,
	.ndo_set_rx_mode = liquidio_set_mcast_list,
	.ndo_tx_timeout = liquidio_tx_timeout,
	.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu = liquidio_change_mtu,
	.ndo_eth_ioctl = liquidio_ioctl,
	.ndo_fix_features = liquidio_fix_features,
	.ndo_set_features = liquidio_set_features,
};
1887 | |
/**
 * lio_nic_info - dispatch handler for firmware NIC_INFO messages
 * @recv_info: receive descriptor holding the message buffers
 * @buf: the octeon device, as registered via octeon_register_dispatch_fn()
 *
 * Validates the payload size, byte-swaps the embedded link status and
 * forwards it to the netdev whose gmxport matches.  Always frees the
 * receive buffers and @recv_info, and always returns 0.
 */
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union oct_link_status *ls;
	int gmxport = 0;
	int i;

	/* Payload must be exactly the DROQ info header plus one
	 * oct_link_status; anything else is malformed.
	 */
	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n" ,
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(skb: recv_pkt->buffer_ptr[0]) +
		OCT_DROQ_INFO_SIZE);

	/* 64-bit swap required on LE machines before reading the fields */
	octeon_swap_8B_data(data: (u64 *)ls, blocks: (sizeof(union oct_link_status)) >> 3);

	/* Deliver the new link state to the interface on this gmx port */
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(netdev: oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	/* This handler owns the receive buffers; release them in all cases */
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(buffer: recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
1922 | |
1923 | /** |
1924 | * setup_nic_devices - Setup network interfaces |
1925 | * @octeon_dev: octeon device |
1926 | * |
1927 | * Called during init time for each device. It assumes the NIC |
1928 | * is already up and running. The link information for each |
1929 | * interface is passed in link_info. |
1930 | */ |
1931 | static int setup_nic_devices(struct octeon_device *octeon_dev) |
1932 | { |
1933 | int retval, num_iqueues, num_oqueues; |
1934 | u32 resp_size, data_size; |
1935 | struct liquidio_if_cfg_resp *resp; |
1936 | struct octeon_soft_command *sc; |
1937 | union oct_nic_if_cfg if_cfg; |
1938 | struct octdev_props *props; |
1939 | struct net_device *netdev; |
1940 | struct lio_version *vdata; |
1941 | struct lio *lio = NULL; |
1942 | u8 mac[ETH_ALEN], i, j; |
1943 | u32 ifidx_or_pfnum; |
1944 | |
1945 | ifidx_or_pfnum = octeon_dev->pf_num; |
1946 | |
1947 | /* This is to handle link status changes */ |
1948 | octeon_register_dispatch_fn(oct: octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, |
1949 | fn: lio_nic_info, fn_arg: octeon_dev); |
1950 | |
1951 | /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. |
1952 | * They are handled directly. |
1953 | */ |
1954 | octeon_register_reqtype_free_fn(oct: octeon_dev, REQTYPE_NORESP_NET, |
1955 | fn: free_netbuf); |
1956 | |
1957 | octeon_register_reqtype_free_fn(oct: octeon_dev, REQTYPE_NORESP_NET_SG, |
1958 | fn: free_netsgbuf); |
1959 | |
1960 | octeon_register_reqtype_free_fn(oct: octeon_dev, REQTYPE_RESP_NET_SG, |
1961 | fn: free_netsgbuf_with_resp); |
1962 | |
1963 | for (i = 0; i < octeon_dev->ifcount; i++) { |
1964 | resp_size = sizeof(struct liquidio_if_cfg_resp); |
1965 | data_size = sizeof(struct lio_version); |
1966 | sc = (struct octeon_soft_command *) |
1967 | octeon_alloc_soft_command(oct: octeon_dev, datasize: data_size, |
1968 | rdatasize: resp_size, ctxsize: 0); |
1969 | resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; |
1970 | vdata = (struct lio_version *)sc->virtdptr; |
1971 | |
1972 | *((u64 *)vdata) = 0; |
1973 | vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); |
1974 | vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); |
1975 | vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); |
1976 | |
1977 | if_cfg.u64 = 0; |
1978 | |
1979 | if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; |
1980 | if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; |
1981 | if_cfg.s.base_queue = 0; |
1982 | |
1983 | sc->iq_no = 0; |
1984 | |
1985 | octeon_prepare_soft_command(oct: octeon_dev, sc, OPCODE_NIC, |
1986 | OPCODE_NIC_IF_CFG, irh_ossp: 0, ossp0: if_cfg.u64, |
1987 | ossp1: 0); |
1988 | |
1989 | init_completion(x: &sc->complete); |
1990 | sc->sc_status = OCTEON_REQUEST_PENDING; |
1991 | |
1992 | retval = octeon_send_soft_command(oct: octeon_dev, sc); |
1993 | if (retval == IQ_SEND_FAILED) { |
1994 | dev_err(&octeon_dev->pci_dev->dev, |
1995 | "iq/oq config failed status: %x\n" , retval); |
1996 | /* Soft instr is freed by driver in case of failure. */ |
1997 | octeon_free_soft_command(oct: octeon_dev, sc); |
1998 | return(-EIO); |
1999 | } |
2000 | |
2001 | /* Sleep on a wait queue till the cond flag indicates that the |
2002 | * response arrived or timed-out. |
2003 | */ |
2004 | retval = wait_for_sc_completion_timeout(oct_dev: octeon_dev, sc, timeout: 0); |
2005 | if (retval) |
2006 | return retval; |
2007 | |
2008 | retval = resp->status; |
2009 | if (retval) { |
2010 | dev_err(&octeon_dev->pci_dev->dev, |
2011 | "iq/oq config failed, retval = %d\n" , retval); |
2012 | WRITE_ONCE(sc->caller_is_done, true); |
2013 | return -EIO; |
2014 | } |
2015 | |
2016 | snprintf(buf: octeon_dev->fw_info.liquidio_firmware_version, |
2017 | size: 32, fmt: "%s" , |
2018 | resp->cfg_info.liquidio_firmware_version); |
2019 | |
2020 | octeon_swap_8B_data(data: (u64 *)(&resp->cfg_info), |
2021 | blocks: (sizeof(struct liquidio_if_cfg_info)) >> 3); |
2022 | |
2023 | num_iqueues = hweight64(resp->cfg_info.iqmask); |
2024 | num_oqueues = hweight64(resp->cfg_info.oqmask); |
2025 | |
2026 | if (!(num_iqueues) || !(num_oqueues)) { |
2027 | dev_err(&octeon_dev->pci_dev->dev, |
2028 | "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n" , |
2029 | resp->cfg_info.iqmask, resp->cfg_info.oqmask); |
2030 | WRITE_ONCE(sc->caller_is_done, true); |
2031 | goto setup_nic_dev_done; |
2032 | } |
2033 | dev_dbg(&octeon_dev->pci_dev->dev, |
2034 | "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n" , |
2035 | i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, |
2036 | num_iqueues, num_oqueues); |
2037 | |
2038 | netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); |
2039 | |
2040 | if (!netdev) { |
2041 | dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n" ); |
2042 | WRITE_ONCE(sc->caller_is_done, true); |
2043 | goto setup_nic_dev_done; |
2044 | } |
2045 | |
2046 | SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); |
2047 | |
2048 | /* Associate the routines that will handle different |
2049 | * netdev tasks. |
2050 | */ |
2051 | netdev->netdev_ops = &lionetdevops; |
2052 | |
2053 | lio = GET_LIO(netdev); |
2054 | |
2055 | memset(lio, 0, sizeof(struct lio)); |
2056 | |
2057 | lio->ifidx = ifidx_or_pfnum; |
2058 | |
2059 | props = &octeon_dev->props[i]; |
2060 | props->gmxport = resp->cfg_info.linfo.gmxport; |
2061 | props->netdev = netdev; |
2062 | |
2063 | lio->linfo.num_rxpciq = num_oqueues; |
2064 | lio->linfo.num_txpciq = num_iqueues; |
2065 | |
2066 | for (j = 0; j < num_oqueues; j++) { |
2067 | lio->linfo.rxpciq[j].u64 = |
2068 | resp->cfg_info.linfo.rxpciq[j].u64; |
2069 | } |
2070 | for (j = 0; j < num_iqueues; j++) { |
2071 | lio->linfo.txpciq[j].u64 = |
2072 | resp->cfg_info.linfo.txpciq[j].u64; |
2073 | } |
2074 | |
2075 | lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; |
2076 | lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; |
2077 | lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; |
2078 | lio->linfo.macaddr_is_admin_asgnd = |
2079 | resp->cfg_info.linfo.macaddr_is_admin_asgnd; |
2080 | lio->linfo.macaddr_spoofchk = |
2081 | resp->cfg_info.linfo.macaddr_spoofchk; |
2082 | |
2083 | lio->msg_enable = netif_msg_init(debug_value: debug, DEFAULT_MSG_ENABLE); |
2084 | |
2085 | lio->dev_capability = NETIF_F_HIGHDMA |
2086 | | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2087 | | NETIF_F_SG | NETIF_F_RXCSUM |
2088 | | NETIF_F_TSO | NETIF_F_TSO6 |
2089 | | NETIF_F_GRO |
2090 | | NETIF_F_LRO; |
2091 | netif_set_tso_max_size(dev: netdev, OCTNIC_GSO_MAX_SIZE); |
2092 | |
2093 | /* Copy of transmit encapsulation capabilities: |
2094 | * TSO, TSO6, Checksums for this device |
2095 | */ |
2096 | lio->enc_dev_capability = NETIF_F_IP_CSUM |
2097 | | NETIF_F_IPV6_CSUM |
2098 | | NETIF_F_GSO_UDP_TUNNEL |
2099 | | NETIF_F_HW_CSUM | NETIF_F_SG |
2100 | | NETIF_F_RXCSUM |
2101 | | NETIF_F_TSO | NETIF_F_TSO6 |
2102 | | NETIF_F_LRO; |
2103 | |
2104 | netdev->hw_enc_features = |
2105 | (lio->enc_dev_capability & ~NETIF_F_LRO); |
2106 | netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; |
2107 | |
2108 | netdev->vlan_features = lio->dev_capability; |
2109 | /* Add any unchangeable hw features */ |
2110 | lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | |
2111 | NETIF_F_HW_VLAN_CTAG_RX | |
2112 | NETIF_F_HW_VLAN_CTAG_TX; |
2113 | |
2114 | netdev->features = (lio->dev_capability & ~NETIF_F_LRO); |
2115 | |
2116 | netdev->hw_features = lio->dev_capability; |
2117 | netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; |
2118 | |
2119 | /* MTU range: 68 - 16000 */ |
2120 | netdev->min_mtu = LIO_MIN_MTU_SIZE; |
2121 | netdev->max_mtu = LIO_MAX_MTU_SIZE; |
2122 | |
2123 | WRITE_ONCE(sc->caller_is_done, true); |
2124 | |
2125 | /* Point to the properties for octeon device to which this |
2126 | * interface belongs. |
2127 | */ |
2128 | lio->oct_dev = octeon_dev; |
2129 | lio->octprops = props; |
2130 | lio->netdev = netdev; |
2131 | |
2132 | dev_dbg(&octeon_dev->pci_dev->dev, |
2133 | "if%d gmx: %d hw_addr: 0x%llx\n" , i, |
2134 | lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); |
2135 | |
2136 | /* 64-bit swap required on LE machines */ |
2137 | octeon_swap_8B_data(data: &lio->linfo.hw_addr, blocks: 1); |
2138 | for (j = 0; j < ETH_ALEN; j++) |
2139 | mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); |
2140 | |
2141 | /* Copy MAC Address to OS network device structure */ |
2142 | eth_hw_addr_set(dev: netdev, addr: mac); |
2143 | |
2144 | if (liquidio_setup_io_queues(octeon_dev, ifidx: i, |
2145 | num_iqs: lio->linfo.num_txpciq, |
2146 | num_oqs: lio->linfo.num_rxpciq)) { |
2147 | dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n" ); |
2148 | goto setup_nic_dev_free; |
2149 | } |
2150 | |
2151 | ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); |
2152 | |
2153 | /* For VFs, enable Octeon device interrupts here, |
2154 | * as this is contingent upon IO queue setup |
2155 | */ |
2156 | octeon_dev->fn_list.enable_interrupt(octeon_dev, |
2157 | OCTEON_ALL_INTR); |
2158 | |
2159 | /* By default all interfaces on a single Octeon uses the same |
2160 | * tx and rx queues |
2161 | */ |
2162 | lio->txq = lio->linfo.txpciq[0].s.q_no; |
2163 | lio->rxq = lio->linfo.rxpciq[0].s.q_no; |
2164 | |
2165 | lio->tx_qsize = octeon_get_tx_qsize(oct: octeon_dev, q_no: lio->txq); |
2166 | lio->rx_qsize = octeon_get_rx_qsize(oct: octeon_dev, q_no: lio->rxq); |
2167 | |
2168 | if (lio_setup_glists(oct: octeon_dev, lio, num_qs: num_iqueues)) { |
2169 | dev_err(&octeon_dev->pci_dev->dev, |
2170 | "Gather list allocation failed\n" ); |
2171 | goto setup_nic_dev_free; |
2172 | } |
2173 | |
2174 | /* Register ethtool support */ |
2175 | liquidio_set_ethtool_ops(netdev); |
2176 | if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) |
2177 | octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; |
2178 | else |
2179 | octeon_dev->priv_flags = 0x0; |
2180 | |
2181 | if (netdev->features & NETIF_F_LRO) |
2182 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, |
2183 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
2184 | |
2185 | if (setup_link_status_change_wq(netdev)) |
2186 | goto setup_nic_dev_free; |
2187 | |
2188 | if (setup_rx_oom_poll_fn(netdev)) |
2189 | goto setup_nic_dev_free; |
2190 | |
2191 | /* Register the network device with the OS */ |
2192 | if (register_netdev(dev: netdev)) { |
2193 | dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n" ); |
2194 | goto setup_nic_dev_free; |
2195 | } |
2196 | |
2197 | dev_dbg(&octeon_dev->pci_dev->dev, |
2198 | "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n" , |
2199 | i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); |
2200 | netif_carrier_off(dev: netdev); |
2201 | lio->link_changes++; |
2202 | |
2203 | ifstate_set(lio, LIO_IFSTATE_REGISTERED); |
2204 | |
2205 | /* Sending command to firmware to enable Rx checksum offload |
2206 | * by default at the time of setup of Liquidio driver for |
2207 | * this device |
2208 | */ |
2209 | liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, |
2210 | OCTNET_CMD_RXCSUM_ENABLE); |
2211 | liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, |
2212 | OCTNET_CMD_TXCSUM_ENABLE); |
2213 | |
2214 | dev_dbg(&octeon_dev->pci_dev->dev, |
2215 | "NIC ifidx:%d Setup successful\n" , i); |
2216 | |
2217 | octeon_dev->no_speed_setting = 1; |
2218 | } |
2219 | |
2220 | return 0; |
2221 | |
2222 | setup_nic_dev_free: |
2223 | |
2224 | while (i--) { |
2225 | dev_err(&octeon_dev->pci_dev->dev, |
2226 | "NIC ifidx:%d Setup failed\n" , i); |
2227 | liquidio_destroy_nic_device(oct: octeon_dev, ifidx: i); |
2228 | } |
2229 | |
2230 | setup_nic_dev_done: |
2231 | |
2232 | return -ENODEV; |
2233 | } |
2234 | |
2235 | /** |
2236 | * liquidio_init_nic_module - initialize the NIC |
2237 | * @oct: octeon device |
2238 | * |
2239 | * This initialization routine is called once the Octeon device application is |
2240 | * up and running |
2241 | */ |
2242 | static int liquidio_init_nic_module(struct octeon_device *oct) |
2243 | { |
2244 | int num_nic_ports = 1; |
2245 | int i, retval = 0; |
2246 | |
2247 | dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n" ); |
2248 | |
2249 | /* only default iq and oq were initialized |
2250 | * initialize the rest as well run port_config command for each port |
2251 | */ |
2252 | oct->ifcount = num_nic_ports; |
2253 | memset(oct->props, 0, |
2254 | sizeof(struct octdev_props) * num_nic_ports); |
2255 | |
2256 | for (i = 0; i < MAX_OCTEON_LINKS; i++) |
2257 | oct->props[i].gmxport = -1; |
2258 | |
2259 | retval = setup_nic_devices(oct); |
2260 | if (retval) { |
2261 | dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n" ); |
2262 | goto octnet_init_failure; |
2263 | } |
2264 | |
2265 | dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n" ); |
2266 | |
2267 | return retval; |
2268 | |
2269 | octnet_init_failure: |
2270 | |
2271 | oct->ifcount = 0; |
2272 | |
2273 | return retval; |
2274 | } |
2275 | |
2276 | /** |
2277 | * octeon_device_init - Device initialization for each Octeon device that is probed |
2278 | * @oct: octeon device |
2279 | */ |
2280 | static int octeon_device_init(struct octeon_device *oct) |
2281 | { |
2282 | u32 rev_id; |
2283 | int j; |
2284 | |
2285 | atomic_set(v: &oct->status, OCT_DEV_BEGIN_STATE); |
2286 | |
2287 | /* Enable access to the octeon device and make its DMA capability |
2288 | * known to the OS. |
2289 | */ |
2290 | if (octeon_pci_os_setup(oct)) |
2291 | return 1; |
2292 | atomic_set(v: &oct->status, OCT_DEV_PCI_ENABLE_DONE); |
2293 | |
2294 | oct->chip_id = OCTEON_CN23XX_VF_VID; |
2295 | pci_read_config_dword(dev: oct->pci_dev, where: 8, val: &rev_id); |
2296 | oct->rev_id = rev_id & 0xff; |
2297 | |
2298 | if (cn23xx_setup_octeon_vf_device(oct)) |
2299 | return 1; |
2300 | |
2301 | atomic_set(v: &oct->status, OCT_DEV_PCI_MAP_DONE); |
2302 | |
2303 | oct->app_mode = CVM_DRV_NIC_APP; |
2304 | |
2305 | /* Initialize the dispatch mechanism used to push packets arriving on |
2306 | * Octeon Output queues. |
2307 | */ |
2308 | if (octeon_init_dispatch_list(octeon_dev: oct)) |
2309 | return 1; |
2310 | |
2311 | atomic_set(v: &oct->status, OCT_DEV_DISPATCH_INIT_DONE); |
2312 | |
2313 | if (octeon_set_io_queues_off(oct)) { |
2314 | dev_err(&oct->pci_dev->dev, "setting io queues off failed\n" ); |
2315 | return 1; |
2316 | } |
2317 | |
2318 | if (oct->fn_list.setup_device_regs(oct)) { |
2319 | dev_err(&oct->pci_dev->dev, "device registers configuration failed\n" ); |
2320 | return 1; |
2321 | } |
2322 | |
2323 | /* Initialize soft command buffer pool */ |
2324 | if (octeon_setup_sc_buffer_pool(oct)) { |
2325 | dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n" ); |
2326 | return 1; |
2327 | } |
2328 | atomic_set(v: &oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); |
2329 | |
2330 | /* Setup the data structures that manage this Octeon's Input queues. */ |
2331 | if (octeon_setup_instr_queues(oct)) { |
2332 | dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n" ); |
2333 | return 1; |
2334 | } |
2335 | atomic_set(v: &oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); |
2336 | |
2337 | /* Initialize lists to manage the requests of different types that |
2338 | * arrive from user & kernel applications for this octeon device. |
2339 | */ |
2340 | if (octeon_setup_response_list(octeon_dev: oct)) { |
2341 | dev_err(&oct->pci_dev->dev, "Response list allocation failed\n" ); |
2342 | return 1; |
2343 | } |
2344 | atomic_set(v: &oct->status, OCT_DEV_RESP_LIST_INIT_DONE); |
2345 | |
2346 | if (octeon_setup_output_queues(oct)) { |
2347 | dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n" ); |
2348 | return 1; |
2349 | } |
2350 | atomic_set(v: &oct->status, OCT_DEV_DROQ_INIT_DONE); |
2351 | |
2352 | if (oct->fn_list.setup_mbox(oct)) { |
2353 | dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n" ); |
2354 | return 1; |
2355 | } |
2356 | atomic_set(v: &oct->status, OCT_DEV_MBOX_SETUP_DONE); |
2357 | |
2358 | if (octeon_allocate_ioq_vector(oct, num_ioqs: oct->sriov_info.rings_per_vf)) { |
2359 | dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n" ); |
2360 | return 1; |
2361 | } |
2362 | atomic_set(v: &oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); |
2363 | |
2364 | dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n" , |
2365 | oct->sriov_info.rings_per_vf); |
2366 | |
2367 | /* Setup the interrupt handler and record the INT SUM register address*/ |
2368 | if (octeon_setup_interrupt(oct, num_ioqs: oct->sriov_info.rings_per_vf)) |
2369 | return 1; |
2370 | |
2371 | atomic_set(v: &oct->status, OCT_DEV_INTR_SET_DONE); |
2372 | |
2373 | /* *************************************************************** |
2374 | * The interrupts need to be enabled for the PF<-->VF handshake. |
2375 | * They are [re]-enabled after the PF<-->VF handshake so that the |
2376 | * correct OQ tick value is used (i.e. the value retrieved from |
2377 | * the PF as part of the handshake). |
2378 | */ |
2379 | |
2380 | /* Enable Octeon device interrupts */ |
2381 | oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); |
2382 | |
2383 | if (cn23xx_octeon_pfvf_handshake(oct)) |
2384 | return 1; |
2385 | |
2386 | /* Here we [re]-enable the interrupts so that the correct OQ tick value |
2387 | * is used (i.e. the value that was retrieved during the handshake) |
2388 | */ |
2389 | |
2390 | /* Enable Octeon device interrupts */ |
2391 | oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); |
2392 | /* *************************************************************** */ |
2393 | |
2394 | /* Enable the input and output queues for this Octeon device */ |
2395 | if (oct->fn_list.enable_io_queues(oct)) { |
2396 | dev_err(&oct->pci_dev->dev, "enabling io queues failed\n" ); |
2397 | return 1; |
2398 | } |
2399 | |
2400 | atomic_set(v: &oct->status, OCT_DEV_IO_QUEUES_DONE); |
2401 | |
2402 | atomic_set(v: &oct->status, OCT_DEV_HOST_OK); |
2403 | |
2404 | /* Send Credit for Octeon Output queues. Credits are always sent after |
2405 | * the output queue is enabled. |
2406 | */ |
2407 | for (j = 0; j < oct->num_oqs; j++) |
2408 | writel(val: oct->droq[j]->max_count, addr: oct->droq[j]->pkts_credit_reg); |
2409 | |
2410 | /* Packets can start arriving on the output queues from this point. */ |
2411 | |
2412 | atomic_set(v: &oct->status, OCT_DEV_CORE_OK); |
2413 | |
2414 | atomic_set(v: &oct->status, OCT_DEV_RUNNING); |
2415 | |
2416 | if (liquidio_init_nic_module(oct)) |
2417 | return 1; |
2418 | |
2419 | return 0; |
2420 | } |
2421 | |
/* Module entry point: initialize the global octeon device list and
 * register the VF PCI driver with the PCI core.
 */
static int __init liquidio_vf_init(void)
{
	octeon_init_device_list(conf_type: 0);
	return pci_register_driver(&liquidio_vf_pci_driver);
}
2427 | |
/* Module exit point: unregister the VF PCI driver and log the unload. */
static void __exit liquidio_vf_exit(void)
{
	pci_unregister_driver(dev: &liquidio_vf_pci_driver);

	pr_info("LiquidIO_VF network module is now unloaded\n" );
}

module_init(liquidio_vf_init);
module_exit(liquidio_vf_exit);
2437 | |