1 | /********************************************************************** |
2 | * Author: Cavium, Inc. |
3 | * |
4 | * Contact: support@cavium.com |
5 | * Please include "LiquidIO" in the subject. |
6 | * |
7 | * Copyright (c) 2003-2016 Cavium, Inc. |
8 | * |
9 | * This file is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License, Version 2, as |
11 | * published by the Free Software Foundation. |
12 | * |
13 | * This file is distributed in the hope that it will be useful, but |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or |
16 | * NONINFRINGEMENT. See the GNU General Public License for more details. |
17 | ***********************************************************************/ |
18 | #include <linux/module.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/pci.h> |
21 | #include <linux/firmware.h> |
22 | #include <net/vxlan.h> |
23 | #include <linux/kthread.h> |
24 | #include "liquidio_common.h" |
25 | #include "octeon_droq.h" |
26 | #include "octeon_iq.h" |
27 | #include "response_manager.h" |
28 | #include "octeon_device.h" |
29 | #include "octeon_nic.h" |
30 | #include "octeon_main.h" |
31 | #include "octeon_network.h" |
32 | #include "cn66xx_regs.h" |
33 | #include "cn66xx_device.h" |
34 | #include "cn68xx_device.h" |
35 | #include "cn23xx_pf_device.h" |
36 | #include "liquidio_image.h" |
37 | #include "lio_vf_rep.h" |
38 | |
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
42 | MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME |
43 | "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); |
44 | MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME |
45 | "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); |
46 | MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME |
47 | "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); |
48 | MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME |
49 | "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); |
50 | |
51 | static int ddr_timeout = 10000; |
52 | module_param(ddr_timeout, int, 0644); |
53 | MODULE_PARM_DESC(ddr_timeout, |
"Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
55 | |
56 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) |
57 | |
58 | static int debug = -1; |
59 | module_param(debug, int, 0644); |
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
61 | |
62 | static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; |
63 | module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); |
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
65 | |
66 | static u32 console_bitmask; |
67 | module_param(console_bitmask, int, 0644); |
68 | MODULE_PARM_DESC(console_bitmask, |
"Bitmask indicating which consoles have debug output redirected to syslog.");
70 | |
71 | /** |
72 | * octeon_console_debug_enabled - determines if a given console has debug enabled. |
73 | * @console: console to check |
74 | * Return: 1 = enabled. 0 otherwise |
75 | */ |
76 | static int octeon_console_debug_enabled(u32 console) |
77 | { |
78 | return (console_bitmask >> (console)) & 0x1; |
79 | } |
80 | |
81 | /* Polling interval for determining when NIC application is alive */ |
82 | #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 |
83 | |
84 | /* runtime link query interval */ |
85 | #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 |
86 | /* update localtime to octeon firmware every 60 seconds. |
87 | * make firmware to use same time reference, so that it will be easy to |
88 | * correlate firmware logged events/errors with host events, for debugging. |
89 | */ |
90 | #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 |
91 | |
92 | /* time to wait for possible in-flight requests in milliseconds */ |
93 | #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) |
94 | |
95 | struct oct_link_status_resp { |
96 | u64 rh; |
97 | struct oct_link_info link_info; |
98 | u64 status; |
99 | }; |
100 | |
101 | struct oct_timestamp_resp { |
102 | u64 rh; |
103 | u64 timestamp; |
104 | u64 status; |
105 | }; |
106 | |
107 | #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) |
108 | |
109 | union tx_info { |
110 | u64 u64; |
111 | struct { |
112 | #ifdef __BIG_ENDIAN_BITFIELD |
113 | u16 gso_size; |
114 | u16 gso_segs; |
115 | u32 reserved; |
116 | #else |
117 | u32 reserved; |
118 | u16 gso_segs; |
119 | u16 gso_size; |
120 | #endif |
121 | } s; |
122 | }; |
123 | |
124 | /* Octeon device properties to be used by the NIC module. |
125 | * Each octeon device in the system will be represented |
126 | * by this structure in the NIC module. |
127 | */ |
128 | |
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
130 | #define OCTNIC_GSO_MAX_SIZE \ |
131 | (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) |
132 | |
133 | struct handshake { |
134 | struct completion init; |
135 | struct completion started; |
136 | struct pci_dev *pci_dev; |
137 | int init_ok; |
138 | int started_ok; |
139 | }; |
140 | |
141 | #ifdef CONFIG_PCI_IOV |
142 | static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); |
143 | #endif |
144 | |
145 | static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, |
146 | char *prefix, char *suffix); |
147 | |
148 | static int octeon_device_init(struct octeon_device *); |
149 | static int liquidio_stop(struct net_device *netdev); |
150 | static void liquidio_remove(struct pci_dev *pdev); |
151 | static int liquidio_probe(struct pci_dev *pdev, |
152 | const struct pci_device_id *ent); |
153 | static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, |
154 | int linkstate); |
155 | |
156 | static struct handshake handshake[MAX_OCTEON_DEVICES]; |
157 | static struct completion first_stage; |
158 | |
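/**
 * octeon_droq_bh - Tasklet that drains pending packets on all active DROQs
 * @t: tasklet_struct embedded in the per-device private data
 */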
159 | static void octeon_droq_bh(struct tasklet_struct *t) |
160 | { |
161 | int q_no; |
162 | int reschedule = 0; |
163 | struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, |
164 | droq_tasklet); |
165 | struct octeon_device *oct = oct_priv->dev; |
166 | |
167 | for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { |
168 | if (!(oct->io_qmask.oq & BIT_ULL(q_no))) |
169 | continue; |
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
171 | MAX_PACKET_BUDGET); |
lio_enable_irq(oct->droq[q_no], NULL);
173 | |
174 | if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { |
175 | /* set time and cnt interrupt thresholds for this DROQ |
176 | * for NAPI |
177 | */ |
178 | int adjusted_q_no = q_no + oct->sriov_info.pf_srn; |
179 | |
180 | octeon_write_csr64( |
181 | oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), |
182 | 0x5700000040ULL); |
183 | octeon_write_csr64( |
184 | oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); |
185 | } |
186 | } |
187 | |
188 | if (reschedule) |
tasklet_schedule(&oct_priv->droq_tasklet);
190 | } |
191 | |
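/**
 * lio_wait_for_oq_pkts - Wait for pending output queue packets to be processed
 * @oct: Pointer to Octeon device
 */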
192 | static int lio_wait_for_oq_pkts(struct octeon_device *oct) |
193 | { |
194 | struct octeon_device_priv *oct_priv = oct->priv; |
195 | int retry = 100, pkt_cnt = 0, pending_pkts = 0; |
196 | int i; |
197 | |
198 | do { |
199 | pending_pkts = 0; |
200 | |
201 | for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { |
202 | if (!(oct->io_qmask.oq & BIT_ULL(i))) |
203 | continue; |
pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
205 | } |
206 | if (pkt_cnt > 0) { |
207 | pending_pkts += pkt_cnt; |
tasklet_schedule(&oct_priv->droq_tasklet);
209 | } |
210 | pkt_cnt = 0; |
schedule_timeout_uninterruptible(1);
212 | |
213 | } while (retry-- && pending_pkts); |
214 | |
215 | return pkt_cnt; |
216 | } |
217 | |
218 | /** |
219 | * force_io_queues_off - Forces all IO queues off on a given device |
220 | * @oct: Pointer to Octeon device |
221 | */ |
222 | static void force_io_queues_off(struct octeon_device *oct) |
223 | { |
224 | if ((oct->chip_id == OCTEON_CN66XX) || |
225 | (oct->chip_id == OCTEON_CN68XX)) { |
226 | /* Reset the Enable bits for Input Queues. */ |
227 | octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); |
228 | |
229 | /* Reset the Enable bits for Output Queues. */ |
230 | octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); |
231 | } |
232 | } |
233 | |
234 | /** |
235 | * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc |
236 | * @oct: Pointer to Octeon device |
237 | */ |
238 | static inline void pcierror_quiesce_device(struct octeon_device *oct) |
239 | { |
240 | int i; |
241 | |
242 | /* Disable the input and output queues now. No more packets will |
243 | * arrive from Octeon, but we should wait for all packet processing |
244 | * to finish. |
245 | */ |
246 | force_io_queues_off(oct); |
247 | |
248 | /* To allow for in-flight requests */ |
249 | schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); |
250 | |
251 | if (wait_for_pending_requests(oct)) |
252 | dev_err(&oct->pci_dev->dev, "There were pending requests\n" ); |
253 | |
254 | /* Force all requests waiting to be fetched by OCTEON to complete. */ |
255 | for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { |
256 | struct octeon_instr_queue *iq; |
257 | |
258 | if (!(oct->io_qmask.iq & BIT_ULL(i))) |
259 | continue; |
260 | iq = oct->instr_queue[i]; |
261 | |
if (atomic_read(&iq->instr_pending)) {
spin_lock_bh(&iq->lock);
264 | iq->fill_cnt = 0; |
265 | iq->octeon_read_index = iq->host_write_index; |
266 | iq->stats.instr_processed += |
atomic_read(&iq->instr_pending);
lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
270 | } |
271 | } |
272 | |
273 | /* Force all pending ordered list requests to time out. */ |
lio_process_ordered_list(oct, 1);
275 | |
276 | /* We do not need to wait for output queue packets to be processed. */ |
277 | } |
278 | |
279 | /** |
280 | * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status |
281 | * @dev: Pointer to PCI device |
282 | */ |
283 | static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) |
284 | { |
285 | int pos = 0x100; |
286 | u32 status, mask; |
287 | |
288 | pr_info("%s :\n" , __func__); |
289 | |
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
292 | if (dev->error_state == pci_channel_io_normal) |
293 | status &= ~mask; /* Clear corresponding nonfatal bits */ |
294 | else |
295 | status &= mask; /* Clear corresponding fatal bits */ |
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
297 | } |
298 | |
299 | /** |
300 | * stop_pci_io - Stop all PCI IO to a given device |
301 | * @oct: Pointer to Octeon device |
302 | */ |
303 | static void stop_pci_io(struct octeon_device *oct) |
304 | { |
305 | /* No more instructions will be forwarded. */ |
atomic_set(&oct->status, OCT_DEV_IN_RESET);
307 | |
pci_disable_device(oct->pci_dev);
309 | |
310 | /* Disable interrupts */ |
311 | oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); |
312 | |
313 | pcierror_quiesce_device(oct); |
314 | |
315 | /* Release the interrupt line */ |
316 | free_irq(oct->pci_dev->irq, oct); |
317 | |
318 | if (oct->flags & LIO_FLAG_MSI_ENABLED) |
pci_disable_msi(oct->pci_dev);
320 | |
321 | dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n" , |
322 | lio_get_state_string(&oct->status)); |
323 | |
324 | /* making it a common function for all OCTEON models */ |
cleanup_aer_uncorrect_error_status(oct->pci_dev);
326 | } |
327 | |
328 | /** |
329 | * liquidio_pcie_error_detected - called when PCI error is detected |
330 | * @pdev: Pointer to PCI device |
331 | * @state: The current pci connection state |
332 | * |
333 | * This function is called after a PCI bus error affecting |
334 | * this device has been detected. |
335 | */ |
336 | static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, |
337 | pci_channel_state_t state) |
338 | { |
339 | struct octeon_device *oct = pci_get_drvdata(pdev); |
340 | |
341 | /* Non-correctable Non-fatal errors */ |
342 | if (state == pci_channel_io_normal) { |
343 | dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n" ); |
cleanup_aer_uncorrect_error_status(oct->pci_dev);
345 | return PCI_ERS_RESULT_CAN_RECOVER; |
346 | } |
347 | |
348 | /* Non-correctable Fatal errors */ |
349 | dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n" ); |
350 | stop_pci_io(oct); |
351 | |
352 | /* Always return a DISCONNECT. There is no support for recovery but only |
353 | * for a clean shutdown. |
354 | */ |
355 | return PCI_ERS_RESULT_DISCONNECT; |
356 | } |
357 | |
358 | /** |
359 | * liquidio_pcie_mmio_enabled - mmio handler |
360 | * @pdev: Pointer to PCI device |
361 | */ |
362 | static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev) |
363 | { |
364 | /* We should never hit this since we never ask for a reset for a Fatal |
365 | * Error. We always return DISCONNECT in io_error above. |
366 | * But play safe and return RECOVERED for now. |
367 | */ |
368 | return PCI_ERS_RESULT_RECOVERED; |
369 | } |
370 | |
371 | /** |
372 | * liquidio_pcie_slot_reset - called after the pci bus has been reset. |
373 | * @pdev: Pointer to PCI device |
374 | * |
375 | * Restart the card from scratch, as if from a cold-boot. Implementation |
376 | * resembles the first-half of the octeon_resume routine. |
377 | */ |
378 | static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev) |
379 | { |
380 | /* We should never hit this since we never ask for a reset for a Fatal |
381 | * Error. We always return DISCONNECT in io_error above. |
382 | * But play safe and return RECOVERED for now. |
383 | */ |
384 | return PCI_ERS_RESULT_RECOVERED; |
385 | } |
386 | |
387 | /** |
388 | * liquidio_pcie_resume - called when traffic can start flowing again. |
389 | * @pdev: Pointer to PCI device |
390 | * |
391 | * This callback is called when the error recovery driver tells us that |
392 | * its OK to resume normal operation. Implementation resembles the |
393 | * second-half of the octeon_resume routine. |
394 | */ |
395 | static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev) |
396 | { |
397 | /* Nothing to be done here. */ |
398 | } |
399 | |
400 | #define liquidio_suspend NULL |
401 | #define liquidio_resume NULL |
402 | |
403 | /* For PCI-E Advanced Error Recovery (AER) Interface */ |
404 | static const struct pci_error_handlers liquidio_err_handler = { |
405 | .error_detected = liquidio_pcie_error_detected, |
406 | .mmio_enabled = liquidio_pcie_mmio_enabled, |
407 | .slot_reset = liquidio_pcie_slot_reset, |
408 | .resume = liquidio_pcie_resume, |
409 | }; |
410 | |
411 | static const struct pci_device_id liquidio_pci_tbl[] = { |
412 | { /* 68xx */ |
413 | PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 |
414 | }, |
415 | { /* 66xx */ |
416 | PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 |
417 | }, |
418 | { /* 23xx pf */ |
419 | PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 |
420 | }, |
421 | { |
422 | 0, 0, 0, 0, 0, 0, 0 |
423 | } |
424 | }; |
425 | MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); |
426 | |
427 | static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); |
428 | |
429 | static struct pci_driver liquidio_pci_driver = { |
430 | .name = "LiquidIO" , |
431 | .id_table = liquidio_pci_tbl, |
432 | .probe = liquidio_probe, |
433 | .remove = liquidio_remove, |
434 | .err_handler = &liquidio_err_handler, /* For AER */ |
435 | .driver.pm = &liquidio_pm_ops, |
436 | #ifdef CONFIG_PCI_IOV |
437 | .sriov_configure = liquidio_enable_sriov, |
438 | #endif |
439 | }; |
440 | |
441 | /** |
442 | * liquidio_init_pci - register PCI driver |
443 | */ |
444 | static int liquidio_init_pci(void) |
445 | { |
446 | return pci_register_driver(&liquidio_pci_driver); |
447 | } |
448 | |
449 | /** |
450 | * liquidio_deinit_pci - unregister PCI driver |
451 | */ |
452 | static void liquidio_deinit_pci(void) |
453 | { |
pci_unregister_driver(&liquidio_pci_driver);
455 | } |
456 | |
457 | /** |
458 | * check_txq_status - Check Tx queue status, and take appropriate action |
459 | * @lio: per-network private data |
460 | * Return: 0 if full, number of queues woken up otherwise |
461 | */ |
462 | static inline int check_txq_status(struct lio *lio) |
463 | { |
464 | int numqs = lio->netdev->real_num_tx_queues; |
465 | int ret_val = 0; |
466 | int q, iq; |
467 | |
468 | /* check each sub-queue state */ |
469 | for (q = 0; q < numqs; q++) { |
470 | iq = lio->linfo.txpciq[q % |
471 | lio->oct_dev->num_iqs].s.q_no; |
if (octnet_iq_is_full(lio->oct_dev, iq))
473 | continue; |
if (__netif_subqueue_stopped(lio->netdev, q)) {
netif_wake_subqueue(lio->netdev, q);
476 | INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, |
477 | tx_restart, 1); |
478 | ret_val++; |
479 | } |
480 | } |
481 | |
482 | return ret_val; |
483 | } |
484 | |
485 | /** |
486 | * print_link_info - Print link information |
487 | * @netdev: network device |
488 | */ |
489 | static void print_link_info(struct net_device *netdev) |
490 | { |
491 | struct lio *lio = GET_LIO(netdev); |
492 | |
493 | if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && |
494 | ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { |
495 | struct oct_link_info *linfo = &lio->linfo; |
496 | |
497 | if (linfo->link.s.link_up) { |
498 | netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n" , |
499 | linfo->link.s.speed, |
500 | (linfo->link.s.duplex) ? "Full" : "Half" ); |
501 | } else { |
502 | netif_info(lio, link, lio->netdev, "Link Down\n" ); |
503 | } |
504 | } |
505 | } |
506 | |
507 | /** |
508 | * octnet_link_status_change - Routine to notify MTU change |
509 | * @work: work_struct data structure |
510 | */ |
511 | static void octnet_link_status_change(struct work_struct *work) |
512 | { |
513 | struct cavium_wk *wk = (struct cavium_wk *)work; |
514 | struct lio *lio = (struct lio *)wk->ctxptr; |
515 | |
516 | /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. |
517 | * this API is invoked only when new max-MTU of the interface is |
518 | * less than current MTU. |
519 | */ |
520 | rtnl_lock(); |
521 | dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); |
522 | rtnl_unlock(); |
523 | } |
524 | |
525 | /** |
526 | * setup_link_status_change_wq - Sets up the mtu status change work |
527 | * @netdev: network device |
528 | */ |
529 | static inline int setup_link_status_change_wq(struct net_device *netdev) |
530 | { |
531 | struct lio *lio = GET_LIO(netdev); |
532 | struct octeon_device *oct = lio->oct_dev; |
533 | |
lio->link_status_wq.wq = alloc_workqueue("link-status",
WQ_MEM_RECLAIM, 0);
536 | if (!lio->link_status_wq.wq) { |
537 | dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n" ); |
538 | return -1; |
539 | } |
540 | INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, |
541 | octnet_link_status_change); |
542 | lio->link_status_wq.wk.ctxptr = lio; |
543 | |
544 | return 0; |
545 | } |
546 | |
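/**
 * cleanup_link_status_change_wq - Stop and destroy the link status change work
 * @netdev: network device
 */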
547 | static inline void cleanup_link_status_change_wq(struct net_device *netdev) |
548 | { |
549 | struct lio *lio = GET_LIO(netdev); |
550 | |
551 | if (lio->link_status_wq.wq) { |
cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
destroy_workqueue(lio->link_status_wq.wq);
554 | } |
555 | } |
556 | |
557 | /** |
558 | * update_link_status - Update link status |
559 | * @netdev: network device |
560 | * @ls: link status structure |
561 | * |
562 | * Called on receipt of a link status response from the core application to |
563 | * update each interface's link status. |
564 | */ |
565 | static inline void update_link_status(struct net_device *netdev, |
566 | union oct_link_status *ls) |
567 | { |
568 | struct lio *lio = GET_LIO(netdev); |
569 | int changed = (lio->linfo.link.u64 != ls->u64); |
570 | int current_max_mtu = lio->linfo.link.s.mtu; |
571 | struct octeon_device *oct = lio->oct_dev; |
572 | |
573 | dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n" , |
574 | __func__, lio->linfo.link.u64, ls->u64); |
575 | lio->linfo.link.u64 = ls->u64; |
576 | |
577 | if ((lio->intf_open) && (changed)) { |
578 | print_link_info(netdev); |
579 | lio->link_changes++; |
580 | |
581 | if (lio->linfo.link.s.link_up) { |
582 | dev_dbg(&oct->pci_dev->dev, "%s: link_up" , __func__); |
netif_carrier_on(netdev);
584 | wake_txqs(netdev); |
585 | } else { |
586 | dev_dbg(&oct->pci_dev->dev, "%s: link_off" , __func__); |
netif_carrier_off(netdev);
588 | stop_txqs(netdev); |
589 | } |
590 | if (lio->linfo.link.s.mtu != current_max_mtu) { |
591 | netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n" , |
592 | current_max_mtu, lio->linfo.link.s.mtu); |
593 | netdev->max_mtu = lio->linfo.link.s.mtu; |
594 | } |
595 | if (lio->linfo.link.s.mtu < netdev->mtu) { |
596 | dev_warn(&oct->pci_dev->dev, |
597 | "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n" , |
598 | netdev->mtu, lio->linfo.link.s.mtu); |
queue_delayed_work(lio->link_status_wq.wq,
&lio->link_status_wq.wk.work, 0);
601 | } |
602 | } |
603 | } |
604 | |
605 | /** |
606 | * lio_sync_octeon_time - send latest localtime to octeon firmware so that |
* firmware will correct its time, in case there is a time skew
608 | * |
609 | * @work: work scheduled to send time update to octeon firmware |
610 | **/ |
611 | static void lio_sync_octeon_time(struct work_struct *work) |
612 | { |
613 | struct cavium_wk *wk = (struct cavium_wk *)work; |
614 | struct lio *lio = (struct lio *)wk->ctxptr; |
615 | struct octeon_device *oct = lio->oct_dev; |
616 | struct octeon_soft_command *sc; |
617 | struct timespec64 ts; |
618 | struct lio_time *lt; |
619 | int ret; |
620 | |
sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
622 | if (!sc) { |
623 | dev_err(&oct->pci_dev->dev, |
624 | "Failed to sync time to octeon: soft command allocation failed\n" ); |
625 | return; |
626 | } |
627 | |
628 | lt = (struct lio_time *)sc->virtdptr; |
629 | |
630 | /* Get time of the day */ |
ktime_get_real_ts64(&ts);
632 | lt->sec = ts.tv_sec; |
633 | lt->nsec = ts.tv_nsec; |
octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
635 | |
636 | sc->iq_no = lio->linfo.txpciq[0].s.q_no; |
637 | octeon_prepare_soft_command(oct, sc, OPCODE_NIC, |
OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
639 | |
init_completion(&sc->complete);
641 | sc->sc_status = OCTEON_REQUEST_PENDING; |
642 | |
643 | ret = octeon_send_soft_command(oct, sc); |
644 | if (ret == IQ_SEND_FAILED) { |
645 | dev_err(&oct->pci_dev->dev, |
646 | "Failed to sync time to octeon: failed to send soft command\n" ); |
647 | octeon_free_soft_command(oct, sc); |
648 | } else { |
649 | WRITE_ONCE(sc->caller_is_done, true); |
650 | } |
651 | |
queue_delayed_work(lio->sync_octeon_time_wq.wq,
&lio->sync_octeon_time_wq.wk.work,
msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
655 | } |
656 | |
657 | /** |
658 | * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware |
659 | * |
660 | * @netdev: network device which should send time update to firmware |
661 | **/ |
662 | static inline int setup_sync_octeon_time_wq(struct net_device *netdev) |
663 | { |
664 | struct lio *lio = GET_LIO(netdev); |
665 | struct octeon_device *oct = lio->oct_dev; |
666 | |
667 | lio->sync_octeon_time_wq.wq = |
alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
669 | if (!lio->sync_octeon_time_wq.wq) { |
670 | dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n" ); |
671 | return -1; |
672 | } |
673 | INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, |
674 | lio_sync_octeon_time); |
675 | lio->sync_octeon_time_wq.wk.ctxptr = lio; |
queue_delayed_work(lio->sync_octeon_time_wq.wq,
&lio->sync_octeon_time_wq.wk.work,
msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
679 | |
680 | return 0; |
681 | } |
682 | |
683 | /** |
684 | * cleanup_sync_octeon_time_wq - destroy wq |
685 | * |
686 | * @netdev: network device which should send time update to firmware |
687 | * |
688 | * Stop scheduling and destroy the work created to periodically update local |
689 | * time to octeon firmware. |
690 | **/ |
691 | static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) |
692 | { |
693 | struct lio *lio = GET_LIO(netdev); |
694 | struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; |
695 | |
696 | if (time_wq->wq) { |
cancel_delayed_work_sync(&time_wq->wk.work);
destroy_workqueue(time_wq->wq);
699 | } |
700 | } |
701 | |
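/**
 * get_other_octeon_device - Find the other PF on the same bus and slot
 * @oct: Pointer to Octeon device
 * Return: pointer to the sibling octeon device, or NULL if none is found
 */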
702 | static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) |
703 | { |
704 | struct octeon_device *other_oct; |
705 | |
other_oct = lio_get_device(oct->octeon_id + 1);
707 | |
708 | if (other_oct && other_oct->pci_dev) { |
709 | int oct_busnum, other_oct_busnum; |
710 | |
711 | oct_busnum = oct->pci_dev->bus->number; |
712 | other_oct_busnum = other_oct->pci_dev->bus->number; |
713 | |
714 | if (oct_busnum == other_oct_busnum) { |
715 | int oct_slot, other_oct_slot; |
716 | |
717 | oct_slot = PCI_SLOT(oct->pci_dev->devfn); |
718 | other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); |
719 | |
720 | if (oct_slot == other_oct_slot) |
721 | return other_oct; |
722 | } |
723 | } |
724 | |
725 | return NULL; |
726 | } |
727 | |
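/**
 * disable_all_vf_links - Set the link state of every VF on this device to disabled
 * @oct: Pointer to Octeon device
 */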
728 | static void disable_all_vf_links(struct octeon_device *oct) |
729 | { |
730 | struct net_device *netdev; |
731 | int max_vfs, vf, i; |
732 | |
733 | if (!oct) |
734 | return; |
735 | |
736 | max_vfs = oct->sriov_info.max_vfs; |
737 | |
738 | for (i = 0; i < oct->ifcount; i++) { |
739 | netdev = oct->props[i].netdev; |
740 | if (!netdev) |
741 | continue; |
742 | |
743 | for (vf = 0; vf < max_vfs; vf++) |
liquidio_set_vf_link_state(netdev, vf,
IFLA_VF_LINK_STATE_DISABLE);
746 | } |
747 | } |
748 | |
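/**
 * liquidio_watchdog - Kernel thread that monitors Octeon cores for crashes
 * @param: Pointer to the Octeon device being watched
 *
 * Polls CN23XX_SLI_SCRATCH2 every two seconds; when cores crash or get
 * stuck it logs the affected cores and disables all VF links on this NIC.
 */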
749 | static int liquidio_watchdog(void *param) |
750 | { |
751 | bool err_msg_was_printed[LIO_MAX_CORES]; |
752 | u16 mask_of_crashed_or_stuck_cores = 0; |
753 | bool all_vf_links_are_disabled = false; |
754 | struct octeon_device *oct = param; |
755 | struct octeon_device *other_oct; |
756 | #ifdef CONFIG_MODULE_UNLOAD |
757 | long refcount, vfs_referencing_pf; |
758 | u64 vfs_mask1, vfs_mask2; |
759 | #endif |
760 | int core; |
761 | |
762 | memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); |
763 | |
764 | while (!kthread_should_stop()) { |
765 | /* sleep for a couple of seconds so that we don't hog the CPU */ |
766 | set_current_state(TASK_INTERRUPTIBLE); |
schedule_timeout(msecs_to_jiffies(2000));
768 | |
769 | mask_of_crashed_or_stuck_cores = |
770 | (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); |
771 | |
772 | if (!mask_of_crashed_or_stuck_cores) |
773 | continue; |
774 | |
775 | WRITE_ONCE(oct->cores_crashed, true); |
776 | other_oct = get_other_octeon_device(oct); |
777 | if (other_oct) |
778 | WRITE_ONCE(other_oct->cores_crashed, true); |
779 | |
780 | for (core = 0; core < LIO_MAX_CORES; core++) { |
781 | bool core_crashed_or_got_stuck; |
782 | |
783 | core_crashed_or_got_stuck = |
784 | (mask_of_crashed_or_stuck_cores |
785 | >> core) & 1; |
786 | |
787 | if (core_crashed_or_got_stuck && |
788 | !err_msg_was_printed[core]) { |
789 | dev_err(&oct->pci_dev->dev, |
790 | "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n" , |
791 | core); |
792 | err_msg_was_printed[core] = true; |
793 | } |
794 | } |
795 | |
796 | if (all_vf_links_are_disabled) |
797 | continue; |
798 | |
799 | disable_all_vf_links(oct); |
disable_all_vf_links(other_oct);
801 | all_vf_links_are_disabled = true; |
802 | |
803 | #ifdef CONFIG_MODULE_UNLOAD |
804 | vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); |
805 | vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); |
806 | |
807 | vfs_referencing_pf = hweight64(vfs_mask1); |
808 | vfs_referencing_pf += hweight64(vfs_mask2); |
809 | |
810 | refcount = module_refcount(THIS_MODULE); |
811 | if (refcount >= vfs_referencing_pf) { |
812 | while (vfs_referencing_pf) { |
813 | module_put(THIS_MODULE); |
814 | vfs_referencing_pf--; |
815 | } |
816 | } |
817 | #endif |
818 | } |
819 | |
820 | return 0; |
821 | } |
822 | |
823 | /** |
824 | * liquidio_probe - PCI probe handler |
825 | * @pdev: PCI device structure |
826 | * @ent: unused |
827 | */ |
828 | static int |
829 | liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent) |
830 | { |
831 | struct octeon_device *oct_dev = NULL; |
832 | struct handshake *hs; |
833 | |
oct_dev = octeon_allocate_device(pdev->device,
sizeof(struct octeon_device_priv));
836 | if (!oct_dev) { |
837 | dev_err(&pdev->dev, "Unable to allocate device\n" ); |
838 | return -ENOMEM; |
839 | } |
840 | |
841 | if (pdev->device == OCTEON_CN23XX_PF_VID) |
842 | oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; |
843 | |
844 | /* Enable PTP for 6XXX Device */ |
845 | if (((pdev->device == OCTEON_CN66XX) || |
846 | (pdev->device == OCTEON_CN68XX))) |
847 | oct_dev->ptp_enable = true; |
848 | else |
849 | oct_dev->ptp_enable = false; |
850 | |
851 | dev_info(&pdev->dev, "Initializing device %x:%x.\n" , |
852 | (u32)pdev->vendor, (u32)pdev->device); |
853 | |
854 | /* Assign octeon_device for this device to the private data area. */ |
pci_set_drvdata(pdev, oct_dev);
856 | |
857 | /* set linux specific device pointer */ |
858 | oct_dev->pci_dev = (void *)pdev; |
859 | |
860 | oct_dev->subsystem_id = pdev->subsystem_vendor | |
861 | (pdev->subsystem_device << 16); |
862 | |
863 | hs = &handshake[oct_dev->octeon_id]; |
init_completion(&hs->init);
init_completion(&hs->started);
866 | hs->pci_dev = pdev; |
867 | |
868 | if (oct_dev->octeon_id == 0) |
869 | /* first LiquidIO NIC is detected */ |
870 | complete(&first_stage); |
871 | |
872 | if (octeon_device_init(oct_dev)) { |
873 | complete(&hs->init); |
874 | liquidio_remove(pdev); |
875 | return -ENOMEM; |
876 | } |
877 | |
878 | if (OCTEON_CN23XX_PF(oct_dev)) { |
879 | u8 bus, device, function; |
880 | |
if (atomic_read(oct_dev->adapter_refcount) == 1) {
882 | /* Each NIC gets one watchdog kernel thread. The first |
883 | * PF (of each NIC) that gets pci_driver->probe()'d |
884 | * creates that thread. |
885 | */ |
886 | bus = pdev->bus->number; |
887 | device = PCI_SLOT(pdev->devfn); |
888 | function = PCI_FUNC(pdev->devfn); |
889 | oct_dev->watchdog_task = kthread_run(liquidio_watchdog, |
890 | oct_dev, |
891 | "liowd/%02hhx:%02hhx.%hhx" , |
892 | bus, device, function); |
if (IS_ERR(oct_dev->watchdog_task)) {
894 | oct_dev->watchdog_task = NULL; |
895 | dev_err(&oct_dev->pci_dev->dev, |
896 | "failed to create kernel_thread\n" ); |
897 | liquidio_remove(pdev); |
898 | return -1; |
899 | } |
900 | } |
901 | } |
902 | |
903 | oct_dev->rx_pause = 1; |
904 | oct_dev->tx_pause = 1; |
905 | |
906 | dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n" ); |
907 | |
908 | return 0; |
909 | } |
910 | |
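/**
 * fw_type_is_auto - Check whether the fw_type module parameter is "auto"
 */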
911 | static bool fw_type_is_auto(void) |
912 | { |
913 | return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, |
914 | sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; |
915 | } |
916 | |
917 | /** |
918 | * octeon_pci_flr - PCI FLR for each Octeon device. |
919 | * @oct: octeon device |
920 | */ |
921 | static void octeon_pci_flr(struct octeon_device *oct) |
922 | { |
923 | int rc; |
924 | |
pci_save_state(oct->pci_dev);
926 | |
pci_cfg_access_lock(oct->pci_dev);
928 | |
929 | /* Quiesce the device completely */ |
pci_write_config_word(oct->pci_dev, PCI_COMMAND,
931 | PCI_COMMAND_INTX_DISABLE); |
932 | |
rc = __pci_reset_function_locked(oct->pci_dev);
934 | |
935 | if (rc != 0) |
936 | dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n" , |
937 | rc, oct->pf_num); |
938 | |
pci_cfg_access_unlock(oct->pci_dev);
940 | |
pci_restore_state(oct->pci_dev);
942 | } |
943 | |
944 | /** |
945 | * octeon_destroy_resources - Destroy resources associated with octeon device |
946 | * @oct: octeon device |
947 | */ |
948 | static void octeon_destroy_resources(struct octeon_device *oct) |
949 | { |
950 | int i, refcount; |
951 | struct msix_entry *msix_entries; |
952 | struct octeon_device_priv *oct_priv = oct->priv; |
953 | |
954 | struct handshake *hs; |
955 | |
switch (atomic_read(&oct->status)) {
957 | case OCT_DEV_RUNNING: |
958 | case OCT_DEV_CORE_OK: |
959 | |
960 | /* No more instructions will be forwarded. */ |
atomic_set(&oct->status, OCT_DEV_IN_RESET);
962 | |
963 | oct->app_mode = CVM_DRV_INVALID_APP; |
964 | dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n" , |
965 | lio_get_state_string(&oct->status)); |
966 | |
967 | schedule_timeout_uninterruptible(HZ / 10); |
968 | |
969 | fallthrough; |
970 | case OCT_DEV_HOST_OK: |
971 | |
972 | case OCT_DEV_CONSOLE_INIT_DONE: |
973 | /* Remove any consoles */ |
974 | octeon_remove_consoles(oct); |
975 | |
976 | fallthrough; |
977 | case OCT_DEV_IO_QUEUES_DONE: |
978 | if (lio_wait_for_instr_fetch(oct)) |
979 | dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n" ); |
980 | |
981 | if (wait_for_pending_requests(oct)) |
982 | dev_err(&oct->pci_dev->dev, "There were pending requests\n" ); |
983 | |
984 | /* Disable the input and output queues now. No more packets will |
985 | * arrive from Octeon, but we should wait for all packet |
986 | * processing to finish. |
987 | */ |
988 | oct->fn_list.disable_io_queues(oct); |
989 | |
990 | if (lio_wait_for_oq_pkts(oct)) |
991 | dev_err(&oct->pci_dev->dev, "OQ had pending packets\n" ); |
992 | |
993 | /* Force all requests waiting to be fetched by OCTEON to |
994 | * complete. |
995 | */ |
996 | for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { |
997 | struct octeon_instr_queue *iq; |
998 | |
999 | if (!(oct->io_qmask.iq & BIT_ULL(i))) |
1000 | continue; |
1001 | iq = oct->instr_queue[i]; |
1002 | |
if (atomic_read(&iq->instr_pending)) {
spin_lock_bh(&iq->lock);
1005 | iq->fill_cnt = 0; |
1006 | iq->octeon_read_index = iq->host_write_index; |
1007 | iq->stats.instr_processed += |
atomic_read(&iq->instr_pending);
lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
1011 | } |
1012 | } |
1013 | |
lio_process_ordered_list(oct, 1);
1015 | octeon_free_sc_done_list(oct); |
1016 | octeon_free_sc_zombie_list(oct); |
1017 | |
1018 | fallthrough; |
1019 | case OCT_DEV_INTR_SET_DONE: |
1020 | /* Disable interrupts */ |
1021 | oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); |
1022 | |
1023 | if (oct->msix_on) { |
1024 | msix_entries = (struct msix_entry *)oct->msix_entries; |
1025 | for (i = 0; i < oct->num_msix_irqs - 1; i++) { |
1026 | if (oct->ioq_vector[i].vector) { |
1027 | /* clear the affinity_cpumask */ |
1028 | irq_set_affinity_hint( |
msix_entries[i].vector,
1030 | NULL); |
1031 | free_irq(msix_entries[i].vector, |
1032 | &oct->ioq_vector[i]); |
1033 | oct->ioq_vector[i].vector = 0; |
1034 | } |
1035 | } |
1036 | /* non-iov vector's argument is oct struct */ |
1037 | free_irq(msix_entries[i].vector, oct); |
1038 | |
pci_disable_msix(oct->pci_dev);
kfree(oct->msix_entries);
1041 | oct->msix_entries = NULL; |
1042 | } else { |
1043 | /* Release the interrupt line */ |
1044 | free_irq(oct->pci_dev->irq, oct); |
1045 | |
1046 | if (oct->flags & LIO_FLAG_MSI_ENABLED) |
pci_disable_msi(oct->pci_dev);
1048 | } |
1049 | |
kfree(oct->irq_name_storage);
1051 | oct->irq_name_storage = NULL; |
1052 | |
1053 | fallthrough; |
1054 | case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: |
1055 | if (OCTEON_CN23XX_PF(oct)) |
1056 | octeon_free_ioq_vector(oct); |
1057 | |
1058 | fallthrough; |
1059 | case OCT_DEV_MBOX_SETUP_DONE: |
1060 | if (OCTEON_CN23XX_PF(oct)) |
1061 | oct->fn_list.free_mbox(oct); |
1062 | |
1063 | fallthrough; |
1064 | case OCT_DEV_IN_RESET: |
1065 | case OCT_DEV_DROQ_INIT_DONE: |
1066 | /* Wait for any pending operations */ |
1067 | mdelay(100); |
1068 | for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { |
1069 | if (!(oct->io_qmask.oq & BIT_ULL(i))) |
1070 | continue; |
octeon_delete_droq(oct, i);
1072 | } |
1073 | |
1074 | /* Force any pending handshakes to complete */ |
1075 | for (i = 0; i < MAX_OCTEON_DEVICES; i++) { |
1076 | hs = &handshake[i]; |
1077 | |
1078 | if (hs->pci_dev) { |
1079 | handshake[oct->octeon_id].init_ok = 0; |
1080 | complete(&handshake[oct->octeon_id].init); |
1081 | handshake[oct->octeon_id].started_ok = 0; |
1082 | complete(&handshake[oct->octeon_id].started); |
1083 | } |
1084 | } |
1085 | |
1086 | fallthrough; |
1087 | case OCT_DEV_RESP_LIST_INIT_DONE: |
octeon_delete_response_list(oct);
1089 | |
1090 | fallthrough; |
1091 | case OCT_DEV_INSTR_QUEUE_INIT_DONE: |
1092 | for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { |
1093 | if (!(oct->io_qmask.iq & BIT_ULL(i))) |
1094 | continue; |
octeon_delete_instr_queue(oct, i);
1096 | } |
1097 | #ifdef CONFIG_PCI_IOV |
1098 | if (oct->sriov_info.sriov_enabled) |
pci_disable_sriov(oct->pci_dev);
1100 | #endif |
1101 | fallthrough; |
1102 | case OCT_DEV_SC_BUFF_POOL_INIT_DONE: |
1103 | octeon_free_sc_buffer_pool(oct); |
1104 | |
1105 | fallthrough; |
1106 | case OCT_DEV_DISPATCH_INIT_DONE: |
octeon_delete_dispatch_list(oct);
cancel_delayed_work_sync(&oct->nic_poll_work.work);
1109 | |
1110 | fallthrough; |
1111 | case OCT_DEV_PCI_MAP_DONE: |
1112 | refcount = octeon_deregister_device(oct); |
1113 | |
1114 | /* Soft reset the octeon device before exiting. |
1115 | * However, if fw was loaded from card (i.e. autoboot), |
1116 | * perform an FLR instead. |
1117 | * Implementation note: only soft-reset the device |
1118 | * if it is a CN6XXX OR the LAST CN23XX device. |
1119 | */ |
if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1121 | octeon_pci_flr(oct); |
1122 | else if (OCTEON_CN6XXX(oct) || !refcount) |
1123 | oct->fn_list.soft_reset(oct); |
1124 | |
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
1127 | |
1128 | fallthrough; |
1129 | case OCT_DEV_PCI_ENABLE_DONE: |
1130 | /* Disable the device, releasing the PCI INT */ |
pci_disable_device(oct->pci_dev);
1132 | |
1133 | fallthrough; |
1134 | case OCT_DEV_BEGIN_STATE: |
1135 | /* Nothing to be done here either */ |
1136 | break; |
1137 | } /* end switch (oct->status) */ |
1138 | |
tasklet_kill(&oct_priv->droq_tasklet);
1140 | } |
1141 | |
1142 | /** |
1143 | * send_rx_ctrl_cmd - Send Rx control command |
1144 | * @lio: per-network private data |
1145 | * @start_stop: whether to start or stop |
1146 | */ |
1147 | static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) |
1148 | { |
1149 | struct octeon_soft_command *sc; |
1150 | union octnet_cmd *ncmd; |
1151 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
1152 | int retval; |
1153 | |
1154 | if (oct->props[lio->ifidx].rx_on == start_stop) |
1155 | return 0; |
1156 | |
1157 | sc = (struct octeon_soft_command *) |
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
1160 | if (!sc) { |
1161 | netif_info(lio, rx_err, lio->netdev, |
1162 | "Failed to allocate octeon_soft_command struct\n" ); |
1163 | return -ENOMEM; |
1164 | } |
1165 | |
1166 | ncmd = (union octnet_cmd *)sc->virtdptr; |
1167 | |
1168 | ncmd->u64 = 0; |
1169 | ncmd->s.cmd = OCTNET_CMD_RX_CTL; |
1170 | ncmd->s.param1 = start_stop; |
1171 | |
octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1173 | |
1174 | sc->iq_no = lio->linfo.txpciq[0].s.q_no; |
1175 | |
1176 | octeon_prepare_soft_command(oct, sc, OPCODE_NIC, |
OPCODE_NIC_CMD, 0, 0, 0);
1178 | |
init_completion(&sc->complete);
1180 | sc->sc_status = OCTEON_REQUEST_PENDING; |
1181 | |
1182 | retval = octeon_send_soft_command(oct, sc); |
1183 | if (retval == IQ_SEND_FAILED) { |
1184 | netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n" ); |
1185 | octeon_free_soft_command(oct, sc); |
1186 | } else { |
1187 | /* Sleep on a wait queue till the cond flag indicates that the |
1188 | * response arrived or timed-out. |
1189 | */ |
retval = wait_for_sc_completion_timeout(oct, sc, 0);
1191 | if (retval) |
1192 | return retval; |
1193 | |
1194 | oct->props[lio->ifidx].rx_on = start_stop; |
1195 | WRITE_ONCE(sc->caller_is_done, true); |
1196 | } |
1197 | |
1198 | return retval; |
1199 | } |
1200 | |
1201 | /** |
1202 | * liquidio_destroy_nic_device - Destroy NIC device interface |
1203 | * @oct: octeon device |
1204 | * @ifidx: which interface to destroy |
1205 | * |
1206 | * Cleanup associated with each interface for an Octeon device when NIC |
1207 | * module is being unloaded or if initialization fails during load. |
1208 | */ |
1209 | static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) |
1210 | { |
1211 | struct net_device *netdev = oct->props[ifidx].netdev; |
1212 | struct octeon_device_priv *oct_priv = oct->priv; |
1213 | struct napi_struct *napi, *n; |
1214 | struct lio *lio; |
1215 | |
1216 | if (!netdev) { |
1217 | dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n" , |
1218 | __func__, ifidx); |
1219 | return; |
1220 | } |
1221 | |
1222 | lio = GET_LIO(netdev); |
1223 | |
1224 | dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n" ); |
1225 | |
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1227 | liquidio_stop(netdev); |
1228 | |
1229 | if (oct->props[lio->ifidx].napi_enabled == 1) { |
1230 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
napi_disable(napi);
1232 | |
1233 | oct->props[lio->ifidx].napi_enabled = 0; |
1234 | |
1235 | if (OCTEON_CN23XX_PF(oct)) |
1236 | oct->droq[0]->ops.poll_mode = 0; |
1237 | } |
1238 | |
1239 | /* Delete NAPI */ |
1240 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
1241 | netif_napi_del(napi); |
1242 | |
tasklet_enable(&oct_priv->droq_tasklet);
1244 | |
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
1247 | |
1248 | cleanup_sync_octeon_time_wq(netdev); |
1249 | cleanup_link_status_change_wq(netdev); |
1250 | |
1251 | cleanup_rx_oom_poll_fn(netdev); |
1252 | |
1253 | lio_delete_glists(lio); |
1254 | |
free_netdev(netdev);
1256 | |
1257 | oct->props[ifidx].gmxport = -1; |
1258 | |
1259 | oct->props[ifidx].netdev = NULL; |
1260 | } |
1261 | |
1262 | /** |
1263 | * liquidio_stop_nic_module - Stop complete NIC functionality |
1264 | * @oct: octeon device |
1265 | */ |
1266 | static int liquidio_stop_nic_module(struct octeon_device *oct) |
1267 | { |
1268 | int i, j; |
1269 | struct lio *lio; |
1270 | |
1271 | dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n" ); |
device_lock(&oct->pci_dev->dev);
1273 | if (oct->devlink) { |
devlink_unregister(oct->devlink);
devlink_free(oct->devlink);
1276 | oct->devlink = NULL; |
1277 | } |
device_unlock(&oct->pci_dev->dev);
1279 | |
1280 | if (!oct->ifcount) { |
1281 | dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n" ); |
1282 | return 1; |
1283 | } |
1284 | |
spin_lock_bh(&oct->cmd_resp_wqlock);
oct->cmd_resp_state = OCT_DRV_OFFLINE;
spin_unlock_bh(&oct->cmd_resp_wqlock);
1288 | |
1289 | lio_vf_rep_destroy(oct); |
1290 | |
1291 | for (i = 0; i < oct->ifcount; i++) { |
1292 | lio = GET_LIO(oct->props[i].netdev); |
1293 | for (j = 0; j < oct->num_oqs; j++) |
octeon_unregister_droq_ops(oct,
lio->linfo.rxpciq[j].s.q_no);
1296 | } |
1297 | |
1298 | for (i = 0; i < oct->ifcount; i++) |
liquidio_destroy_nic_device(oct, i);
1300 | |
1301 | dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n" ); |
1302 | return 0; |
1303 | } |
1304 | |
1305 | /** |
1306 | * liquidio_remove - Cleans up resources at unload time |
1307 | * @pdev: PCI device structure |
1308 | */ |
1309 | static void liquidio_remove(struct pci_dev *pdev) |
1310 | { |
1311 | struct octeon_device *oct_dev = pci_get_drvdata(pdev); |
1312 | |
1313 | dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n" ); |
1314 | |
1315 | if (oct_dev->watchdog_task) |
kthread_stop(oct_dev->watchdog_task);
1317 | |
1318 | if (!oct_dev->octeon_id && |
1319 | oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) |
1320 | lio_vf_rep_modexit(); |
1321 | |
1322 | if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) |
liquidio_stop_nic_module(oct_dev);
1324 | |
1325 | /* Reset the octeon device and cleanup all memory allocated for |
1326 | * the octeon device by driver. |
1327 | */ |
octeon_destroy_resources(oct_dev);
1329 | |
1330 | dev_info(&oct_dev->pci_dev->dev, "Device removed\n" ); |
1331 | |
1332 | /* This octeon device has been removed. Update the global |
1333 | * data structure to reflect this. Free the device structure. |
1334 | */ |
octeon_free_device_mem(oct_dev);
1336 | } |
1337 | |
1338 | /** |
1339 | * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space |
1340 | * @oct: octeon device |
1341 | */ |
1342 | static int octeon_chip_specific_setup(struct octeon_device *oct) |
1343 | { |
1344 | u32 dev_id, rev_id; |
1345 | int ret = 1; |
1346 | |
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1349 | oct->rev_id = rev_id & 0xff; |
1350 | |
1351 | switch (dev_id) { |
1352 | case OCTEON_CN68XX_PCIID: |
1353 | oct->chip_id = OCTEON_CN68XX; |
1354 | ret = lio_setup_cn68xx_octeon_device(oct); |
1355 | break; |
1356 | |
1357 | case OCTEON_CN66XX_PCIID: |
1358 | oct->chip_id = OCTEON_CN66XX; |
1359 | ret = lio_setup_cn66xx_octeon_device(oct); |
1360 | break; |
1361 | |
1362 | case OCTEON_CN23XX_PCIID_PF: |
1363 | oct->chip_id = OCTEON_CN23XX_PF_VID; |
1364 | ret = setup_cn23xx_octeon_pf_device(oct); |
1365 | if (ret) |
1366 | break; |
1367 | #ifdef CONFIG_PCI_IOV |
1368 | if (!ret) |
pci_sriov_set_totalvfs(oct->pci_dev,
oct->sriov_info.max_vfs);
1371 | #endif |
1372 | break; |
1373 | |
1374 | default: |
1375 | dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n" , |
1376 | dev_id); |
1377 | } |
1378 | |
1379 | return ret; |
1380 | } |
1381 | |
1382 | /** |
1383 | * octeon_pci_os_setup - PCI initialization for each Octeon device. |
1384 | * @oct: octeon device |
1385 | */ |
1386 | static int octeon_pci_os_setup(struct octeon_device *oct) |
1387 | { |
1388 | /* setup PCI stuff first */ |
if (pci_enable_device(oct->pci_dev)) {
1390 | dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n" ); |
1391 | return 1; |
1392 | } |
1393 | |
if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1395 | dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n" ); |
pci_disable_device(oct->pci_dev);
1397 | return 1; |
1398 | } |
1399 | |
1400 | /* Enable PCI DMA Master. */ |
pci_set_master(oct->pci_dev);
1402 | |
1403 | return 0; |
1404 | } |
1405 | |
1406 | /** |
1407 | * free_netbuf - Unmap and free network buffer |
1408 | * @buf: buffer |
1409 | */ |
1410 | static void free_netbuf(void *buf) |
1411 | { |
1412 | struct sk_buff *skb; |
1413 | struct octnet_buf_free_info *finfo; |
1414 | struct lio *lio; |
1415 | |
1416 | finfo = (struct octnet_buf_free_info *)buf; |
1417 | skb = finfo->skb; |
1418 | lio = finfo->lio; |
1419 | |
1420 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, |
1421 | DMA_TO_DEVICE); |
1422 | |
tx_buffer_free(skb);
1424 | } |
1425 | |
1426 | /** |
1427 | * free_netsgbuf - Unmap and free gather buffer |
1428 | * @buf: buffer |
1429 | */ |
1430 | static void free_netsgbuf(void *buf) |
1431 | { |
1432 | struct octnet_buf_free_info *finfo; |
1433 | struct sk_buff *skb; |
1434 | struct lio *lio; |
1435 | struct octnic_gather *g; |
1436 | int i, frags, iq; |
1437 | |
1438 | finfo = (struct octnet_buf_free_info *)buf; |
1439 | skb = finfo->skb; |
1440 | lio = finfo->lio; |
1441 | g = finfo->g; |
1442 | frags = skb_shinfo(skb)->nr_frags; |
1443 | |
1444 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, |
1445 | g->sg[0].ptr[0], (skb->len - skb->data_len), |
1446 | DMA_TO_DEVICE); |
1447 | |
1448 | i = 1; |
1449 | while (frags--) { |
1450 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
1451 | |
1452 | dma_unmap_page(&lio->oct_dev->pci_dev->dev, |
1453 | g->sg[(i >> 2)].ptr[(i & 3)], |
1454 | skb_frag_size(frag), DMA_TO_DEVICE); |
1455 | i++; |
1456 | } |
1457 | |
iq = skb_iq(lio->oct_dev, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
1462 | |
tx_buffer_free(skb);
1464 | } |
1465 | |
1466 | /** |
1467 | * free_netsgbuf_with_resp - Unmap and free gather buffer with response |
1468 | * @buf: buffer |
1469 | */ |
1470 | static void free_netsgbuf_with_resp(void *buf) |
1471 | { |
1472 | struct octeon_soft_command *sc; |
1473 | struct octnet_buf_free_info *finfo; |
1474 | struct sk_buff *skb; |
1475 | struct lio *lio; |
1476 | struct octnic_gather *g; |
1477 | int i, frags, iq; |
1478 | |
1479 | sc = (struct octeon_soft_command *)buf; |
1480 | skb = (struct sk_buff *)sc->callback_arg; |
1481 | finfo = (struct octnet_buf_free_info *)&skb->cb; |
1482 | |
1483 | lio = finfo->lio; |
1484 | g = finfo->g; |
1485 | frags = skb_shinfo(skb)->nr_frags; |
1486 | |
1487 | dma_unmap_single(&lio->oct_dev->pci_dev->dev, |
1488 | g->sg[0].ptr[0], (skb->len - skb->data_len), |
1489 | DMA_TO_DEVICE); |
1490 | |
1491 | i = 1; |
1492 | while (frags--) { |
1493 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; |
1494 | |
1495 | dma_unmap_page(&lio->oct_dev->pci_dev->dev, |
1496 | g->sg[(i >> 2)].ptr[(i & 3)], |
1497 | skb_frag_size(frag), DMA_TO_DEVICE); |
1498 | i++; |
1499 | } |
1500 | |
iq = skb_iq(lio->oct_dev, skb);
1502 | |
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
1506 | |
1507 | /* Don't free the skb yet */ |
1508 | } |
1509 | |
1510 | /** |
1511 | * liquidio_ptp_adjfine - Adjust ptp frequency |
1512 | * @ptp: PTP clock info |
1513 | * @scaled_ppm: how much to adjust by, in scaled parts-per-million |
1514 | * |
1515 | * Scaled parts per million is ppm with a 16-bit binary fractional field. |
1516 | */ |
1517 | static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) |
1518 | { |
1519 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
1520 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
1522 | u64 comp, delta; |
1523 | unsigned long flags; |
1524 | bool neg_adj = false; |
1525 | |
1526 | if (ppb < 0) { |
1527 | neg_adj = true; |
1528 | ppb = -ppb; |
1529 | } |
1530 | |
1531 | /* The hardware adds the clock compensation value to the |
1532 | * PTP clock on every coprocessor clock cycle, so we |
1533 | * compute the delta in terms of coprocessor clocks. |
1534 | */ |
1535 | delta = (u64)ppb << 32; |
1536 | do_div(delta, oct->coproc_clock_rate); |
1537 | |
1538 | spin_lock_irqsave(&lio->ptp_lock, flags); |
1539 | comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); |
1540 | if (neg_adj) |
1541 | comp -= delta; |
1542 | else |
1543 | comp += delta; |
lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
spin_unlock_irqrestore(&lio->ptp_lock, flags);
1546 | |
1547 | return 0; |
1548 | } |
1549 | |
1550 | /** |
1551 | * liquidio_ptp_adjtime - Adjust ptp time |
1552 | * @ptp: PTP clock info |
1553 | * @delta: how much to adjust by, in nanosecs |
1554 | */ |
1555 | static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
1556 | { |
1557 | unsigned long flags; |
1558 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
1559 | |
1560 | spin_lock_irqsave(&lio->ptp_lock, flags); |
1561 | lio->ptp_adjust += delta; |
spin_unlock_irqrestore(&lio->ptp_lock, flags);
1563 | |
1564 | return 0; |
1565 | } |
1566 | |
1567 | /** |
1568 | * liquidio_ptp_gettime - Get hardware clock time, including any adjustment |
1569 | * @ptp: PTP clock info |
1570 | * @ts: timespec |
1571 | */ |
1572 | static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, |
1573 | struct timespec64 *ts) |
1574 | { |
1575 | u64 ns; |
1576 | unsigned long flags; |
1577 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
1578 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
1579 | |
1580 | spin_lock_irqsave(&lio->ptp_lock, flags); |
1581 | ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); |
1582 | ns += lio->ptp_adjust; |
spin_unlock_irqrestore(&lio->ptp_lock, flags);
1584 | |
*ts = ns_to_timespec64(ns);
1586 | |
1587 | return 0; |
1588 | } |
1589 | |
1590 | /** |
1591 | * liquidio_ptp_settime - Set hardware clock time. Reset adjustment |
1592 | * @ptp: PTP clock info |
1593 | * @ts: timespec |
1594 | */ |
1595 | static int liquidio_ptp_settime(struct ptp_clock_info *ptp, |
1596 | const struct timespec64 *ts) |
1597 | { |
1598 | u64 ns; |
1599 | unsigned long flags; |
1600 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
1601 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
1602 | |
1603 | ns = timespec64_to_ns(ts); |
1604 | |
1605 | spin_lock_irqsave(&lio->ptp_lock, flags); |
lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1607 | lio->ptp_adjust = 0; |
spin_unlock_irqrestore(&lio->ptp_lock, flags);
1609 | |
1610 | return 0; |
1611 | } |
1612 | |
1613 | /** |
1614 | * liquidio_ptp_enable - Check if PTP is enabled |
1615 | * @ptp: PTP clock info |
1616 | * @rq: request |
1617 | * @on: is it on |
1618 | */ |
1619 | static int |
1620 | liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp, |
1621 | struct ptp_clock_request __maybe_unused *rq, |
1622 | int __maybe_unused on) |
1623 | { |
1624 | return -EOPNOTSUPP; |
1625 | } |
1626 | |
1627 | /** |
1628 | * oct_ptp_open - Open PTP clock source |
1629 | * @netdev: network device |
1630 | */ |
1631 | static void oct_ptp_open(struct net_device *netdev) |
1632 | { |
1633 | struct lio *lio = GET_LIO(netdev); |
1634 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
1635 | |
1636 | spin_lock_init(&lio->ptp_lock); |
1637 | |
snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1639 | lio->ptp_info.owner = THIS_MODULE; |
1640 | lio->ptp_info.max_adj = 250000000; |
1641 | lio->ptp_info.n_alarm = 0; |
1642 | lio->ptp_info.n_ext_ts = 0; |
1643 | lio->ptp_info.n_per_out = 0; |
1644 | lio->ptp_info.pps = 0; |
1645 | lio->ptp_info.adjfine = liquidio_ptp_adjfine; |
1646 | lio->ptp_info.adjtime = liquidio_ptp_adjtime; |
1647 | lio->ptp_info.gettime64 = liquidio_ptp_gettime; |
1648 | lio->ptp_info.settime64 = liquidio_ptp_settime; |
1649 | lio->ptp_info.enable = liquidio_ptp_enable; |
1650 | |
1651 | lio->ptp_adjust = 0; |
1652 | |
lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
&oct->pci_dev->dev);
1655 | |
if (IS_ERR(lio->ptp_clock))
1657 | lio->ptp_clock = NULL; |
1658 | } |
1659 | |
1660 | /** |
1661 | * liquidio_ptp_init - Init PTP clock |
1662 | * @oct: octeon device |
1663 | */ |
1664 | static void liquidio_ptp_init(struct octeon_device *oct) |
1665 | { |
1666 | u64 clock_comp, cfg; |
1667 | |
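/* clock_comp below is (NSEC_PER_SEC << 32) / coproc_clock_rate, i.e. the
 * nanoseconds advanced per coprocessor clock tick in 32.32 fixed point;
 * setting bit 0 of CLOCK_CFG then enables the PTP clock.
 */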
1668 | clock_comp = (u64)NSEC_PER_SEC << 32; |
1669 | do_div(clock_comp, oct->coproc_clock_rate); |
1670 | lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1671 | 
1672 | /* Enable */
1673 | cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1674 | lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1675 | } |
1676 | |
1677 | /** |
1678 | * load_firmware - Load firmware to device |
1679 | * @oct: octeon device |
1680 | * |
1681 | * Maps device to firmware filename, requests firmware, and downloads it |
1682 | */ |
1683 | static int load_firmware(struct octeon_device *oct) |
1684 | { |
1685 | int ret = 0; |
1686 | const struct firmware *fw; |
1687 | char fw_name[LIO_MAX_FW_FILENAME_LEN]; |
1688 | char *tmp_fw_type; |
1689 | |
1690 | if (fw_type_is_auto()) { |
1691 | tmp_fw_type = LIO_FW_NAME_TYPE_NIC; |
1692 | strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type)); |
1693 | } else { |
1694 | tmp_fw_type = fw_type; |
1695 | } |
1696 | |
1697 | sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1698 | octeon_get_conf(oct)->card_name, tmp_fw_type, |
1699 | LIO_FW_NAME_SUFFIX); |
1700 | |
1701 | ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1702 | if (ret) { |
1703 | dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n" , |
1704 | fw_name); |
1705 | release_firmware(fw); |
1706 | return ret; |
1707 | } |
1708 | |
1709 | ret = octeon_download_firmware(oct, fw->data, fw->size);
1710 | |
1711 | release_firmware(fw); |
1712 | |
1713 | return ret; |
1714 | } |
1715 | |
1716 | /** |
1717 | * octnet_poll_check_txq_status - Poll routine for checking transmit queue status |
1718 | * @work: work_struct data structure |
1719 | */ |
1720 | static void octnet_poll_check_txq_status(struct work_struct *work) |
1721 | { |
1722 | struct cavium_wk *wk = (struct cavium_wk *)work; |
1723 | struct lio *lio = (struct lio *)wk->ctxptr; |
1724 | |
1725 | if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) |
1726 | return; |
1727 | |
1728 | check_txq_status(lio); |
1729 | queue_delayed_work(lio->txq_status_wq.wq,
1730 | &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1731 | } |
1732 | |
1733 | /** |
1734 | * setup_tx_poll_fn - Sets up the txq poll check |
1735 | * @netdev: network device |
1736 | */ |
1737 | static inline int setup_tx_poll_fn(struct net_device *netdev) |
1738 | { |
1739 | struct lio *lio = GET_LIO(netdev); |
1740 | struct octeon_device *oct = lio->oct_dev; |
1741 | |
1742 | lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1743 | WQ_MEM_RECLAIM, 0);
1744 | if (!lio->txq_status_wq.wq) { |
1745 | dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n" ); |
1746 | return -1; |
1747 | } |
1748 | INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, |
1749 | octnet_poll_check_txq_status); |
1750 | lio->txq_status_wq.wk.ctxptr = lio; |
1751 | queue_delayed_work(lio->txq_status_wq.wq,
1752 | &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1753 | return 0; |
1754 | } |
1755 | |
1756 | static inline void cleanup_tx_poll_fn(struct net_device *netdev) |
1757 | { |
1758 | struct lio *lio = GET_LIO(netdev); |
1759 | |
1760 | if (lio->txq_status_wq.wq) { |
1761 | cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1762 | destroy_workqueue(lio->txq_status_wq.wq);
1763 | } |
1764 | } |
1765 | |
1766 | /** |
1767 | * liquidio_open - Net device open for LiquidIO |
1768 | * @netdev: network device |
1769 | */ |
1770 | static int liquidio_open(struct net_device *netdev) |
1771 | { |
1772 | struct lio *lio = GET_LIO(netdev); |
1773 | struct octeon_device *oct = lio->oct_dev; |
1774 | struct octeon_device_priv *oct_priv = oct->priv; |
1775 | struct napi_struct *napi, *n; |
1776 | int ret = 0; |
1777 | |
1778 | if (oct->props[lio->ifidx].napi_enabled == 0) { |
1779 | tasklet_disable(&oct_priv->droq_tasklet);
1780 | |
1781 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
1782 | napi_enable(napi);
1783 | |
1784 | oct->props[lio->ifidx].napi_enabled = 1; |
1785 | |
1786 | if (OCTEON_CN23XX_PF(oct)) |
1787 | oct->droq[0]->ops.poll_mode = 1; |
1788 | } |
1789 | |
1790 | if (oct->ptp_enable) |
1791 | oct_ptp_open(netdev); |
1792 | |
1793 | ifstate_set(lio, LIO_IFSTATE_RUNNING); |
1794 | |
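/* When the device is not a CN23XX PF running with MSI-X, transmit queue
 * status is polled every millisecond via the txq-status delayed work set
 * up below.
 */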
1795 | if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { |
1796 | ret = setup_tx_poll_fn(netdev); |
1797 | if (ret) |
1798 | goto err_poll; |
1799 | } |
1800 | |
1801 | netif_tx_start_all_queues(netdev);
1802 | |
1803 | /* Ready for link status updates */ |
1804 | lio->intf_open = 1; |
1805 | |
1806 | netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n" ); |
1807 | |
1808 | /* tell Octeon to start forwarding packets to host */ |
1809 | ret = send_rx_ctrl_cmd(lio, 1);
1810 | if (ret) |
1811 | goto err_rx_ctrl; |
1812 | |
1813 | /* start periodical statistics fetch */ |
1814 | INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); |
1815 | lio->stats_wk.ctxptr = lio; |
1816 | schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1817 | (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1818 | |
1819 | dev_info(&oct->pci_dev->dev, "%s interface is opened\n" , |
1820 | netdev->name); |
1821 | |
1822 | return 0; |
1823 | |
1824 | err_rx_ctrl: |
1825 | if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) |
1826 | cleanup_tx_poll_fn(netdev); |
1827 | err_poll: |
1828 | if (lio->ptp_clock) { |
1829 | ptp_clock_unregister(lio->ptp_clock);
1830 | lio->ptp_clock = NULL; |
1831 | } |
1832 | |
1833 | if (oct->props[lio->ifidx].napi_enabled == 1) { |
1834 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
1835 | napi_disable(napi);
1836 | |
1837 | oct->props[lio->ifidx].napi_enabled = 0; |
1838 | |
1839 | if (OCTEON_CN23XX_PF(oct)) |
1840 | oct->droq[0]->ops.poll_mode = 0; |
1841 | } |
1842 | |
1843 | return ret; |
1844 | } |
1845 | |
1846 | /** |
1847 | * liquidio_stop - Net device stop for LiquidIO |
1848 | * @netdev: network device |
1849 | */ |
1850 | static int liquidio_stop(struct net_device *netdev) |
1851 | { |
1852 | struct lio *lio = GET_LIO(netdev); |
1853 | struct octeon_device *oct = lio->oct_dev; |
1854 | struct octeon_device_priv *oct_priv = oct->priv; |
1855 | struct napi_struct *napi, *n; |
1856 | int ret = 0; |
1857 | |
1858 | ifstate_reset(lio, LIO_IFSTATE_RUNNING); |
1859 | |
1860 | /* Stop any link updates */ |
1861 | lio->intf_open = 0; |
1862 | |
1863 | stop_txqs(netdev); |
1864 | |
1865 | /* Inform that netif carrier is down */ |
1866 | netif_carrier_off(netdev);
1867 | netif_tx_disable(netdev);
1868 | |
1869 | lio->linfo.link.s.link_up = 0; |
1870 | lio->link_changes++; |
1871 | |
1872 | /* Tell Octeon that nic interface is down. */ |
1873 | ret = send_rx_ctrl_cmd(lio, 0);
1874 | if (ret) |
1875 | return ret; |
1876 | |
1877 | if (OCTEON_CN23XX_PF(oct)) { |
1878 | if (!oct->msix_on) |
1879 | cleanup_tx_poll_fn(netdev); |
1880 | } else { |
1881 | cleanup_tx_poll_fn(netdev); |
1882 | } |
1883 | |
1884 | cancel_delayed_work_sync(&lio->stats_wk.work);
1885 | 
1886 | if (lio->ptp_clock) {
1887 | ptp_clock_unregister(lio->ptp_clock);
1888 | lio->ptp_clock = NULL; |
1889 | } |
1890 | |
1891 | /* Wait for any pending Rx descriptors */ |
1892 | if (lio_wait_for_clean_oq(oct)) |
1893 | netif_info(lio, rx_err, lio->netdev, |
1894 | "Proceeding with stop interface after partial RX desc processing\n" ); |
1895 | |
1896 | if (oct->props[lio->ifidx].napi_enabled == 1) { |
1897 | list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) |
1898 | napi_disable(napi);
1899 | |
1900 | oct->props[lio->ifidx].napi_enabled = 0; |
1901 | |
1902 | if (OCTEON_CN23XX_PF(oct)) |
1903 | oct->droq[0]->ops.poll_mode = 0; |
1904 | |
1905 | tasklet_enable(&oct_priv->droq_tasklet);
1906 | } |
1907 | |
1908 | dev_info(&oct->pci_dev->dev, "%s interface is stopped\n" , netdev->name); |
1909 | |
1910 | return ret; |
1911 | } |
1912 | |
1913 | /** |
1914 | * get_new_flags - Converts a mask based on net device flags |
1915 | * @netdev: network device |
1916 | * |
1917 | * This routine generates an octnet_ifflags mask from the net device flags
1918 | * received from the OS. |
1919 | */ |
1920 | static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) |
1921 | { |
1922 | enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; |
1923 | |
1924 | if (netdev->flags & IFF_PROMISC) |
1925 | f |= OCTNET_IFFLAG_PROMISC; |
1926 | |
1927 | if (netdev->flags & IFF_ALLMULTI) |
1928 | f |= OCTNET_IFFLAG_ALLMULTI; |
1929 | |
1930 | if (netdev->flags & IFF_MULTICAST) { |
1931 | f |= OCTNET_IFFLAG_MULTICAST; |
1932 | |
1933 | /* Accept all multicast addresses if there are more than we |
1934 | * can handle |
1935 | */ |
1936 | if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) |
1937 | f |= OCTNET_IFFLAG_ALLMULTI; |
1938 | } |
1939 | |
1940 | if (netdev->flags & IFF_BROADCAST) |
1941 | f |= OCTNET_IFFLAG_BROADCAST; |
1942 | |
1943 | return f; |
1944 | } |
1945 | |
1946 | /** |
1947 | * liquidio_set_mcast_list - Net device set_multicast_list |
1948 | * @netdev: network device |
1949 | */ |
1950 | static void liquidio_set_mcast_list(struct net_device *netdev) |
1951 | { |
1952 | struct lio *lio = GET_LIO(netdev); |
1953 | struct octeon_device *oct = lio->oct_dev; |
1954 | struct octnic_ctrl_pkt nctrl; |
1955 | struct netdev_hw_addr *ha; |
1956 | u64 *mc; |
1957 | int ret; |
1958 | int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); |
1959 | |
1960 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
1961 | |
1962 | /* Create a ctrl pkt command to be sent to core app. */ |
1963 | nctrl.ncmd.u64 = 0; |
1964 | nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; |
1965 | nctrl.ncmd.s.param1 = get_new_flags(netdev); |
1966 | nctrl.ncmd.s.param2 = mc_count; |
1967 | nctrl.ncmd.s.more = mc_count; |
1968 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
1969 | nctrl.netpndev = (u64)netdev; |
1970 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
1971 | |
1972 | /* copy all the addresses into the udd */ |
1973 | mc = &nctrl.udd[0]; |
1974 | netdev_for_each_mc_addr(ha, netdev) { |
1975 | *mc = 0; |
1976 | memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); |
1977 | /* no need to swap bytes */ |
1978 | |
1979 | if (++mc > &nctrl.udd[mc_count]) |
1980 | break; |
1981 | } |
1982 | |
1983 | /* Apparently, any activity in this call from the kernel has to |
1984 | * be atomic. So we won't wait for response. |
1985 | */ |
1986 | |
1987 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1988 | if (ret) { |
1989 | dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n" , |
1990 | ret); |
1991 | } |
1992 | } |
1993 | |
1994 | /** |
1995 | * liquidio_set_mac - Net device set_mac_address |
1996 | * @netdev: network device |
1997 | * @p: pointer to sockaddr |
1998 | */ |
1999 | static int liquidio_set_mac(struct net_device *netdev, void *p) |
2000 | { |
2001 | int ret = 0; |
2002 | struct lio *lio = GET_LIO(netdev); |
2003 | struct octeon_device *oct = lio->oct_dev; |
2004 | struct sockaddr *addr = (struct sockaddr *)p; |
2005 | struct octnic_ctrl_pkt nctrl; |
2006 | |
2007 | if (!is_valid_ether_addr(addr->sa_data))
2008 | return -EADDRNOTAVAIL; |
2009 | |
2010 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2011 | |
2012 | nctrl.ncmd.u64 = 0; |
2013 | nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; |
2014 | nctrl.ncmd.s.param1 = 0; |
2015 | nctrl.ncmd.s.more = 1; |
2016 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2017 | nctrl.netpndev = (u64)netdev; |
2018 | |
2019 | nctrl.udd[0] = 0; |
2020 | /* The MAC Address is presented in network byte order. */ |
2021 | memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); |
2022 | |
2023 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2024 | if (ret < 0) { |
2025 | dev_err(&oct->pci_dev->dev, "MAC Address change failed\n" ); |
2026 | return -ENOMEM; |
2027 | } |
2028 | |
2029 | if (nctrl.sc_status) { |
2030 | dev_err(&oct->pci_dev->dev, |
2031 | "%s: MAC Address change failed. sc return=%x\n" , |
2032 | __func__, nctrl.sc_status); |
2033 | return -EIO; |
2034 | } |
2035 | |
2036 | eth_hw_addr_set(netdev, addr->sa_data);
2037 | memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); |
2038 | |
2039 | return 0; |
2040 | } |
2041 | |
2042 | static void |
2043 | liquidio_get_stats64(struct net_device *netdev, |
2044 | struct rtnl_link_stats64 *lstats) |
2045 | { |
2046 | struct lio *lio = GET_LIO(netdev); |
2047 | struct octeon_device *oct; |
2048 | u64 pkts = 0, drop = 0, bytes = 0; |
2049 | struct oct_droq_stats *oq_stats; |
2050 | struct oct_iq_stats *iq_stats; |
2051 | int i, iq_no, oq_no; |
2052 | |
2053 | oct = lio->oct_dev; |
2054 | |
2055 | if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) |
2056 | return; |
2057 | |
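/* Aggregate TX counters across this interface's instruction queues and RX
 * counters across its output (DROQ) queues; per-port error, multicast and
 * collision counters come from the firmware link statistics.
 */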
2058 | for (i = 0; i < oct->num_iqs; i++) { |
2059 | iq_no = lio->linfo.txpciq[i].s.q_no; |
2060 | iq_stats = &oct->instr_queue[iq_no]->stats; |
2061 | pkts += iq_stats->tx_done; |
2062 | drop += iq_stats->tx_dropped; |
2063 | bytes += iq_stats->tx_tot_bytes; |
2064 | } |
2065 | |
2066 | lstats->tx_packets = pkts; |
2067 | lstats->tx_bytes = bytes; |
2068 | lstats->tx_dropped = drop; |
2069 | |
2070 | pkts = 0; |
2071 | drop = 0; |
2072 | bytes = 0; |
2073 | |
2074 | for (i = 0; i < oct->num_oqs; i++) { |
2075 | oq_no = lio->linfo.rxpciq[i].s.q_no; |
2076 | oq_stats = &oct->droq[oq_no]->stats; |
2077 | pkts += oq_stats->rx_pkts_received; |
2078 | drop += (oq_stats->rx_dropped + |
2079 | oq_stats->dropped_nodispatch + |
2080 | oq_stats->dropped_toomany + |
2081 | oq_stats->dropped_nomem); |
2082 | bytes += oq_stats->rx_bytes_received; |
2083 | } |
2084 | |
2085 | lstats->rx_bytes = bytes; |
2086 | lstats->rx_packets = pkts; |
2087 | lstats->rx_dropped = drop; |
2088 | |
2089 | lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; |
2090 | lstats->collisions = oct->link_stats.fromhost.total_collisions; |
2091 | |
2092 | /* detailed rx_errors: */ |
2093 | lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; |
2094 | /* received pkt with CRC error */
2095 | lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2096 | /* received frame alignment error */
2097 | lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2098 | /* receiver FIFO overrun */
2099 | lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2100 | |
2101 | lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + |
2102 | lstats->rx_frame_errors + lstats->rx_fifo_errors; |
2103 | |
2104 | /* detailed tx_errors */ |
2105 | lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; |
2106 | lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; |
2107 | lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; |
2108 | |
2109 | lstats->tx_errors = lstats->tx_aborted_errors + |
2110 | lstats->tx_carrier_errors + |
2111 | lstats->tx_fifo_errors; |
2112 | } |
2113 | |
2114 | /** |
2115 | * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl |
2116 | * @netdev: network device |
2117 | * @ifr: interface request |
2118 | */ |
2119 | static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) |
2120 | { |
2121 | struct hwtstamp_config conf; |
2122 | struct lio *lio = GET_LIO(netdev); |
2123 | |
2124 | if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2125 | return -EFAULT; |
2126 | |
2127 | switch (conf.tx_type) { |
2128 | case HWTSTAMP_TX_ON: |
2129 | case HWTSTAMP_TX_OFF: |
2130 | break; |
2131 | default: |
2132 | return -ERANGE; |
2133 | } |
2134 | |
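/* Any PTP-specific RX filter request is coerced to HWTSTAMP_FILTER_ALL
 * below; the driver only distinguishes between RX timestamping disabled
 * and timestamping all received frames.
 */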
2135 | switch (conf.rx_filter) { |
2136 | case HWTSTAMP_FILTER_NONE: |
2137 | break; |
2138 | case HWTSTAMP_FILTER_ALL: |
2139 | case HWTSTAMP_FILTER_SOME: |
2140 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
2141 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
2142 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
2143 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
2144 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
2145 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
2146 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
2147 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
2148 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
2149 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
2150 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
2151 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
2152 | case HWTSTAMP_FILTER_NTP_ALL: |
2153 | conf.rx_filter = HWTSTAMP_FILTER_ALL; |
2154 | break; |
2155 | default: |
2156 | return -ERANGE; |
2157 | } |
2158 | |
2159 | if (conf.rx_filter == HWTSTAMP_FILTER_ALL) |
2160 | ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); |
2161 | |
2162 | else |
2163 | ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); |
2164 | |
2165 | return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2166 | } |
2167 | |
2168 | /** |
2169 | * liquidio_ioctl - ioctl handler |
2170 | * @netdev: network device |
2171 | * @ifr: interface request |
2172 | * @cmd: command |
2173 | */ |
2174 | static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
2175 | { |
2176 | struct lio *lio = GET_LIO(netdev); |
2177 | |
2178 | switch (cmd) { |
2179 | case SIOCSHWTSTAMP: |
2180 | if (lio->oct_dev->ptp_enable) |
2181 | return hwtstamp_ioctl(netdev, ifr); |
2182 | fallthrough; |
2183 | default: |
2184 | return -EOPNOTSUPP; |
2185 | } |
2186 | } |
2187 | |
2188 | /** |
2189 | * handle_timestamp - handle a Tx timestamp response |
2190 | * @oct: octeon device |
2191 | * @status: response status |
2192 | * @buf: pointer to skb |
2193 | */ |
2194 | static void handle_timestamp(struct octeon_device *oct, |
2195 | u32 status, |
2196 | void *buf) |
2197 | { |
2198 | struct octnet_buf_free_info *finfo; |
2199 | struct octeon_soft_command *sc; |
2200 | struct oct_timestamp_resp *resp; |
2201 | struct lio *lio; |
2202 | struct sk_buff *skb = (struct sk_buff *)buf; |
2203 | |
2204 | finfo = (struct octnet_buf_free_info *)skb->cb; |
2205 | lio = finfo->lio; |
2206 | sc = finfo->sc; |
2207 | oct = lio->oct_dev; |
2208 | resp = (struct oct_timestamp_resp *)sc->virtrptr; |
2209 | |
2210 | if (status != OCTEON_REQUEST_DONE) { |
2211 | dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n" , |
2212 | CVM_CAST64(status)); |
2213 | resp->timestamp = 0; |
2214 | } |
2215 | |
2216 | octeon_swap_8B_data(&resp->timestamp, 1);
2217 | |
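/* Only report the timestamp if the stack requested one for this skb
 * (SKBTX_IN_PROGRESS was set at transmit time); the same software
 * ptp_adjust offset used by gettime is added before delivery.
 */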
2218 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { |
2219 | struct skb_shared_hwtstamps ts; |
2220 | u64 ns = resp->timestamp; |
2221 | |
2222 | netif_info(lio, tx_done, lio->netdev, |
2223 | "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n" , |
2224 | skb, (unsigned long long)ns); |
2225 | ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2226 | skb_tstamp_tx(skb, &ts);
2227 | } |
2228 | |
2229 | octeon_free_soft_command(oct, sc); |
2230 | tx_buffer_free(skb);
2231 | } |
2232 | |
2233 | /** |
2234 | * send_nic_timestamp_pkt - Send a data packet that will be timestamped |
2235 | * @oct: octeon device |
2236 | * @ndata: pointer to network data |
2237 | * @finfo: pointer to private network data |
2238 | * @xmit_more: more is coming |
2239 | */ |
2240 | static inline int send_nic_timestamp_pkt(struct octeon_device *oct, |
2241 | struct octnic_data_pkt *ndata, |
2242 | struct octnet_buf_free_info *finfo, |
2243 | int xmit_more) |
2244 | { |
2245 | int retval; |
2246 | struct octeon_soft_command *sc; |
2247 | struct lio *lio; |
2248 | int ring_doorbell; |
2249 | u32 len; |
2250 | |
2251 | lio = finfo->lio; |
2252 | |
2253 | sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2254 | sizeof(struct oct_timestamp_resp));
2255 | finfo->sc = sc; |
2256 | |
2257 | if (!sc) { |
2258 | dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n" ); |
2259 | return IQ_SEND_FAILED; |
2260 | } |
2261 | |
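/* Promote the no-response request type to a response-carrying one so the
 * firmware returns a completion with the TX timestamp, which the
 * handle_timestamp() callback then delivers to the stack.
 */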
2262 | if (ndata->reqtype == REQTYPE_NORESP_NET) |
2263 | ndata->reqtype = REQTYPE_RESP_NET; |
2264 | else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) |
2265 | ndata->reqtype = REQTYPE_RESP_NET_SG; |
2266 | |
2267 | sc->callback = handle_timestamp; |
2268 | sc->callback_arg = finfo->skb; |
2269 | sc->iq_no = ndata->q_no; |
2270 | |
2271 | if (OCTEON_CN23XX_PF(oct)) |
2272 | len = (u32)((struct octeon_instr_ih3 *) |
2273 | (&sc->cmd.cmd3.ih3))->dlengsz; |
2274 | else |
2275 | len = (u32)((struct octeon_instr_ih2 *) |
2276 | (&sc->cmd.cmd2.ih2))->dlengsz; |
2277 | |
2278 | ring_doorbell = !xmit_more; |
2279 | |
2280 | retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2281 | sc, len, ndata->reqtype);
2282 | |
2283 | if (retval == IQ_SEND_FAILED) { |
2284 | dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n" , |
2285 | retval); |
2286 | octeon_free_soft_command(oct, sc); |
2287 | } else { |
2288 | netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n" ); |
2289 | } |
2290 | |
2291 | return retval; |
2292 | } |
2293 | |
2294 | /** |
2295 | * liquidio_xmit - Transmit network packets to the Octeon interface
2296 | * @skb: skbuff struct to be transmitted
2297 | * @netdev: pointer to network device |
2298 | * |
2299 | * Return: whether the packet was transmitted to the device okay or not |
2300 | * (NETDEV_TX_OK or NETDEV_TX_BUSY) |
2301 | */ |
2302 | static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) |
2303 | { |
2304 | struct lio *lio; |
2305 | struct octnet_buf_free_info *finfo; |
2306 | union octnic_cmd_setup cmdsetup; |
2307 | struct octnic_data_pkt ndata; |
2308 | struct octeon_device *oct; |
2309 | struct oct_iq_stats *stats; |
2310 | struct octeon_instr_irh *irh; |
2311 | union tx_info *tx_info; |
2312 | int status = 0; |
2313 | int q_idx = 0, iq_no = 0; |
2314 | int j, xmit_more = 0; |
2315 | u64 dptr = 0; |
2316 | u32 tag = 0; |
2317 | |
2318 | lio = GET_LIO(netdev); |
2319 | oct = lio->oct_dev; |
2320 | |
2321 | q_idx = skb_iq(oct, skb); |
2322 | tag = q_idx; |
2323 | iq_no = lio->linfo.txpciq[q_idx].s.q_no; |
2324 | |
2325 | stats = &oct->instr_queue[iq_no]->stats; |
2326 | |
2327 | /* Check for all conditions in which the current packet cannot be |
2328 | * transmitted. |
2329 | */ |
2330 | if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2331 | (!lio->linfo.link.s.link_up) || |
2332 | (skb->len <= 0)) { |
2333 | netif_info(lio, tx_err, lio->netdev, |
2334 | "Transmit failed link_status : %d\n" , |
2335 | lio->linfo.link.s.link_up); |
2336 | goto lio_xmit_failed; |
2337 | } |
2338 | |
2339 | /* Use space in skb->cb to store info used to unmap and |
2340 | * free the buffers. |
2341 | */ |
2342 | finfo = (struct octnet_buf_free_info *)skb->cb; |
2343 | finfo->lio = lio; |
2344 | finfo->skb = skb; |
2345 | finfo->sc = NULL; |
2346 | |
2347 | /* Prepare the attributes for the data to be passed to OSI. */ |
2348 | memset(&ndata, 0, sizeof(struct octnic_data_pkt)); |
2349 | |
2350 | ndata.buf = (void *)finfo; |
2351 | |
2352 | ndata.q_no = iq_no; |
2353 | |
2354 | if (octnet_iq_is_full(oct, ndata.q_no)) {
2355 | /* defer sending if queue is full */ |
2356 | netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n" , |
2357 | ndata.q_no); |
2358 | stats->tx_iq_busy++; |
2359 | return NETDEV_TX_BUSY; |
2360 | } |
2361 | |
2362 | /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", |
2363 | * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); |
2364 | */ |
2365 | |
2366 | ndata.datasize = skb->len; |
2367 | |
2368 | cmdsetup.u64 = 0; |
2369 | cmdsetup.s.iq_no = iq_no; |
2370 | |
2371 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2372 | if (skb->encapsulation) { |
2373 | cmdsetup.s.tnl_csum = 1; |
2374 | stats->tx_vxlan++; |
2375 | } else { |
2376 | cmdsetup.s.transport_csum = 1; |
2377 | } |
2378 | } |
2379 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { |
2380 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
2381 | cmdsetup.s.timestamp = 1; |
2382 | } |
2383 | |
2384 | if (skb_shinfo(skb)->nr_frags == 0) { |
2385 | cmdsetup.s.u.datasize = skb->len; |
2386 | octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2387 | |
2388 | /* Offload checksum calculation for TCP/UDP packets */ |
2389 | dptr = dma_map_single(&oct->pci_dev->dev, |
2390 | skb->data, |
2391 | skb->len, |
2392 | DMA_TO_DEVICE); |
2393 | if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2394 | dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n" , |
2395 | __func__); |
2396 | stats->tx_dmamap_fail++; |
2397 | return NETDEV_TX_BUSY; |
2398 | } |
2399 | |
2400 | if (OCTEON_CN23XX_PF(oct)) |
2401 | ndata.cmd.cmd3.dptr = dptr; |
2402 | else |
2403 | ndata.cmd.cmd2.dptr = dptr; |
2404 | finfo->dptr = dptr; |
2405 | ndata.reqtype = REQTYPE_NORESP_NET; |
2406 | |
2407 | } else { |
2408 | int i, frags; |
2409 | skb_frag_t *frag; |
2410 | struct octnic_gather *g; |
2411 | |
2412 | spin_lock(&lio->glist_lock[q_idx]);
2413 | g = (struct octnic_gather *)
2414 | lio_list_delete_head(&lio->glist[q_idx]);
2415 | spin_unlock(&lio->glist_lock[q_idx]);
2416 | |
2417 | if (!g) { |
2418 | netif_info(lio, tx_err, lio->netdev, |
2419 | "Transmit scatter gather: glist null!\n" ); |
2420 | goto lio_xmit_failed; |
2421 | } |
2422 | |
2423 | cmdsetup.s.gather = 1; |
2424 | cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); |
2425 | octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2426 | |
2427 | memset(g->sg, 0, g->sg_size); |
2428 | |
2429 | g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, |
2430 | skb->data, |
2431 | (skb->len - skb->data_len), |
2432 | DMA_TO_DEVICE); |
2433 | if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2434 | dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n" , |
2435 | __func__); |
2436 | stats->tx_dmamap_fail++; |
2437 | return NETDEV_TX_BUSY; |
2438 | } |
2439 | add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2440 | |
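/* Each octeon SG entry holds four pointer/size slots, so fragment i lands
 * in g->sg[i >> 2], slot (i & 3); slot 0 of the first entry already holds
 * the mapped linear portion of the skb.
 */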
2441 | frags = skb_shinfo(skb)->nr_frags; |
2442 | i = 1; |
2443 | while (frags--) { |
2444 | frag = &skb_shinfo(skb)->frags[i - 1]; |
2445 | |
2446 | g->sg[(i >> 2)].ptr[(i & 3)] = |
2447 | skb_frag_dma_map(&oct->pci_dev->dev,
2448 | frag, 0, skb_frag_size(frag),
2449 | DMA_TO_DEVICE);
2450 | 
2451 | if (dma_mapping_error(&oct->pci_dev->dev,
2452 | g->sg[i >> 2].ptr[i & 3])) {
2453 | dma_unmap_single(&oct->pci_dev->dev, |
2454 | g->sg[0].ptr[0], |
2455 | skb->len - skb->data_len, |
2456 | DMA_TO_DEVICE); |
2457 | for (j = 1; j < i; j++) { |
2458 | frag = &skb_shinfo(skb)->frags[j - 1]; |
2459 | dma_unmap_page(&oct->pci_dev->dev, |
2460 | g->sg[j >> 2].ptr[j & 3], |
2461 | skb_frag_size(frag), |
2462 | DMA_TO_DEVICE); |
2463 | } |
2464 | dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n" , |
2465 | __func__); |
2466 | return NETDEV_TX_BUSY; |
2467 | } |
2468 | |
2469 | add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2470 | (i & 3));
2471 | i++; |
2472 | } |
2473 | |
2474 | dptr = g->sg_dma_ptr; |
2475 | |
2476 | if (OCTEON_CN23XX_PF(oct)) |
2477 | ndata.cmd.cmd3.dptr = dptr; |
2478 | else |
2479 | ndata.cmd.cmd2.dptr = dptr; |
2480 | finfo->dptr = dptr; |
2481 | finfo->g = g; |
2482 | |
2483 | ndata.reqtype = REQTYPE_NORESP_NET_SG; |
2484 | } |
2485 | |
2486 | if (OCTEON_CN23XX_PF(oct)) { |
2487 | irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; |
2488 | tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; |
2489 | } else { |
2490 | irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; |
2491 | tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; |
2492 | } |
2493 | |
2494 | if (skb_shinfo(skb)->gso_size) { |
2495 | tx_info->s.gso_size = skb_shinfo(skb)->gso_size; |
2496 | tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; |
2497 | stats->tx_gso++; |
2498 | } |
2499 | |
2500 | /* HW insert VLAN tag */ |
2501 | if (skb_vlan_tag_present(skb)) { |
2502 | irh->priority = skb_vlan_tag_get(skb) >> 13; |
2503 | irh->vlan = skb_vlan_tag_get(skb) & 0xfff; |
2504 | } |
2505 | |
2506 | xmit_more = netdev_xmit_more(); |
2507 | |
2508 | if (unlikely(cmdsetup.s.timestamp)) |
2509 | status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2510 | else
2511 | status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2512 | if (status == IQ_SEND_FAILED) |
2513 | goto lio_xmit_failed; |
2514 | |
2515 | netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n" ); |
2516 | |
2517 | if (status == IQ_SEND_STOP) |
2518 | netif_stop_subqueue(netdev, q_idx);
2519 | 
2520 | netif_trans_update(netdev);
2521 | |
2522 | if (tx_info->s.gso_segs) |
2523 | stats->tx_done += tx_info->s.gso_segs; |
2524 | else |
2525 | stats->tx_done++; |
2526 | stats->tx_tot_bytes += ndata.datasize; |
2527 | |
2528 | return NETDEV_TX_OK; |
2529 | |
2530 | lio_xmit_failed: |
2531 | stats->tx_dropped++; |
2532 | netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n" , |
2533 | iq_no, stats->tx_dropped); |
2534 | if (dptr) |
2535 | dma_unmap_single(&oct->pci_dev->dev, dptr, |
2536 | ndata.datasize, DMA_TO_DEVICE); |
2537 | |
2538 | octeon_ring_doorbell_locked(oct, iq_no); |
2539 | |
2540 | tx_buffer_free(skb);
2541 | return NETDEV_TX_OK; |
2542 | } |
2543 | |
2544 | /** |
2545 | * liquidio_tx_timeout - Network device Tx timeout |
2546 | * @netdev: pointer to network device |
2547 | * @txqueue: index of the hung transmit queue |
2548 | */ |
2549 | static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
2550 | { |
2551 | struct lio *lio; |
2552 | |
2553 | lio = GET_LIO(netdev); |
2554 | |
2555 | netif_info(lio, tx_err, lio->netdev, |
2556 | "Transmit timeout tx_dropped:%ld, waking up queues now!!\n" , |
2557 | netdev->stats.tx_dropped); |
2558 | netif_trans_update(netdev);
2559 | wake_txqs(netdev); |
2560 | } |
2561 | |
2562 | static int liquidio_vlan_rx_add_vid(struct net_device *netdev, |
2563 | __be16 proto __attribute__((unused)), |
2564 | u16 vid) |
2565 | { |
2566 | struct lio *lio = GET_LIO(netdev); |
2567 | struct octeon_device *oct = lio->oct_dev; |
2568 | struct octnic_ctrl_pkt nctrl; |
2569 | int ret = 0; |
2570 | |
2571 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2572 | |
2573 | nctrl.ncmd.u64 = 0; |
2574 | nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; |
2575 | nctrl.ncmd.s.param1 = vid; |
2576 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2577 | nctrl.netpndev = (u64)netdev; |
2578 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
2579 | |
2580 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2581 | if (ret) { |
2582 | dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n" , |
2583 | ret); |
2584 | if (ret > 0) |
2585 | ret = -EIO; |
2586 | } |
2587 | |
2588 | return ret; |
2589 | } |
2590 | |
2591 | static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, |
2592 | __be16 proto __attribute__((unused)), |
2593 | u16 vid) |
2594 | { |
2595 | struct lio *lio = GET_LIO(netdev); |
2596 | struct octeon_device *oct = lio->oct_dev; |
2597 | struct octnic_ctrl_pkt nctrl; |
2598 | int ret = 0; |
2599 | |
2600 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2601 | |
2602 | nctrl.ncmd.u64 = 0; |
2603 | nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; |
2604 | nctrl.ncmd.s.param1 = vid; |
2605 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2606 | nctrl.netpndev = (u64)netdev; |
2607 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
2608 | |
2609 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2610 | if (ret) { |
2611 | dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n" , |
2612 | ret); |
2613 | if (ret > 0) |
2614 | ret = -EIO; |
2615 | } |
2616 | return ret; |
2617 | } |
2618 | |
2619 | /** |
2620 | * liquidio_set_rxcsum_command - Send a command to enable/disable RX checksum offload
2621 | * @netdev: pointer to network device |
2622 | * @command: OCTNET_CMD_TNL_RX_CSUM_CTL |
2623 | * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE |
2624 | * Returns: SUCCESS or FAILURE |
2625 | */ |
2626 | static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, |
2627 | u8 rx_cmd) |
2628 | { |
2629 | struct lio *lio = GET_LIO(netdev); |
2630 | struct octeon_device *oct = lio->oct_dev; |
2631 | struct octnic_ctrl_pkt nctrl; |
2632 | int ret = 0; |
2633 | |
2634 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2635 | |
2636 | nctrl.ncmd.u64 = 0; |
2637 | nctrl.ncmd.s.cmd = command; |
2638 | nctrl.ncmd.s.param1 = rx_cmd; |
2639 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2640 | nctrl.netpndev = (u64)netdev; |
2641 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
2642 | |
2643 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2644 | if (ret) { |
2645 | dev_err(&oct->pci_dev->dev, |
2646 | "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n" , |
2647 | ret); |
2648 | if (ret > 0) |
2649 | ret = -EIO; |
2650 | } |
2651 | return ret; |
2652 | } |
2653 | |
2654 | /** |
2655 | * liquidio_vxlan_port_command - Send a command to add/delete a VxLAN UDP port in firmware
2656 | * @netdev: pointer to network device |
2657 | * @command: OCTNET_CMD_VXLAN_PORT_CONFIG |
2658 | * @vxlan_port: VxLAN port to be added or deleted |
2659 | * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, |
2660 | * OCTNET_CMD_VXLAN_PORT_DEL |
2661 | * Return: SUCCESS or FAILURE |
2662 | */ |
2663 | static int liquidio_vxlan_port_command(struct net_device *netdev, int command, |
2664 | u16 vxlan_port, u8 vxlan_cmd_bit) |
2665 | { |
2666 | struct lio *lio = GET_LIO(netdev); |
2667 | struct octeon_device *oct = lio->oct_dev; |
2668 | struct octnic_ctrl_pkt nctrl; |
2669 | int ret = 0; |
2670 | |
2671 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2672 | |
2673 | nctrl.ncmd.u64 = 0; |
2674 | nctrl.ncmd.s.cmd = command; |
2675 | nctrl.ncmd.s.more = vxlan_cmd_bit; |
2676 | nctrl.ncmd.s.param1 = vxlan_port; |
2677 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2678 | nctrl.netpndev = (u64)netdev; |
2679 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
2680 | |
2681 | ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2682 | if (ret) { |
2683 | dev_err(&oct->pci_dev->dev, |
2684 | "VxLAN port add/delete failed in core (ret:0x%x)\n" , |
2685 | ret); |
2686 | if (ret > 0) |
2687 | ret = -EIO; |
2688 | } |
2689 | return ret; |
2690 | } |
2691 | |
2692 | static int liquidio_udp_tunnel_set_port(struct net_device *netdev, |
2693 | unsigned int table, unsigned int entry, |
2694 | struct udp_tunnel_info *ti) |
2695 | { |
2696 | return liquidio_vxlan_port_command(netdev, |
2697 | OCTNET_CMD_VXLAN_PORT_CONFIG, |
2698 | htons(ti->port), |
2699 | OCTNET_CMD_VXLAN_PORT_ADD); |
2700 | } |
2701 | |
2702 | static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, |
2703 | unsigned int table, |
2704 | unsigned int entry, |
2705 | struct udp_tunnel_info *ti) |
2706 | { |
2707 | return liquidio_vxlan_port_command(netdev, |
2708 | OCTNET_CMD_VXLAN_PORT_CONFIG, |
2709 | htons(ti->port), |
2710 | OCTNET_CMD_VXLAN_PORT_DEL); |
2711 | } |
2712 | |
2713 | static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { |
2714 | .set_port = liquidio_udp_tunnel_set_port, |
2715 | .unset_port = liquidio_udp_tunnel_unset_port, |
2716 | .tables = { |
2717 | { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
2718 | }, |
2719 | }; |
2720 | |
2721 | /** |
2722 | * liquidio_fix_features - Net device fix features |
2723 | * @netdev: pointer to network device |
2724 | * @request: features requested |
2725 | * Return: updated features list |
2726 | */ |
2727 | static netdev_features_t liquidio_fix_features(struct net_device *netdev, |
2728 | netdev_features_t request) |
2729 | { |
2730 | struct lio *lio = netdev_priv(netdev);
2731 | |
2732 | if ((request & NETIF_F_RXCSUM) && |
2733 | !(lio->dev_capability & NETIF_F_RXCSUM)) |
2734 | request &= ~NETIF_F_RXCSUM; |
2735 | |
2736 | if ((request & NETIF_F_HW_CSUM) && |
2737 | !(lio->dev_capability & NETIF_F_HW_CSUM)) |
2738 | request &= ~NETIF_F_HW_CSUM; |
2739 | |
2740 | if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) |
2741 | request &= ~NETIF_F_TSO; |
2742 | |
2743 | if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) |
2744 | request &= ~NETIF_F_TSO6; |
2745 | |
2746 | if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) |
2747 | request &= ~NETIF_F_LRO; |
2748 | |
2749 | /* Disable LRO if RXCSUM is off */
2750 | if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && |
2751 | (lio->dev_capability & NETIF_F_LRO)) |
2752 | request &= ~NETIF_F_LRO; |
2753 | |
2754 | if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2755 | !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) |
2756 | request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; |
2757 | |
2758 | return request; |
2759 | } |
2760 | |
2761 | /** |
2762 | * liquidio_set_features - Net device set features |
2763 | * @netdev: pointer to network device |
2764 | * @features: features to enable/disable |
2765 | */ |
2766 | static int liquidio_set_features(struct net_device *netdev, |
2767 | netdev_features_t features) |
2768 | { |
2769 | struct lio *lio = netdev_priv(netdev);
2770 | |
2771 | if ((features & NETIF_F_LRO) && |
2772 | (lio->dev_capability & NETIF_F_LRO) && |
2773 | !(netdev->features & NETIF_F_LRO)) |
2774 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, |
2775 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
2776 | else if (!(features & NETIF_F_LRO) && |
2777 | (lio->dev_capability & NETIF_F_LRO) && |
2778 | (netdev->features & NETIF_F_LRO)) |
2779 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, |
2780 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
2781 | |
2782 | /* Sending command to firmware to enable/disable RX checksum |
2783 | * offload settings using ethtool |
2784 | */ |
2785 | if (!(netdev->features & NETIF_F_RXCSUM) && |
2786 | (lio->enc_dev_capability & NETIF_F_RXCSUM) && |
2787 | (features & NETIF_F_RXCSUM)) |
2788 | liquidio_set_rxcsum_command(netdev, |
2789 | OCTNET_CMD_TNL_RX_CSUM_CTL, |
2790 | OCTNET_CMD_RXCSUM_ENABLE); |
2791 | else if ((netdev->features & NETIF_F_RXCSUM) && |
2792 | (lio->enc_dev_capability & NETIF_F_RXCSUM) && |
2793 | !(features & NETIF_F_RXCSUM)) |
2794 | liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, |
2795 | OCTNET_CMD_RXCSUM_DISABLE); |
2796 | |
2797 | if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2798 | (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2799 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
2800 | liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, |
2801 | OCTNET_CMD_VLAN_FILTER_ENABLE); |
2802 | else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2803 | (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2804 | (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) |
2805 | liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, |
2806 | OCTNET_CMD_VLAN_FILTER_DISABLE); |
2807 | |
2808 | return 0; |
2809 | } |
2810 | |
2811 | static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, |
2812 | u8 *mac, bool is_admin_assigned) |
2813 | { |
2814 | struct lio *lio = GET_LIO(netdev); |
2815 | struct octeon_device *oct = lio->oct_dev; |
2816 | struct octnic_ctrl_pkt nctrl; |
2817 | int ret = 0; |
2818 | |
2819 | if (!is_valid_ether_addr(mac))
2820 | return -EINVAL; |
2821 | |
2822 | if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) |
2823 | return -EINVAL; |
2824 | |
2825 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2826 | |
2827 | nctrl.ncmd.u64 = 0; |
2828 | nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; |
2829 | /* vfidx is 0 based, but vf_num (param1) is 1 based */ |
2830 | nctrl.ncmd.s.param1 = vfidx + 1; |
2831 | nctrl.ncmd.s.more = 1; |
2832 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2833 | nctrl.netpndev = (u64)netdev; |
2834 | if (is_admin_assigned) { |
2835 | nctrl.ncmd.s.param2 = true; |
2836 | nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; |
2837 | } |
2838 | |
2839 | nctrl.udd[0] = 0; |
2840 | /* The MAC Address is presented in network byte order. */ |
2841 | ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2842 | |
2843 | oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; |
2844 | |
2845 | ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2846 | if (ret > 0) |
2847 | ret = -EIO; |
2848 | |
2849 | return ret; |
2850 | } |
2851 | |
2852 | static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) |
2853 | { |
2854 | struct lio *lio = GET_LIO(netdev); |
2855 | struct octeon_device *oct = lio->oct_dev; |
2856 | int retval; |
2857 | |
2858 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) |
2859 | return -EINVAL; |
2860 | |
2861 | retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2862 | if (!retval) |
2863 | cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); |
2864 | |
2865 | return retval; |
2866 | } |
2867 | |
2868 | static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, |
2869 | bool enable) |
2870 | { |
2871 | struct lio *lio = GET_LIO(netdev); |
2872 | struct octeon_device *oct = lio->oct_dev; |
2873 | struct octnic_ctrl_pkt nctrl; |
2874 | int retval; |
2875 | |
2876 | if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { |
2877 | netif_info(lio, drv, lio->netdev, |
2878 | "firmware does not support spoofchk\n" ); |
2879 | return -EOPNOTSUPP; |
2880 | } |
2881 | |
2882 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { |
2883 | netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n" , vfidx); |
2884 | return -EINVAL; |
2885 | } |
2886 | |
2887 | if (enable) { |
2888 | if (oct->sriov_info.vf_spoofchk[vfidx]) |
2889 | return 0; |
2890 | } else { |
2891 | /* Clear */ |
2892 | if (!oct->sriov_info.vf_spoofchk[vfidx]) |
2893 | return 0; |
2894 | } |
2895 | |
2896 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2897 | nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; |
2898 | nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; |
2899 | nctrl.ncmd.s.param1 = |
2900 | vfidx + 1; /* vfidx is 0 based, |
2901 | * but vf_num (param1) is 1 based |
2902 | */ |
2903 | nctrl.ncmd.s.param2 = enable; |
2904 | nctrl.ncmd.s.more = 0; |
2905 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2906 | nctrl.cb_fn = NULL; |
2907 | |
2908 | retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2909 | |
2910 | if (retval) { |
2911 | netif_info(lio, drv, lio->netdev, |
2912 | "Failed to set VF %d spoofchk %s\n" , vfidx, |
2913 | enable ? "on" : "off" ); |
2914 | return -1; |
2915 | } |
2916 | |
2917 | oct->sriov_info.vf_spoofchk[vfidx] = enable; |
2918 | netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n" , vfidx, |
2919 | enable ? "on" : "off" ); |
2920 | |
2921 | return 0; |
2922 | } |
2923 | |
2924 | static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, |
2925 | u16 vlan, u8 qos, __be16 vlan_proto) |
2926 | { |
2927 | struct lio *lio = GET_LIO(netdev); |
2928 | struct octeon_device *oct = lio->oct_dev; |
2929 | struct octnic_ctrl_pkt nctrl; |
2930 | u16 vlantci; |
2931 | int ret = 0; |
2932 | |
2933 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) |
2934 | return -EINVAL; |
2935 | |
2936 | if (vlan_proto != htons(ETH_P_8021Q)) |
2937 | return -EPROTONOSUPPORT; |
2938 | |
2939 | if (vlan >= VLAN_N_VID || qos > 7) |
2940 | return -EINVAL; |
2941 | |
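/* Pack the 12-bit VLAN ID and 3-bit QoS priority into a single TCI value;
 * a VLAN of 0 clears the VF's filter via OCTNET_CMD_DEL_VLAN_FILTER below.
 */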
2942 | if (vlan) |
2943 | vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; |
2944 | else |
2945 | vlantci = 0; |
2946 | |
2947 | if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) |
2948 | return 0; |
2949 | |
2950 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
2951 | |
2952 | if (vlan) |
2953 | nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; |
2954 | else |
2955 | nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; |
2956 | |
2957 | nctrl.ncmd.s.param1 = vlantci; |
2958 | nctrl.ncmd.s.param2 = |
2959 | vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ |
2960 | nctrl.ncmd.s.more = 0; |
2961 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
2962 | nctrl.cb_fn = NULL; |
2963 | |
2964 | ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2965 | if (ret) { |
2966 | if (ret > 0) |
2967 | ret = -EIO; |
2968 | return ret; |
2969 | } |
2970 | |
2971 | oct->sriov_info.vf_vlantci[vfidx] = vlantci; |
2972 | |
2973 | return ret; |
2974 | } |
2975 | |
2976 | static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, |
2977 | struct ifla_vf_info *ivi) |
2978 | { |
2979 | struct lio *lio = GET_LIO(netdev); |
2980 | struct octeon_device *oct = lio->oct_dev; |
2981 | u8 *macaddr; |
2982 | |
2983 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) |
2984 | return -EINVAL; |
2985 | |
2986 | memset(ivi, 0, sizeof(struct ifla_vf_info)); |
2987 | |
2988 | ivi->vf = vfidx; |
2989 | macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; |
2990 | ether_addr_copy(&ivi->mac[0], macaddr);
2991 | ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; |
2992 | ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; |
2993 | if (oct->sriov_info.trusted_vf.active && |
2994 | oct->sriov_info.trusted_vf.id == vfidx) |
2995 | ivi->trusted = true; |
2996 | else |
2997 | ivi->trusted = false; |
2998 | ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; |
2999 | ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; |
3000 | ivi->max_tx_rate = lio->linfo.link.s.speed; |
3001 | ivi->min_tx_rate = 0; |
3002 | |
3003 | return 0; |
3004 | } |
3005 | |
3006 | static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) |
3007 | { |
3008 | struct octeon_device *oct = lio->oct_dev; |
3009 | struct octeon_soft_command *sc; |
3010 | int retval; |
3011 | |
3012 | sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3013 | if (!sc) |
3014 | return -ENOMEM; |
3015 | |
3016 | sc->iq_no = lio->linfo.txpciq[0].s.q_no; |
3017 | |
3018 | /* vfidx is 0 based, but vf_num (param1) is 1 based */ |
3019 | octeon_prepare_soft_command(oct, sc, OPCODE_NIC, |
3020 | OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3021 | trusted);
3022 | 
3023 | init_completion(&sc->complete);
3024 | sc->sc_status = OCTEON_REQUEST_PENDING; |
3025 | |
3026 | retval = octeon_send_soft_command(oct, sc); |
3027 | if (retval == IQ_SEND_FAILED) { |
3028 | octeon_free_soft_command(oct, sc); |
3029 | retval = -1; |
3030 | } else { |
3031 | /* Wait for response or timeout */ |
3032 | retval = wait_for_sc_completion_timeout(oct, sc, 0);
3033 | if (retval) |
3034 | return (retval); |
3035 | |
3036 | WRITE_ONCE(sc->caller_is_done, true); |
3037 | } |
3038 | |
3039 | return retval; |
3040 | } |
3041 | |
3042 | static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, |
3043 | bool setting) |
3044 | { |
3045 | struct lio *lio = GET_LIO(netdev); |
3046 | struct octeon_device *oct = lio->oct_dev; |
3047 | |
3048 | if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1" ) < 0) { |
3049 | /* trusted vf is not supported by firmware older than 1.7.1 */ |
3050 | return -EOPNOTSUPP; |
3051 | } |
3052 | |
3053 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { |
3054 | netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n" , vfidx); |
3055 | return -EINVAL; |
3056 | } |
3057 | |
3058 | if (setting) { |
3059 | /* Set */ |
3060 | |
3061 | if (oct->sriov_info.trusted_vf.active && |
3062 | oct->sriov_info.trusted_vf.id == vfidx) |
3063 | return 0; |
3064 | |
3065 | if (oct->sriov_info.trusted_vf.active) { |
3066 | netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n" ); |
3067 | return -EPERM; |
3068 | } |
3069 | } else { |
3070 | /* Clear */ |
3071 | |
3072 | if (!oct->sriov_info.trusted_vf.active) |
3073 | return 0; |
3074 | } |
3075 | |
3076 | if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3077 | if (setting) { |
3078 | oct->sriov_info.trusted_vf.id = vfidx; |
3079 | oct->sriov_info.trusted_vf.active = true; |
3080 | } else { |
3081 | oct->sriov_info.trusted_vf.active = false; |
3082 | } |
3083 | |
3084 | netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n" , vfidx, |
3085 | setting ? "" : "not " ); |
3086 | } else { |
3087 | netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n" ); |
3088 | return -1; |
3089 | } |
3090 | |
3091 | return 0; |
3092 | } |
3093 | |
3094 | static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, |
3095 | int linkstate) |
3096 | { |
3097 | struct lio *lio = GET_LIO(netdev); |
3098 | struct octeon_device *oct = lio->oct_dev; |
3099 | struct octnic_ctrl_pkt nctrl; |
3100 | int ret = 0; |
3101 | |
3102 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) |
3103 | return -EINVAL; |
3104 | |
3105 | if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) |
3106 | return 0; |
3107 | |
3108 | memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); |
3109 | nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; |
3110 | nctrl.ncmd.s.param1 = |
3111 | vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ |
3112 | nctrl.ncmd.s.param2 = linkstate; |
3113 | nctrl.ncmd.s.more = 0; |
3114 | nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; |
3115 | nctrl.cb_fn = NULL; |
3116 | |
3117 | ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3118 | |
3119 | if (!ret) |
3120 | oct->sriov_info.vf_linkstate[vfidx] = linkstate; |
3121 | else if (ret > 0) |
3122 | ret = -EIO; |
3123 | |
3124 | return ret; |
3125 | } |
3126 | |
3127 | static int |
3128 | liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) |
3129 | { |
3130 | struct lio_devlink_priv *priv; |
3131 | struct octeon_device *oct; |
3132 | |
3133 | priv = devlink_priv(devlink); |
3134 | oct = priv->oct; |
3135 | |
3136 | *mode = oct->eswitch_mode; |
3137 | |
3138 | return 0; |
3139 | } |
3140 | |
3141 | static int |
3142 | liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, |
3143 | struct netlink_ext_ack *extack) |
3144 | { |
3145 | struct lio_devlink_priv *priv; |
3146 | struct octeon_device *oct; |
3147 | int ret = 0; |
3148 | |
3149 | priv = devlink_priv(devlink); |
3150 | oct = priv->oct; |
3151 | |
3152 | if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) |
3153 | return -EINVAL; |
3154 | |
3155 | if (oct->eswitch_mode == mode) |
3156 | return 0; |
3157 | |
3158 | switch (mode) { |
3159 | case DEVLINK_ESWITCH_MODE_SWITCHDEV: |
3160 | oct->eswitch_mode = mode; |
3161 | ret = lio_vf_rep_create(oct); |
3162 | break; |
3163 | |
3164 | case DEVLINK_ESWITCH_MODE_LEGACY: |
3165 | lio_vf_rep_destroy(oct); |
3166 | oct->eswitch_mode = mode; |
3167 | break; |
3168 | |
3169 | default: |
3170 | ret = -EINVAL; |
3171 | } |
3172 | |
3173 | return ret; |
3174 | } |
3175 | |
3176 | static const struct devlink_ops liquidio_devlink_ops = { |
3177 | .eswitch_mode_get = liquidio_eswitch_mode_get, |
3178 | .eswitch_mode_set = liquidio_eswitch_mode_set, |
3179 | }; |
3180 | |
3181 | static int |
3182 | liquidio_get_port_parent_id(struct net_device *dev, |
3183 | struct netdev_phys_item_id *ppid) |
3184 | { |
3185 | struct lio *lio = GET_LIO(dev); |
3186 | struct octeon_device *oct = lio->oct_dev; |
3187 | |
3188 | if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) |
3189 | return -EOPNOTSUPP; |
3190 | |
3191 | ppid->id_len = ETH_ALEN; |
3192 | ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3193 | |
3194 | return 0; |
3195 | } |
3196 | |
3197 | static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, |
3198 | struct ifla_vf_stats *vf_stats) |
3199 | { |
3200 | struct lio *lio = GET_LIO(netdev); |
3201 | struct octeon_device *oct = lio->oct_dev; |
3202 | struct oct_vf_stats stats; |
3203 | int ret; |
3204 | |
3205 | if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) |
3206 | return -EINVAL; |
3207 | |
3208 | memset(&stats, 0, sizeof(struct oct_vf_stats)); |
3209 | ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3210 | if (!ret) { |
3211 | vf_stats->rx_packets = stats.rx_packets; |
3212 | vf_stats->tx_packets = stats.tx_packets; |
3213 | vf_stats->rx_bytes = stats.rx_bytes; |
3214 | vf_stats->tx_bytes = stats.tx_bytes; |
3215 | vf_stats->broadcast = stats.broadcast; |
3216 | vf_stats->multicast = stats.multicast; |
3217 | } |
3218 | |
3219 | return ret; |
3220 | } |
3221 | |
3222 | static const struct net_device_ops lionetdevops = { |
3223 | .ndo_open = liquidio_open, |
3224 | .ndo_stop = liquidio_stop, |
3225 | .ndo_start_xmit = liquidio_xmit, |
3226 | .ndo_get_stats64 = liquidio_get_stats64, |
3227 | .ndo_set_mac_address = liquidio_set_mac, |
3228 | .ndo_set_rx_mode = liquidio_set_mcast_list, |
3229 | .ndo_tx_timeout = liquidio_tx_timeout, |
3230 | |
3231 | .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, |
3232 | .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, |
3233 | .ndo_change_mtu = liquidio_change_mtu, |
3234 | .ndo_eth_ioctl = liquidio_ioctl, |
3235 | .ndo_fix_features = liquidio_fix_features, |
3236 | .ndo_set_features = liquidio_set_features, |
3237 | .ndo_set_vf_mac = liquidio_set_vf_mac, |
3238 | .ndo_set_vf_vlan = liquidio_set_vf_vlan, |
3239 | .ndo_get_vf_config = liquidio_get_vf_config, |
3240 | .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, |
3241 | .ndo_set_vf_trust = liquidio_set_vf_trust, |
3242 | .ndo_set_vf_link_state = liquidio_set_vf_link_state, |
3243 | .ndo_get_vf_stats = liquidio_get_vf_stats, |
3244 | .ndo_get_port_parent_id = liquidio_get_port_parent_id, |
3245 | }; |
3246 | |
3247 | /** |
3248 | * liquidio_init - Entry point for the liquidio module |
3249 | */ |
3250 | static int __init liquidio_init(void) |
3251 | { |
3252 | int i; |
3253 | struct handshake *hs; |
3254 | |
3255 | init_completion(&first_stage);
3256 | 
3257 | octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3258 | |
3259 | if (liquidio_init_pci()) |
3260 | return -EINVAL; |
3261 | |
3262 | wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3263 | |
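/* First wait for every probed device to complete its init handshake, then
 * give each one up to 30 seconds for the firmware to report that it has
 * started before declaring the module load successful.
 */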
3264 | for (i = 0; i < MAX_OCTEON_DEVICES; i++) { |
3265 | hs = &handshake[i]; |
3266 | if (hs->pci_dev) { |
3267 | wait_for_completion(&hs->init); |
3268 | if (!hs->init_ok) { |
3269 | /* init handshake failed */ |
3270 | dev_err(&hs->pci_dev->dev, |
3271 | "Failed to init device\n" ); |
3272 | liquidio_deinit_pci(); |
3273 | return -EIO; |
3274 | } |
3275 | } |
3276 | } |
3277 | |
3278 | for (i = 0; i < MAX_OCTEON_DEVICES; i++) { |
3279 | hs = &handshake[i]; |
3280 | if (hs->pci_dev) { |
3281 | wait_for_completion_timeout(&hs->started,
3282 | msecs_to_jiffies(30000));
3283 | if (!hs->started_ok) { |
3284 | /* starter handshake failed */ |
3285 | dev_err(&hs->pci_dev->dev, |
3286 | "Firmware failed to start\n" ); |
3287 | liquidio_deinit_pci(); |
3288 | return -EIO; |
3289 | } |
3290 | } |
3291 | } |
3292 | |
3293 | return 0; |
3294 | } |
3295 | |
3296 | static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) |
3297 | { |
3298 | struct octeon_device *oct = (struct octeon_device *)buf; |
3299 | struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; |
3300 | int gmxport = 0; |
3301 | union oct_link_status *ls; |
3302 | int i; |
3303 | |
3304 | if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { |
3305 | dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n" , |
3306 | recv_pkt->buffer_size[0], |
3307 | recv_pkt->rh.r_nic_info.gmxport); |
3308 | goto nic_info_err; |
3309 | } |
3310 | |
3311 | gmxport = recv_pkt->rh.r_nic_info.gmxport; |
3312 | ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3313 | OCT_DROQ_INFO_SIZE); |
3314 | |
3315 | octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3316 | for (i = 0; i < oct->ifcount; i++) { |
3317 | if (oct->props[i].gmxport == gmxport) { |
3318 | update_link_status(oct->props[i].netdev, ls);
3319 | break; |
3320 | } |
3321 | } |
3322 | |
3323 | nic_info_err: |
3324 | for (i = 0; i < recv_pkt->buffer_count; i++) |
3325 | recv_buffer_free(recv_pkt->buffer_ptr[i]);
3326 | octeon_free_recv_info(recv_info); |
3327 | return 0; |
3328 | } |
3329 | |
3330 | /** |
3331 | * setup_nic_devices - Setup network interfaces |
3332 | * @octeon_dev: octeon device |
3333 | * |
3334 | * Called during init time for each device. It assumes the NIC |
3335 | * is already up and running. The link information for each |
3336 | * interface is passed in link_info. |
3337 | */ |
3338 | static int setup_nic_devices(struct octeon_device *octeon_dev) |
3339 | { |
3340 | struct lio *lio = NULL; |
3341 | struct net_device *netdev; |
3342 | u8 mac[6], i, j, *fw_ver, *micro_ver; |
3343 | unsigned long micro; |
3344 | u32 cur_ver; |
3345 | struct octeon_soft_command *sc; |
3346 | struct liquidio_if_cfg_resp *resp; |
3347 | struct octdev_props *props; |
3348 | int retval, num_iqueues, num_oqueues; |
3349 | int max_num_queues = 0; |
3350 | union oct_nic_if_cfg if_cfg; |
3351 | unsigned int base_queue; |
3352 | unsigned int gmx_port_id; |
3353 | u32 resp_size, data_size; |
3354 | u32 ifidx_or_pfnum; |
3355 | struct lio_version *vdata; |
3356 | struct devlink *devlink; |
3357 | struct lio_devlink_priv *lio_devlink; |
3358 | |
3359 | /* This is to handle link status changes */ |
3360 | octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, |
3361 | OPCODE_NIC_INFO, |
3362 | lio_nic_info, octeon_dev); |
3363 | |
3364 | /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. |
3365 | * They are handled directly. |
3366 | */ |
3367 | octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, |
3368 | free_netbuf); |
3369 | |
3370 | octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, |
3371 | free_netsgbuf); |
3372 | |
3373 | octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, |
3374 | free_netsgbuf_with_resp); |
3375 | |
3376 | for (i = 0; i < octeon_dev->ifcount; i++) { |
3377 | resp_size = sizeof(struct liquidio_if_cfg_resp); |
3378 | data_size = sizeof(struct lio_version); |
3379 | sc = (struct octeon_soft_command *) |
3380 | octeon_alloc_soft_command(octeon_dev, data_size, |
3381 | resp_size, 0); |
3382 | resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; |
3383 | vdata = (struct lio_version *)sc->virtdptr; |
3384 | |
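 | /* The IF_CFG request carries the driver's base version as its data payload. */ |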
3385 | *((u64 *)vdata) = 0; |
3386 | vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); |
3387 | vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); |
3388 | vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); |
3389 | |
3390 | if (OCTEON_CN23XX_PF(octeon_dev)) { |
3391 | num_iqueues = octeon_dev->sriov_info.num_pf_rings; |
3392 | num_oqueues = octeon_dev->sriov_info.num_pf_rings; |
3393 | base_queue = octeon_dev->sriov_info.pf_srn; |
3394 | |
3395 | gmx_port_id = octeon_dev->pf_num; |
3396 | ifidx_or_pfnum = octeon_dev->pf_num; |
3397 | } else { |
3398 | num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( |
3399 | octeon_get_conf(octeon_dev), i); |
3400 | num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( |
3401 | octeon_get_conf(octeon_dev), i); |
3402 | base_queue = CFG_GET_BASE_QUE_NIC_IF( |
3403 | octeon_get_conf(octeon_dev), i); |
3404 | gmx_port_id = CFG_GET_GMXID_NIC_IF( |
3405 | octeon_get_conf(octeon_dev), i); |
3406 | ifidx_or_pfnum = i; |
3407 | } |
3408 | |
3409 | dev_dbg(&octeon_dev->pci_dev->dev, |
3410 | "requesting config for interface %d, iqs %d, oqs %d\n" , |
3411 | ifidx_or_pfnum, num_iqueues, num_oqueues); |
3412 | |
3413 | if_cfg.u64 = 0; |
3414 | if_cfg.s.num_iqueues = num_iqueues; |
3415 | if_cfg.s.num_oqueues = num_oqueues; |
3416 | if_cfg.s.base_queue = base_queue; |
3417 | if_cfg.s.gmx_port_id = gmx_port_id; |
3418 | |
3419 | sc->iq_no = 0; |
3420 | |
3421 | octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, |
3422 | OPCODE_NIC_IF_CFG, 0, |
3423 | if_cfg.u64, 0); |
3424 | |
3425 | init_completion(&sc->complete); |
3426 | sc->sc_status = OCTEON_REQUEST_PENDING; |
3427 | |
3428 | retval = octeon_send_soft_command(octeon_dev, sc); |
3429 | if (retval == IQ_SEND_FAILED) { |
3430 | dev_err(&octeon_dev->pci_dev->dev, |
3431 | "iq/oq config failed status: %x\n" , |
3432 | retval); |
3433 | /* Soft instr is freed by driver in case of failure. */ |
3434 | octeon_free_soft_command(octeon_dev, sc); |
3435 | return -EIO; |
3436 | } |
3437 | |
3438 | /* Sleep until the completion indicates that the response |
3439 | * arrived or the request timed out. |
3440 | */ |
3441 | retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); |
3442 | if (retval) |
3443 | return retval; |
3444 | |
3445 | retval = resp->status; |
3446 | if (retval) { |
3447 | dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n" ); |
3448 | WRITE_ONCE(sc->caller_is_done, true); |
3449 | goto setup_nic_dev_done; |
3450 | } |
3451 | snprintf(octeon_dev->fw_info.liquidio_firmware_version, |
3452 | 32, "%s", |
3453 | resp->cfg_info.liquidio_firmware_version); |
3454 | |
3455 | /* Verify f/w version (in case of 'auto' loading from flash) */ |
3456 | fw_ver = octeon_dev->fw_info.liquidio_firmware_version; |
3457 | if (memcmp(LIQUIDIO_BASE_VERSION, |
3458 | fw_ver, |
3459 | strlen(LIQUIDIO_BASE_VERSION))) { |
3460 | dev_err(&octeon_dev->pci_dev->dev, |
3461 | "Unmatched firmware version. Expected %s.x, got %s.\n" , |
3462 | LIQUIDIO_BASE_VERSION, fw_ver); |
3463 | WRITE_ONCE(sc->caller_is_done, true); |
3464 | goto setup_nic_dev_done; |
3465 | } else if (atomic_read(octeon_dev->adapter_fw_state) == |
3466 | FW_IS_PRELOADED) { |
3467 | dev_info(&octeon_dev->pci_dev->dev, |
3468 | "Using auto-loaded firmware version %s.\n" , |
3469 | fw_ver); |
3470 | } |
3471 | |
3472 | /* extract micro version field; point past '<maj>.<min>.' */ |
3473 | micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; |
3474 | if (kstrtoul(micro_ver, 10, &micro) != 0) |
3475 | micro = 0; |
3476 | octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; |
3477 | octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; |
3478 | octeon_dev->fw_info.ver.rev = micro; |
3479 | |
3480 | octeon_swap_8B_data((u64 *)(&resp->cfg_info), |
3481 | (sizeof(struct liquidio_if_cfg_info)) >> 3); |
3482 | |
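 | /* Count the queues the firmware actually granted from the response masks. */ |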
3483 | num_iqueues = hweight64(resp->cfg_info.iqmask); |
3484 | num_oqueues = hweight64(resp->cfg_info.oqmask); |
3485 | |
3486 | if (!(num_iqueues) || !(num_oqueues)) { |
3487 | dev_err(&octeon_dev->pci_dev->dev, |
3488 | "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n" , |
3489 | resp->cfg_info.iqmask, |
3490 | resp->cfg_info.oqmask); |
3491 | WRITE_ONCE(sc->caller_is_done, true); |
3492 | goto setup_nic_dev_done; |
3493 | } |
3494 | |
3495 | if (OCTEON_CN6XXX(octeon_dev)) { |
3496 | max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, |
3497 | cn6xxx)); |
3498 | } else if (OCTEON_CN23XX_PF(octeon_dev)) { |
3499 | max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, |
3500 | cn23xx_pf)); |
3501 | } |
3502 | |
3503 | dev_dbg(&octeon_dev->pci_dev->dev, |
3504 | "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n" , |
3505 | i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, |
3506 | num_iqueues, num_oqueues, max_num_queues); |
3507 | netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); |
3508 | |
3509 | if (!netdev) { |
3510 | dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n" ); |
3511 | WRITE_ONCE(sc->caller_is_done, true); |
3512 | goto setup_nic_dev_done; |
3513 | } |
3514 | |
3515 | SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); |
3516 | |
3517 | /* Associate the routines that will handle different |
3518 | * netdev tasks. |
3519 | */ |
3520 | netdev->netdev_ops = &lionetdevops; |
3521 | |
3522 | retval = netif_set_real_num_rx_queues(netdev, num_oqueues); |
3523 | if (retval) { |
3524 | dev_err(&octeon_dev->pci_dev->dev, |
3525 | "setting real number rx failed\n" ); |
3526 | WRITE_ONCE(sc->caller_is_done, true); |
3527 | goto setup_nic_dev_free; |
3528 | } |
3529 | |
3530 | retval = netif_set_real_num_tx_queues(netdev, num_iqueues); |
3531 | if (retval) { |
3532 | dev_err(&octeon_dev->pci_dev->dev, |
3533 | "setting real number tx failed\n" ); |
3534 | WRITE_ONCE(sc->caller_is_done, true); |
3535 | goto setup_nic_dev_free; |
3536 | } |
3537 | |
3538 | lio = GET_LIO(netdev); |
3539 | |
3540 | memset(lio, 0, sizeof(struct lio)); |
3541 | |
3542 | lio->ifidx = ifidx_or_pfnum; |
3543 | |
3544 | props = &octeon_dev->props[i]; |
3545 | props->gmxport = resp->cfg_info.linfo.gmxport; |
3546 | props->netdev = netdev; |
3547 | |
3548 | lio->linfo.num_rxpciq = num_oqueues; |
3549 | lio->linfo.num_txpciq = num_iqueues; |
3550 | for (j = 0; j < num_oqueues; j++) { |
3551 | lio->linfo.rxpciq[j].u64 = |
3552 | resp->cfg_info.linfo.rxpciq[j].u64; |
3553 | } |
3554 | for (j = 0; j < num_iqueues; j++) { |
3555 | lio->linfo.txpciq[j].u64 = |
3556 | resp->cfg_info.linfo.txpciq[j].u64; |
3557 | } |
3558 | lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; |
3559 | lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; |
3560 | lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; |
3561 | |
3562 | WRITE_ONCE(sc->caller_is_done, true); |
3563 | |
3564 | lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
3565 | |
3566 | if (OCTEON_CN23XX_PF(octeon_dev) || |
3567 | OCTEON_CN6XXX(octeon_dev)) { |
3568 | lio->dev_capability = NETIF_F_HIGHDMA |
3569 | | NETIF_F_IP_CSUM |
3570 | | NETIF_F_IPV6_CSUM |
3571 | | NETIF_F_SG | NETIF_F_RXCSUM |
3572 | | NETIF_F_GRO |
3573 | | NETIF_F_TSO | NETIF_F_TSO6 |
3574 | | NETIF_F_LRO; |
3575 | } |
3576 | netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); |
3577 | |
3578 | /* Copy of transmit encapsulation capabilities: |
3579 | * TSO, TSO6, Checksums for this device |
3580 | */ |
3581 | lio->enc_dev_capability = NETIF_F_IP_CSUM |
3582 | | NETIF_F_IPV6_CSUM |
3583 | | NETIF_F_GSO_UDP_TUNNEL |
3584 | | NETIF_F_HW_CSUM | NETIF_F_SG |
3585 | | NETIF_F_RXCSUM |
3586 | | NETIF_F_TSO | NETIF_F_TSO6 |
3587 | | NETIF_F_LRO; |
3588 | |
3589 | netdev->hw_enc_features = (lio->enc_dev_capability & |
3590 | ~NETIF_F_LRO); |
3591 | |
3592 | netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; |
3593 | |
3594 | lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; |
3595 | |
3596 | netdev->vlan_features = lio->dev_capability; |
3597 | /* Add any unchangeable hw features */ |
3598 | lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | |
3599 | NETIF_F_HW_VLAN_CTAG_RX | |
3600 | NETIF_F_HW_VLAN_CTAG_TX; |
3601 | |
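 | /* LRO stays in hw_features so it can be toggled later, but it is left out of the default feature set. */ |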
3602 | netdev->features = (lio->dev_capability & ~NETIF_F_LRO); |
3603 | |
3604 | netdev->hw_features = lio->dev_capability; |
3605 | /* HW_VLAN_RX and HW_VLAN_FILTER are always on */ |
3606 | netdev->hw_features = netdev->hw_features & |
3607 | ~NETIF_F_HW_VLAN_CTAG_RX; |
3608 | |
3609 | /* MTU range: 68 - 16000 */ |
3610 | netdev->min_mtu = LIO_MIN_MTU_SIZE; |
3611 | netdev->max_mtu = LIO_MAX_MTU_SIZE; |
3612 | |
3613 | /* Point to the properties for octeon device to which this |
3614 | * interface belongs. |
3615 | */ |
3616 | lio->oct_dev = octeon_dev; |
3617 | lio->octprops = props; |
3618 | lio->netdev = netdev; |
3619 | |
3620 | dev_dbg(&octeon_dev->pci_dev->dev, |
3621 | "if%d gmx: %d hw_addr: 0x%llx\n" , i, |
3622 | lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); |
3623 | |
3624 | for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { |
3625 | u8 vfmac[ETH_ALEN]; |
3626 | |
3627 | eth_random_addr(vfmac); |
3628 | if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { |
3629 | dev_err(&octeon_dev->pci_dev->dev, |
3630 | "Error setting VF%d MAC address\n" , |
3631 | j); |
3632 | goto setup_nic_dev_free; |
3633 | } |
3634 | } |
3635 | |
3636 | /* 64-bit swap required on LE machines */ |
3637 | octeon_swap_8B_data(&lio->linfo.hw_addr, 1); |
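 | /* After the swap, the 6-byte MAC address occupies bytes 2..7 of the 8-byte hw_addr. */ |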
3638 | for (j = 0; j < 6; j++) |
3639 | mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); |
3640 | |
3641 | /* Copy MAC Address to OS network device structure */ |
3642 | |
3643 | eth_hw_addr_set(netdev, mac); |
3644 | |
3645 | /* By default all interfaces on a single Octeon use the same |
3646 | * tx and rx queues |
3647 | */ |
3648 | lio->txq = lio->linfo.txpciq[0].s.q_no; |
3649 | lio->rxq = lio->linfo.rxpciq[0].s.q_no; |
3650 | if (liquidio_setup_io_queues(octeon_dev, i, |
3651 | lio->linfo.num_txpciq, |
3652 | lio->linfo.num_rxpciq)) { |
3653 | dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n" ); |
3654 | goto setup_nic_dev_free; |
3655 | } |
3656 | |
3657 | ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); |
3658 | |
3659 | lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); |
3660 | lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); |
3661 | |
3662 | if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { |
3663 | dev_err(&octeon_dev->pci_dev->dev, |
3664 | "Gather list allocation failed\n" ); |
3665 | goto setup_nic_dev_free; |
3666 | } |
3667 | |
3668 | /* Register ethtool support */ |
3669 | liquidio_set_ethtool_ops(netdev); |
3670 | if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) |
3671 | octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; |
3672 | else |
3673 | octeon_dev->priv_flags = 0x0; |
3674 | |
3675 | if (netdev->features & NETIF_F_LRO) |
3676 | liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, |
3677 | OCTNIC_LROIPV4 | OCTNIC_LROIPV6); |
3678 | |
3679 | liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, |
3680 | OCTNET_CMD_VLAN_FILTER_ENABLE); |
3681 | |
3682 | if ((debug != -1) && (debug & NETIF_MSG_HW)) |
3683 | liquidio_set_feature(netdev, |
3684 | OCTNET_CMD_VERBOSE_ENABLE, 0); |
3685 | |
3686 | if (setup_link_status_change_wq(netdev)) |
3687 | goto setup_nic_dev_free; |
3688 | |
3689 | if ((octeon_dev->fw_info.app_cap_flags & |
3690 | LIQUIDIO_TIME_SYNC_CAP) && |
3691 | setup_sync_octeon_time_wq(netdev)) |
3692 | goto setup_nic_dev_free; |
3693 | |
3694 | if (setup_rx_oom_poll_fn(netdev)) |
3695 | goto setup_nic_dev_free; |
3696 | |
3697 | /* Register the network device with the OS */ |
3698 | if (register_netdev(netdev)) { |
3699 | dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n" ); |
3700 | goto setup_nic_dev_free; |
3701 | } |
3702 | |
3703 | dev_dbg(&octeon_dev->pci_dev->dev, |
3704 | "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n" , |
3705 | i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); |
3706 | netif_carrier_off(netdev); |
3707 | lio->link_changes++; |
3708 | |
3709 | ifstate_set(lio, LIO_IFSTATE_REGISTERED); |
3710 | |
3711 | /* Sending command to firmware to enable Rx checksum offload |
3712 | * by default at the time of setup of Liquidio driver for |
3713 | * this device |
3714 | */ |
3715 | liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, |
3716 | OCTNET_CMD_RXCSUM_ENABLE); |
3717 | liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, |
3718 | OCTNET_CMD_TXCSUM_ENABLE); |
3719 | |
3720 | dev_dbg(&octeon_dev->pci_dev->dev, |
3721 | "NIC ifidx:%d Setup successful\n" , i); |
3722 | |
3723 | if (octeon_dev->subsystem_id == |
3724 | OCTEON_CN2350_25GB_SUBSYS_ID || |
3725 | octeon_dev->subsystem_id == |
3726 | OCTEON_CN2360_25GB_SUBSYS_ID) { |
3727 | cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, |
3728 | octeon_dev->fw_info.ver.min, |
3729 | octeon_dev->fw_info.ver.rev); |
3730 | |
3731 | /* speed control unsupported in f/w older than 1.7.2 */ |
3732 | if (cur_ver < OCT_FW_VER(1, 7, 2)) { |
3733 | dev_info(&octeon_dev->pci_dev->dev, |
3734 | "speed setting not supported by f/w." ); |
3735 | octeon_dev->speed_setting = 25; |
3736 | octeon_dev->no_speed_setting = 1; |
3737 | } else { |
3738 | liquidio_get_speed(lio); |
3739 | } |
3740 | |
3741 | if (octeon_dev->speed_setting == 0) { |
3742 | octeon_dev->speed_setting = 25; |
3743 | octeon_dev->no_speed_setting = 1; |
3744 | } |
3745 | } else { |
3746 | octeon_dev->no_speed_setting = 1; |
3747 | octeon_dev->speed_setting = 10; |
3748 | } |
3749 | octeon_dev->speed_boot = octeon_dev->speed_setting; |
3750 | |
3751 | /* don't read FEC setting if unsupported by f/w (see above) */ |
3752 | if (octeon_dev->speed_boot == 25 && |
3753 | !octeon_dev->no_speed_setting) { |
3754 | liquidio_get_fec(lio); |
3755 | octeon_dev->props[lio->ifidx].fec_boot = |
3756 | octeon_dev->props[lio->ifidx].fec; |
3757 | } |
3758 | } |
3759 | |
3760 | device_lock(&octeon_dev->pci_dev->dev); |
3761 | devlink = devlink_alloc(&liquidio_devlink_ops, |
3762 | sizeof(struct lio_devlink_priv), |
3763 | &octeon_dev->pci_dev->dev); |
3764 | if (!devlink) { |
3765 | device_unlock(&octeon_dev->pci_dev->dev); |
3766 | dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n" ); |
3767 | goto setup_nic_dev_free; |
3768 | } |
3769 | |
3770 | lio_devlink = devlink_priv(devlink); |
3771 | lio_devlink->oct = octeon_dev; |
3772 | |
3773 | octeon_dev->devlink = devlink; |
3774 | octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; |
3775 | devlink_register(devlink); |
3776 | device_unlock(&octeon_dev->pci_dev->dev); |
3777 | |
3778 | return 0; |
3779 | |
3780 | setup_nic_dev_free: |
3781 | |
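 | /* Tear down any interfaces that were set up before the failure. */ |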
3782 | while (i--) { |
3783 | dev_err(&octeon_dev->pci_dev->dev, |
3784 | "NIC ifidx:%d Setup failed\n" , i); |
3785 | liquidio_destroy_nic_device(octeon_dev, i); |
3786 | } |
3787 | |
3788 | setup_nic_dev_done: |
3789 | |
3790 | return -ENODEV; |
3791 | } |
3792 | |
3793 | #ifdef CONFIG_PCI_IOV |
3794 | static int octeon_enable_sriov(struct octeon_device *oct) |
3795 | { |
3796 | unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; |
3797 | struct pci_dev *vfdev; |
3798 | int err; |
3799 | u32 u; |
3800 | |
3801 | if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { |
3802 | err = pci_enable_sriov(oct->pci_dev, |
3803 | oct->sriov_info.num_vfs_alloced); |
3804 | if (err) { |
3805 | dev_err(&oct->pci_dev->dev, |
3806 | "OCTEON: Failed to enable PCI sriov: %d\n" , |
3807 | err); |
3808 | oct->sriov_info.num_vfs_alloced = 0; |
3809 | return err; |
3810 | } |
3811 | oct->sriov_info.sriov_enabled = 1; |
3812 | |
3813 | /* init lookup table that maps DPI ring number to VF pci_dev |
3814 | * struct pointer |
3815 | */ |
3816 | u = 0; |
3817 | vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, |
3818 | OCTEON_CN23XX_VF_VID, NULL); |
3819 | while (vfdev) { |
3820 | if (vfdev->is_virtfn && |
3821 | (vfdev->physfn == oct->pci_dev)) { |
3822 | oct->sriov_info.dpiring_to_vfpcidev_lut[u] = |
3823 | vfdev; |
3824 | u += oct->sriov_info.rings_per_vf; |
3825 | } |
3826 | vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, |
3827 | OCTEON_CN23XX_VF_VID, vfdev); |
3828 | } |
3829 | } |
3830 | |
3831 | return num_vfs_alloced; |
3832 | } |
3833 | |
3834 | static int lio_pci_sriov_disable(struct octeon_device *oct) |
3835 | { |
3836 | int u; |
3837 | |
3838 | if (pci_vfs_assigned(oct->pci_dev)) { |
3839 | dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n" ); |
3840 | return -EPERM; |
3841 | } |
3842 | |
3843 | pci_disable_sriov(oct->pci_dev); |
3844 | |
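 | /* Clear the DPI-ring-to-VF pci_dev lookup table. */ |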
3845 | u = 0; |
3846 | while (u < MAX_POSSIBLE_VFS) { |
3847 | oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; |
3848 | u += oct->sriov_info.rings_per_vf; |
3849 | } |
3850 | |
3851 | oct->sriov_info.num_vfs_alloced = 0; |
3852 | dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n" , |
3853 | oct->pf_num); |
3854 | |
3855 | return 0; |
3856 | } |
3857 | |
3858 | static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) |
3859 | { |
3860 | struct octeon_device *oct = pci_get_drvdata(dev); |
3861 | int ret = 0; |
3862 | |
3863 | if ((num_vfs == oct->sriov_info.num_vfs_alloced) && |
3864 | (oct->sriov_info.sriov_enabled)) { |
3865 | dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n" , |
3866 | oct->pf_num, num_vfs); |
3867 | return 0; |
3868 | } |
3869 | |
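 | /* A request for zero VFs removes the VF representors and disables SR-IOV. */ |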
3870 | if (!num_vfs) { |
3871 | lio_vf_rep_destroy(oct); |
3872 | ret = lio_pci_sriov_disable(oct); |
3873 | } else if (num_vfs > oct->sriov_info.max_vfs) { |
3874 | dev_err(&oct->pci_dev->dev, |
3875 | "OCTEON: Max allowed VFs:%d user requested:%d" , |
3876 | oct->sriov_info.max_vfs, num_vfs); |
3877 | ret = -EPERM; |
3878 | } else { |
3879 | oct->sriov_info.num_vfs_alloced = num_vfs; |
3880 | ret = octeon_enable_sriov(oct); |
3881 | dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n" , |
3882 | oct->pf_num, num_vfs); |
3883 | ret = lio_vf_rep_create(oct); |
3884 | if (ret) |
3885 | dev_info(&oct->pci_dev->dev, |
3886 | "vf representor create failed" ); |
3887 | } |
3888 | |
3889 | return ret; |
3890 | } |
3891 | #endif |
3892 | |
3893 | /** |
3894 | * liquidio_init_nic_module - initialize the NIC |
3895 | * @oct: octeon device |
3896 | * |
3897 | * This initialization routine is called once the Octeon device application is |
3898 | * up and running |
3899 | */ |
3900 | static int liquidio_init_nic_module(struct octeon_device *oct) |
3901 | { |
3902 | int i, retval = 0; |
3903 | int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); |
3904 | |
3905 | dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n" ); |
3906 | |
3907 | /* only default iq and oq were initialized |
3908 | * initialize the rest as well |
3909 | */ |
3910 | /* run port_config command for each port */ |
3911 | oct->ifcount = num_nic_ports; |
3912 | |
3913 | memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); |
3914 | |
3915 | for (i = 0; i < MAX_OCTEON_LINKS; i++) |
3916 | oct->props[i].gmxport = -1; |
3917 | |
3918 | retval = setup_nic_devices(oct); |
3919 | if (retval) { |
3920 | dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n" ); |
3921 | goto octnet_init_failure; |
3922 | } |
3923 | |
3924 | /* Call vf_rep_modinit if the firmware is switchdev capable |
3925 | * and do it from the first liquidio function probed. |
3926 | */ |
3927 | if (!oct->octeon_id && |
3928 | oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { |
3929 | retval = lio_vf_rep_modinit(); |
3930 | if (retval) { |
3931 | liquidio_stop_nic_module(oct); |
3932 | goto octnet_init_failure; |
3933 | } |
3934 | } |
3935 | |
3936 | liquidio_ptp_init(oct); |
3937 | |
3938 | dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n" ); |
3939 | |
3940 | return retval; |
3941 | |
3942 | octnet_init_failure: |
3943 | |
3944 | oct->ifcount = 0; |
3945 | |
3946 | return retval; |
3947 | } |
3948 | |
3949 | /** |
3950 | * nic_starter - finish init |
3951 | * @work: work struct |
3952 | * |
3953 | * starter callback that invokes the remaining initialization work after the NIC is up and running. |
3954 | */ |
3955 | static void nic_starter(struct work_struct *work) |
3956 | { |
3957 | struct octeon_device *oct; |
3958 | struct cavium_wk *wk = (struct cavium_wk *)work; |
3959 | |
3960 | oct = (struct octeon_device *)wk->ctxptr; |
3961 | |
3962 | if (atomic_read(&oct->status) == OCT_DEV_RUNNING) |
3963 | return; |
3964 | |
3965 | /* If the status of the device is CORE_OK, the core |
3966 | * application has reported its application type. Call |
3967 | * any registered handlers now and move to the RUNNING |
3968 | * state. |
3969 | */ |
3970 | if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { |
3971 | schedule_delayed_work(&oct->nic_poll_work.work, |
3972 | LIQUIDIO_STARTER_POLL_INTERVAL_MS); |
3973 | return; |
3974 | } |
3975 | |
3976 | atomic_set(&oct->status, OCT_DEV_RUNNING); |
3977 | |
3978 | if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { |
3979 | dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n" ); |
3980 | |
3981 | if (liquidio_init_nic_module(oct)) |
3982 | dev_err(&oct->pci_dev->dev, "NIC initialization failed\n" ); |
3983 | else |
3984 | handshake[oct->octeon_id].started_ok = 1; |
3985 | } else { |
3986 | dev_err(&oct->pci_dev->dev, |
3987 | "Unexpected application running on NIC (%d). Check firmware.\n" , |
3988 | oct->app_mode); |
3989 | } |
3990 | |
3991 | complete(&handshake[oct->octeon_id].started); |
3992 | } |
3993 | |
3994 | static int |
3995 | octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) |
3996 | { |
3997 | struct octeon_device *oct = (struct octeon_device *)buf; |
3998 | struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; |
3999 | int i, notice, vf_idx; |
4000 | bool cores_crashed; |
4001 | u64 *data, vf_num; |
4002 | |
4003 | notice = recv_pkt->rh.r.ossp; |
4004 | data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); |
4005 | |
4006 | /* the first 64-bit word of data is the vf_num */ |
4007 | vf_num = data[0]; |
4008 | octeon_swap_8B_data(&vf_num, 1); |
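 | /* VF numbers reported by the firmware are 1-based; convert to a 0-based index. */ |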
4009 | vf_idx = (int)vf_num - 1; |
4010 | |
4011 | cores_crashed = READ_ONCE(oct->cores_crashed); |
4012 | |
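 | /* Each VF driver that loads takes a reference on this module so the PF |
 | * driver cannot be unloaded underneath it; skip the refcounting if the |
 | * firmware cores have crashed. |
 | */ |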
4013 | if (notice == VF_DRV_LOADED) { |
4014 | if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { |
4015 | oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); |
4016 | dev_info(&oct->pci_dev->dev, |
4017 | "driver for VF%d was loaded\n" , vf_idx); |
4018 | if (!cores_crashed) |
4019 | try_module_get(THIS_MODULE); |
4020 | } |
4021 | } else if (notice == VF_DRV_REMOVED) { |
4022 | if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { |
4023 | oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); |
4024 | dev_info(&oct->pci_dev->dev, |
4025 | "driver for VF%d was removed\n" , vf_idx); |
4026 | if (!cores_crashed) |
4027 | module_put(THIS_MODULE); |
4028 | } |
4029 | } else if (notice == VF_DRV_MACADDR_CHANGED) { |
4030 | u8 *b = (u8 *)&data[1]; |
4031 | |
4032 | oct->sriov_info.vf_macaddr[vf_idx] = data[1]; |
4033 | dev_info(&oct->pci_dev->dev, |
4034 | "VF driver changed VF%d's MAC address to %pM\n" , |
4035 | vf_idx, b + 2); |
4036 | } |
4037 | |
4038 | for (i = 0; i < recv_pkt->buffer_count; i++) |
4039 | recv_buffer_free(recv_pkt->buffer_ptr[i]); |
4040 | octeon_free_recv_info(recv_info); |
4041 | |
4042 | return 0; |
4043 | } |
4044 | |
4045 | /** |
4046 | * octeon_device_init - Device initialization for each Octeon device that is probed |
4047 | * @octeon_dev: octeon device |
4048 | */ |
4049 | static int octeon_device_init(struct octeon_device *octeon_dev) |
4050 | { |
4051 | int j, ret; |
4052 | char bootcmd[] = "\n" ; |
4053 | char *dbg_enb = NULL; |
4054 | enum lio_fw_state fw_state; |
4055 | struct octeon_device_priv *oct_priv = octeon_dev->priv; |
4056 | atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); |
4057 | |
4058 | /* Enable access to the octeon device and make its DMA capability |
4059 | * known to the OS. |
4060 | */ |
4061 | if (octeon_pci_os_setup(octeon_dev)) |
4062 | return 1; |
4063 | |
4064 | atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); |
4065 | |
4066 | /* Identify the Octeon type and map the BAR address space. */ |
4067 | if (octeon_chip_specific_setup(octeon_dev)) { |
4068 | dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n" ); |
4069 | return 1; |
4070 | } |
4071 | |
4072 | atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); |
4073 | |
4074 | /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', |
4075 | * since that is what is required for the reference to be removed |
4076 | * during de-initialization (see 'octeon_destroy_resources'). |
4077 | */ |
4078 | octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, |
4079 | PCI_SLOT(octeon_dev->pci_dev->devfn), |
4080 | PCI_FUNC(octeon_dev->pci_dev->devfn), |
4081 | true); |
4082 | |
4083 | octeon_dev->app_mode = CVM_DRV_INVALID_APP; |
4084 | |
4085 | /* CN23XX supports preloaded firmware if the following is true: |
4086 | * |
4087 | * The adapter indicates that firmware is currently running AND |
4088 | * 'fw_type' is 'auto'. |
4089 | * |
4090 | * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). |
4091 | */ |
4092 | if (OCTEON_CN23XX_PF(octeon_dev) && |
4093 | cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { |
4094 | atomic_cmpxchg(octeon_dev->adapter_fw_state, |
4095 | FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); |
4096 | } |
4097 | |
4098 | /* If loading firmware, only first device of adapter needs to do so. */ |
4099 | fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, |
4100 | FW_NEEDS_TO_BE_LOADED, |
4101 | FW_IS_BEING_LOADED); |
4102 | |
4103 | /* Here, [local variable] 'fw_state' is set to one of: |
4104 | * |
4105 | * FW_IS_PRELOADED: No firmware is to be loaded (see above) |
4106 | * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load |
4107 | * firmware to the adapter. |
4108 | * FW_IS_BEING_LOADED: The driver's second instance will not load |
4109 | * firmware to the adapter. |
4110 | */ |
4111 | |
4112 | /* Prior to f/w load, perform a soft reset of the Octeon device; |
4113 | * if error resetting, return w/error. |
4114 | */ |
4115 | if (fw_state == FW_NEEDS_TO_BE_LOADED) |
4116 | if (octeon_dev->fn_list.soft_reset(octeon_dev)) |
4117 | return 1; |
4118 | |
4119 | /* Initialize the dispatch mechanism used to push packets arriving on |
4120 | * Octeon Output queues. |
4121 | */ |
4122 | if (octeon_init_dispatch_list(octeon_dev)) |
4123 | return 1; |
4124 | |
4125 | octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, |
4126 | OPCODE_NIC_CORE_DRV_ACTIVE, |
4127 | octeon_core_drv_init, |
4128 | octeon_dev); |
4129 | |
4130 | octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, |
4131 | OPCODE_NIC_VF_DRV_NOTICE, |
4132 | octeon_recv_vf_drv_notice, octeon_dev); |
4133 | INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); |
4134 | octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; |
4135 | schedule_delayed_work(&octeon_dev->nic_poll_work.work, |
4136 | LIQUIDIO_STARTER_POLL_INTERVAL_MS); |
4137 | |
4138 | atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); |
4139 | |
4140 | if (octeon_set_io_queues_off(octeon_dev)) { |
4141 | dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n" ); |
4142 | return 1; |
4143 | } |
4144 | |
4145 | if (OCTEON_CN23XX_PF(octeon_dev)) { |
4146 | ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); |
4147 | if (ret) { |
4148 | dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n" ); |
4149 | return ret; |
4150 | } |
4151 | } |
4152 | |
4153 | /* Initialize soft command buffer pool |
4154 | */ |
4155 | if (octeon_setup_sc_buffer_pool(octeon_dev)) { |
4156 | dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n" ); |
4157 | return 1; |
4158 | } |
4159 | atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); |
4160 | |
4161 | /* Setup the data structures that manage this Octeon's Input queues. */ |
4162 | if (octeon_setup_instr_queues(octeon_dev)) { |
4163 | dev_err(&octeon_dev->pci_dev->dev, |
4164 | "instruction queue initialization failed\n" ); |
4165 | return 1; |
4166 | } |
4167 | atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); |
4168 | |
4169 | /* Initialize lists to manage the requests of different types that |
4170 | * arrive from user & kernel applications for this octeon device. |
4171 | */ |
4172 | if (octeon_setup_response_list(octeon_dev)) { |
4173 | dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n" ); |
4174 | return 1; |
4175 | } |
4176 | atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); |
4177 | |
4178 | if (octeon_setup_output_queues(octeon_dev)) { |
4179 | dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n" ); |
4180 | return 1; |
4181 | } |
4182 | |
4183 | atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); |
4184 | |
4185 | if (OCTEON_CN23XX_PF(octeon_dev)) { |
4186 | if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { |
4187 | dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n" ); |
4188 | return 1; |
4189 | } |
4190 | atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); |
4191 | |
4192 | if (octeon_allocate_ioq_vector |
4193 | (octeon_dev, |
4194 | octeon_dev->sriov_info.num_pf_rings)) { |
4195 | dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n" ); |
4196 | return 1; |
4197 | } |
4198 | atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); |
4199 | |
4200 | } else { |
4201 | /* The input and output queue registers were setup earlier (the |
4202 | * queues were not enabled). Any additional registers |
4203 | * that need to be programmed should be done now. |
4204 | */ |
4205 | ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); |
4206 | if (ret) { |
4207 | dev_err(&octeon_dev->pci_dev->dev, |
4208 | "Failed to configure device registers\n" ); |
4209 | return ret; |
4210 | } |
4211 | } |
4212 | |
4213 | /* Initialize the tasklet that handles output queue packet processing.*/ |
4214 | dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n" ); |
4215 | tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); |
4216 | |
4217 | /* Setup the interrupt handler and record the INT SUM register address |
4218 | */ |
4219 | if (octeon_setup_interrupt(octeon_dev, |
4220 | octeon_dev->sriov_info.num_pf_rings)) |
4221 | return 1; |
4222 | |
4223 | /* Enable Octeon device interrupts */ |
4224 | octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); |
4225 | |
4226 | atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); |
4227 | |
4228 | /* Send Credit for Octeon Output queues. Credits are always sent BEFORE |
4229 | * the output queue is enabled. |
4230 | * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in |
4231 | * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. |
4232 | * Otherwise, it is possible that the DRV_ACTIVE message will be sent |
4233 | * before any credits have been issued, causing the ring to be reset |
4234 | * (and the f/w appear to never have started). |
4235 | */ |
4236 | for (j = 0; j < octeon_dev->num_oqs; j++) |
4237 | writel(octeon_dev->droq[j]->max_count, |
4238 | octeon_dev->droq[j]->pkts_credit_reg); |
4239 | |
4240 | /* Enable the input and output queues for this Octeon device */ |
4241 | ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); |
4242 | if (ret) { |
4243 | dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues" ); |
4244 | return ret; |
4245 | } |
4246 | |
4247 | atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); |
4248 | |
4249 | if (fw_state == FW_NEEDS_TO_BE_LOADED) { |
4250 | dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n" ); |
4251 | if (!ddr_timeout) { |
4252 | dev_info(&octeon_dev->pci_dev->dev, |
4253 | "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n" ); |
4254 | } |
4255 | |
4256 | schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); |
4257 | |
4258 | /* Wait for the octeon to initialize DDR after the soft-reset.*/ |
4259 | while (!ddr_timeout) { |
4260 | set_current_state(TASK_INTERRUPTIBLE); |
4261 | if (schedule_timeout(HZ / 10)) { |
4262 | /* user probably pressed Control-C */ |
4263 | return 1; |
4264 | } |
4265 | } |
4266 | ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); |
4267 | if (ret) { |
4268 | dev_err(&octeon_dev->pci_dev->dev, |
4269 | "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n" , |
4270 | ret); |
4271 | return 1; |
4272 | } |
4273 | |
4274 | if (octeon_wait_for_bootloader(octeon_dev, 1000)) { |
4275 | dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n" ); |
4276 | return 1; |
4277 | } |
4278 | |
4279 | /* Divert uboot to take commands from host instead. */ |
4280 | ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); |
4281 | |
4282 | dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n" ); |
4283 | ret = octeon_init_consoles(octeon_dev); |
4284 | if (ret) { |
4285 | dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n" ); |
4286 | return 1; |
4287 | } |
4288 | /* If console debug is enabled, pass an empty string to use the |
4289 | * default enablement; otherwise pass NULL for 'disabled'. |
4290 | */ |
4291 | dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; |
4292 | ret = octeon_add_console(octeon_dev, 0, dbg_enb); |
4293 | if (ret) { |
4294 | dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n" ); |
4295 | return 1; |
4296 | } else if (octeon_console_debug_enabled(0)) { |
4297 | /* If console was added AND we're logging console output |
4298 | * then set our console print function. |
4299 | */ |
4300 | octeon_dev->console[0].print = octeon_dbg_console_print; |
4301 | } |
4302 | |
4303 | atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); |
4304 | |
4305 | dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n" ); |
4306 | ret = load_firmware(octeon_dev); |
4307 | if (ret) { |
4308 | dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n" ); |
4309 | return 1; |
4310 | } |
4311 | |
4312 | atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); |
4313 | } |
4314 | |
4315 | handshake[octeon_dev->octeon_id].init_ok = 1; |
4316 | complete(&handshake[octeon_dev->octeon_id].init); |
4317 | |
4318 | atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); |
4319 | oct_priv->dev = octeon_dev; |
4320 | |
4321 | return 0; |
4322 | } |
4323 | |
4324 | /** |
4325 | * octeon_dbg_console_print - Debug console print function |
4326 | * @oct: octeon device |
4327 | * @console_num: console number |
4328 | * @prefix: first portion of line to display |
4329 | * @suffix: second portion of line to display |
4330 | * |
4331 | * The OCTEON debug console outputs entire lines (excluding '\n'). |
4332 | * Normally, the line will be passed in the 'prefix' parameter. |
4333 | * However, due to buffering, it is possible for a line to be split into two |
4334 | * parts, in which case they will be passed as the 'prefix' parameter and |
4335 | * 'suffix' parameter. |
4336 | */ |
4337 | static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, |
4338 | char *prefix, char *suffix) |
4339 | { |
4340 | if (prefix && suffix) |
4341 | dev_info(&oct->pci_dev->dev, "%u: %s%s\n" , console_num, prefix, |
4342 | suffix); |
4343 | else if (prefix) |
4344 | dev_info(&oct->pci_dev->dev, "%u: %s\n" , console_num, prefix); |
4345 | else if (suffix) |
4346 | dev_info(&oct->pci_dev->dev, "%u: %s\n" , console_num, suffix); |
4347 | |
4348 | return 0; |
4349 | } |
4350 | |
4351 | /** |
4352 | * liquidio_exit - Exits the module |
4353 | */ |
4354 | static void __exit liquidio_exit(void) |
4355 | { |
4356 | liquidio_deinit_pci(); |
4357 | |
4358 | pr_info("LiquidIO network module is now unloaded\n" ); |
4359 | } |
4360 | |
4361 | module_init(liquidio_init); |
4362 | module_exit(liquidio_exit); |
4363 | |