1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 2013 - 2019 Intel Corporation. */ |
3 | |
4 | #include <linux/module.h> |
5 | #include <linux/interrupt.h> |
6 | |
7 | #include "fm10k.h" |
8 | |
9 | static const struct fm10k_info *fm10k_info_tbl[] = { |
10 | [fm10k_device_pf] = &fm10k_pf_info, |
11 | [fm10k_device_vf] = &fm10k_vf_info, |
12 | }; |
13 | |
14 | /* |
15 | * fm10k_pci_tbl - PCI Device ID Table |
16 | * |
17 | * Wildcard entries (PCI_ANY_ID) should come last |
18 | * Last entry must be all 0s |
19 | * |
20 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, |
21 | * Class, Class Mask, private data (not used) } |
22 | */ |
23 | static const struct pci_device_id fm10k_pci_tbl[] = { |
24 | { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf }, |
25 | { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf }, |
26 | { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf }, |
27 | { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf }, |
28 | /* required last entry */ |
29 | { 0, } |
30 | }; |
31 | MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl); |
32 | |
33 | u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg) |
34 | { |
35 | struct fm10k_intfc *interface = hw->back; |
36 | u16 value = 0; |
37 | |
38 | if (FM10K_REMOVED(hw->hw_addr)) |
39 | return ~value; |
40 | |
41 | pci_read_config_word(interface->pdev, reg, &value); |
42 | if (value == 0xFFFF) |
43 | fm10k_write_flush(hw); |
44 | |
45 | return value; |
46 | } |
47 | |
48 | u32 fm10k_read_reg(struct fm10k_hw *hw, int reg) |
49 | { |
50 | u32 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
51 | u32 value = 0; |
52 | |
53 | if (FM10K_REMOVED(hw_addr)) |
54 | return ~value; |
55 | |
56 | value = readl(&hw_addr[reg]); |
57 | if (!(~value) && (!reg || !(~readl(hw_addr)))) { |
58 | struct fm10k_intfc *interface = hw->back; |
59 | struct net_device *netdev = interface->netdev; |
60 | |
61 | hw->hw_addr = NULL; |
62 | netif_device_detach(netdev); |
63 | netdev_err(netdev, "PCIe link lost, device now detached\n"); |
64 | } |
65 | |
66 | return value; |
67 | } |
68 | |
69 | static int fm10k_hw_ready(struct fm10k_intfc *interface) |
70 | { |
71 | struct fm10k_hw *hw = &interface->hw; |
72 | |
73 | fm10k_write_flush(hw); |
74 | |
75 | return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0; |
76 | } |
77 | |
78 | /** |
79 | * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task |
80 | * @interface: fm10k private interface structure |
81 | * |
82 | * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be |
83 | * started immediately, request that it be restarted when possible. |
84 | */ |
85 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface) |
86 | { |
87 | /* Avoid processing the MAC/VLAN queue when the service task is |
88 | * disabled, or when we're resetting the device. |
89 | */ |
90 | if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) && |
91 | !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) { |
92 | clear_bit(__FM10K_MACVLAN_REQUEST, interface->state); |
93 | /* We delay the actual start of execution in order to allow |
94 | * multiple MAC/VLAN updates to accumulate before handling |
95 | * them, and to allow some time to let the mailbox drain |
96 | * between runs. |
97 | */ |
98 | queue_delayed_work(fm10k_workqueue, |
99 | &interface->macvlan_task, 10); |
100 | } else { |
101 | set_bit(__FM10K_MACVLAN_REQUEST, interface->state); |
102 | } |
103 | } |
104 | |
105 | /** |
106 | * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor |
107 | * @interface: fm10k private interface structure |
108 | * |
109 | * Wait until the MAC/VLAN queue task has stopped, and cancel any future |
110 | * requests. |
111 | */ |
112 | static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface) |
113 | { |
114 | /* Disable the MAC/VLAN work item */ |
115 | set_bit(__FM10K_MACVLAN_DISABLE, interface->state); |
116 | |
117 | /* Make sure we waited until any current invocations have stopped */ |
118 | cancel_delayed_work_sync(&interface->macvlan_task); |
119 | |
120 | /* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task. |
121 | * However, it may not be unset if the MAC/VLAN task never actually |
122 | * got a chance to run. Since we've canceled the task here, and it |
123 | * cannot be rescheduled right now, we need to ensure the scheduled bit |
124 | * gets unset. |
125 | */ |
126 | clear_bit(__FM10K_MACVLAN_SCHED, interface->state); |
127 | } |
128 | |
129 | /** |
130 | * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor |
131 | * @interface: fm10k private interface structure |
132 | * |
133 | * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule |
134 | * the MAC/VLAN work monitor. |
135 | */ |
136 | static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface) |
137 | { |
138 | /* Re-enable the MAC/VLAN work item */ |
139 | clear_bit(__FM10K_MACVLAN_DISABLE, interface->state); |
140 | |
141 | /* We might have received a MAC/VLAN request while disabled. If so, |
142 | * kick off the queue now. |
143 | */ |
144 | if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state)) |
145 | fm10k_macvlan_schedule(interface); |
146 | } |
147 | |
148 | void fm10k_service_event_schedule(struct fm10k_intfc *interface) |
149 | { |
150 | if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) && |
151 | !test_and_set_bit(__FM10K_SERVICE_SCHED, interface->state)) { |
152 | clear_bit(__FM10K_SERVICE_REQUEST, interface->state); |
153 | queue_work(fm10k_workqueue, &interface->service_task); |
154 | } else { |
155 | set_bit(__FM10K_SERVICE_REQUEST, interface->state); |
156 | } |
157 | } |
158 | |
159 | static void fm10k_service_event_complete(struct fm10k_intfc *interface) |
160 | { |
161 | WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, interface->state)); |
162 | |
163 | /* flush memory to make sure state is correct before next watchdog */ |
164 | smp_mb__before_atomic(); |
165 | clear_bit(__FM10K_SERVICE_SCHED, interface->state); |
166 | |
167 | /* If a service event was requested since we started, immediately |
168 | * re-schedule now. This ensures we don't drop a request until the |
169 | * next timer event. |
170 | */ |
171 | if (test_bit(__FM10K_SERVICE_REQUEST, interface->state)) |
172 | fm10k_service_event_schedule(interface); |
173 | } |
174 | |
175 | static void fm10k_stop_service_event(struct fm10k_intfc *interface) |
176 | { |
177 | set_bit(__FM10K_SERVICE_DISABLE, interface->state); |
178 | cancel_work_sync(&interface->service_task); |
179 | |
180 | /* It's possible that cancel_work_sync stopped the service task from |
181 | * running before it could actually start. In this case the |
182 | * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that |
183 | * the service task cannot be running at this point, we need to clear |
184 | * the scheduled bit, as otherwise the service task may never be |
185 | * restarted. |
186 | */ |
187 | clear_bit(__FM10K_SERVICE_SCHED, interface->state); |
188 | } |
189 | |
190 | static void fm10k_start_service_event(struct fm10k_intfc *interface) |
191 | { |
192 | clear_bit(__FM10K_SERVICE_DISABLE, interface->state); |
193 | fm10k_service_event_schedule(interface); |
194 | } |
195 | |
196 | /** |
197 | * fm10k_service_timer - Timer Call-back |
198 | * @t: pointer to timer data |
199 | **/ |
200 | static void fm10k_service_timer(struct timer_list *t) |
201 | { |
202 | struct fm10k_intfc *interface = from_timer(interface, t, |
203 | service_timer); |
204 | |
205 | /* Reset the timer */ |
206 | mod_timer(&interface->service_timer, (HZ * 2) + jiffies); |
207 | |
208 | fm10k_service_event_schedule(interface); |
209 | } |
210 | |
211 | /** |
212 | * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset |
213 | * @interface: fm10k private data structure |
214 | * |
215 | * This function prepares for a device reset by shutting as much down as we |
216 | * can. It does nothing and returns false if __FM10K_RESETTING was already set |
217 | * prior to calling this function. It returns true if it actually did work. |
218 | */ |
219 | static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface) |
220 | { |
221 | struct net_device *netdev = interface->netdev; |
222 | |
223 | /* put off any impending NetWatchDogTimeout */ |
224 | netif_trans_update(netdev); |
225 | |
226 | /* Nothing to do if a reset is already in progress */ |
227 | if (test_and_set_bit(__FM10K_RESETTING, interface->state)) |
228 | return false; |
229 | |
230 | /* As the MAC/VLAN task will be accessing registers it must not be |
231 | * running while we reset. Although the task will not be scheduled |
232 | * once we start resetting it may already be running |
233 | */ |
234 | fm10k_stop_macvlan_task(interface); |
235 | |
236 | rtnl_lock(); |
237 | |
238 | fm10k_iov_suspend(interface->pdev); |
239 | |
240 | if (netif_running(netdev)) |
241 | fm10k_close(netdev); |
242 | |
243 | fm10k_mbx_free_irq(interface); |
244 | |
245 | /* free interrupts */ |
246 | fm10k_clear_queueing_scheme(interface); |
247 | |
248 | /* delay any future reset requests */ |
249 | interface->last_reset = jiffies + (10 * HZ); |
250 | |
251 | rtnl_unlock(); |
252 | |
253 | return true; |
254 | } |
255 | |
256 | static int fm10k_handle_reset(struct fm10k_intfc *interface) |
257 | { |
258 | struct net_device *netdev = interface->netdev; |
259 | struct fm10k_hw *hw = &interface->hw; |
260 | int err; |
261 | |
262 | WARN_ON(!test_bit(__FM10K_RESETTING, interface->state)); |
263 | |
264 | rtnl_lock(); |
265 | |
266 | pci_set_master(interface->pdev); |
267 | |
268 | /* reset and initialize the hardware so it is in a known state */ |
269 | err = hw->mac.ops.reset_hw(hw); |
270 | if (err) { |
271 | dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err); |
272 | goto reinit_err; |
273 | } |
274 | |
275 | err = hw->mac.ops.init_hw(hw); |
276 | if (err) { |
277 | dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); |
278 | goto reinit_err; |
279 | } |
280 | |
281 | err = fm10k_init_queueing_scheme(interface); |
282 | if (err) { |
283 | dev_err(&interface->pdev->dev, |
284 | "init_queueing_scheme failed: %d\n", err); |
285 | goto reinit_err; |
286 | } |
287 | |
288 | /* re-associate interrupts */ |
289 | err = fm10k_mbx_request_irq(interface); |
290 | if (err) |
291 | goto err_mbx_irq; |
292 | |
293 | err = fm10k_hw_ready(interface); |
294 | if (err) |
295 | goto err_open; |
296 | |
297 | /* update hardware address for VFs if perm_addr has changed */ |
298 | if (hw->mac.type == fm10k_mac_vf) { |
299 | if (is_valid_ether_addr(hw->mac.perm_addr)) { |
300 | ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); |
301 | ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); |
302 | eth_hw_addr_set(netdev, hw->mac.perm_addr); |
303 | netdev->addr_assign_type &= ~NET_ADDR_RANDOM; |
304 | } |
305 | |
306 | if (hw->mac.vlan_override) |
307 | netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; |
308 | else |
309 | netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
310 | } |
311 | |
312 | err = netif_running(netdev) ? fm10k_open(netdev) : 0; |
313 | if (err) |
314 | goto err_open; |
315 | |
316 | fm10k_iov_resume(interface->pdev); |
317 | |
318 | rtnl_unlock(); |
319 | |
320 | fm10k_resume_macvlan_task(interface); |
321 | |
322 | clear_bit(__FM10K_RESETTING, interface->state); |
323 | |
324 | return err; |
325 | err_open: |
326 | fm10k_mbx_free_irq(interface); |
327 | err_mbx_irq: |
328 | fm10k_clear_queueing_scheme(interface); |
329 | reinit_err: |
330 | netif_device_detach(netdev); |
331 | |
332 | rtnl_unlock(); |
333 | |
334 | clear_bit(__FM10K_RESETTING, interface->state); |
335 | |
336 | return err; |
337 | } |
338 | |
339 | static void fm10k_detach_subtask(struct fm10k_intfc *interface) |
340 | { |
341 | struct net_device *netdev = interface->netdev; |
342 | u32 __iomem *hw_addr; |
343 | u32 value; |
344 | |
345 | /* do nothing if netdev is still present or hw_addr is set */ |
346 | if (netif_device_present(netdev) || interface->hw.hw_addr) |
347 | return; |
348 | |
349 | /* We've lost the PCIe register space, and can no longer access the |
350 | * device. Shut everything except the detach subtask down and prepare |
351 | * to reset the device in case we recover. If we actually prepare for |
352 | * reset, indicate that we're detached. |
353 | */ |
354 | if (fm10k_prepare_for_reset(interface)) |
355 | set_bit(__FM10K_RESET_DETACHED, interface->state); |
356 | |
357 | /* check the real address space to see if we've recovered */ |
358 | hw_addr = READ_ONCE(interface->uc_addr); |
359 | value = readl(hw_addr); |
360 | if (~value) { |
361 | int err; |
362 | |
363 | /* Make sure the reset was initiated because we detached, |
364 | * otherwise we might race with a different reset flow. |
365 | */ |
366 | if (!test_and_clear_bit(__FM10K_RESET_DETACHED, |
367 | interface->state)) |
368 | return; |
369 | |
370 | /* Restore the hardware address */ |
371 | interface->hw.hw_addr = interface->uc_addr; |
372 | |
373 | /* PCIe link has been restored, and the device is active |
374 | * again. Restore everything and reset the device. |
375 | */ |
376 | err = fm10k_handle_reset(interface); |
377 | if (err) { |
378 | netdev_err(netdev, "Unable to reset device: %d\n", err); |
379 | interface->hw.hw_addr = NULL; |
380 | return; |
381 | } |
382 | |
383 | /* Re-attach the netdev */ |
384 | netif_device_attach(netdev); |
385 | netdev_warn(netdev, "PCIe link restored, device now attached\n"); |
386 | return; |
387 | } |
388 | } |
389 | |
390 | static void fm10k_reset_subtask(struct fm10k_intfc *interface) |
391 | { |
392 | int err; |
393 | |
394 | if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED, |
395 | interface->flags)) |
396 | return; |
397 | |
398 | /* If another thread has already prepared to reset the device, we |
399 | * should not attempt to handle a reset here, since we'd race with |
400 | * that thread. This may happen if we suspend the device or if the |
401 | * PCIe link is lost. In this case, we'll just ignore the RESET |
402 | * request, as it will (eventually) be taken care of when the thread |
403 | * which actually started the reset is finished. |
404 | */ |
405 | if (!fm10k_prepare_for_reset(interface)) |
406 | return; |
407 | |
408 | netdev_err(interface->netdev, "Reset interface\n"); |
409 | |
410 | err = fm10k_handle_reset(interface); |
411 | if (err) |
412 | dev_err(&interface->pdev->dev, |
413 | "fm10k_handle_reset failed: %d\n", err); |
414 | } |
415 | |
416 | /** |
417 | * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping |
418 | * @interface: board private structure |
419 | * |
420 | * Configure the SWPRI to PC mapping for the port. |
421 | **/ |
422 | static void fm10k_configure_swpri_map(struct fm10k_intfc *interface) |
423 | { |
424 | struct net_device *netdev = interface->netdev; |
425 | struct fm10k_hw *hw = &interface->hw; |
426 | int i; |
427 | |
428 | /* clear flag indicating update is needed */ |
429 | clear_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags); |
430 | |
431 | /* these registers are only available on the PF */ |
432 | if (hw->mac.type != fm10k_mac_pf) |
433 | return; |
434 | |
435 | /* configure SWPRI to PC map */ |
436 | for (i = 0; i < FM10K_SWPRI_MAX; i++) |
437 | fm10k_write_reg(hw, FM10K_SWPRI_MAP(i), |
438 | netdev_get_prio_tc_map(netdev, i)); |
439 | } |
440 | |
441 | /** |
442 | * fm10k_watchdog_update_host_state - Update the link status based on host. |
443 | * @interface: board private structure |
444 | **/ |
445 | static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) |
446 | { |
447 | struct fm10k_hw *hw = &interface->hw; |
448 | s32 err; |
449 | |
450 | if (test_bit(__FM10K_LINK_DOWN, interface->state)) { |
451 | interface->host_ready = false; |
452 | if (time_is_after_jiffies(interface->link_down_event)) |
453 | return; |
454 | clear_bit(__FM10K_LINK_DOWN, interface->state); |
455 | } |
456 | |
457 | if (test_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags)) { |
458 | if (rtnl_trylock()) { |
459 | fm10k_configure_swpri_map(interface); |
460 | rtnl_unlock(); |
461 | } |
462 | } |
463 | |
464 | /* lock the mailbox for transmit and receive */ |
465 | fm10k_mbx_lock(interface); |
466 | |
467 | err = hw->mac.ops.get_host_state(hw, &interface->host_ready); |
468 | if (err && time_is_before_jiffies(interface->last_reset)) |
469 | set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); |
470 | |
471 | /* free the lock */ |
472 | fm10k_mbx_unlock(interface); |
473 | } |
474 | |
475 | /** |
476 | * fm10k_mbx_subtask - Process upstream and downstream mailboxes |
477 | * @interface: board private structure |
478 | * |
479 | * This function will process both the upstream and downstream mailboxes. |
480 | **/ |
481 | static void fm10k_mbx_subtask(struct fm10k_intfc *interface) |
482 | { |
483 | /* If we're resetting, bail out */ |
484 | if (test_bit(__FM10K_RESETTING, interface->state)) |
485 | return; |
486 | |
487 | /* process upstream mailbox and update device state */ |
488 | fm10k_watchdog_update_host_state(interface); |
489 | |
490 | /* process downstream mailboxes */ |
491 | fm10k_iov_mbx(interface); |
492 | } |
493 | |
494 | /** |
495 | * fm10k_watchdog_host_is_ready - Update netdev status based on host ready |
496 | * @interface: board private structure |
497 | **/ |
498 | static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface) |
499 | { |
500 | struct net_device *netdev = interface->netdev; |
501 | |
502 | /* only continue if link state is currently down */ |
503 | if (netif_carrier_ok(netdev)) |
504 | return; |
505 | |
506 | netif_info(interface, drv, netdev, "NIC Link is up\n"); |
507 | |
508 | netif_carrier_on(netdev); |
509 | netif_tx_wake_all_queues(netdev); |
510 | } |
511 | |
512 | /** |
513 | * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready |
514 | * @interface: board private structure |
515 | **/ |
516 | static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface) |
517 | { |
518 | struct net_device *netdev = interface->netdev; |
519 | |
520 | /* only continue if link state is currently up */ |
521 | if (!netif_carrier_ok(netdev)) |
522 | return; |
523 | |
524 | netif_info(interface, drv, netdev, "NIC Link is down\n"); |
525 | |
526 | netif_carrier_off(netdev); |
527 | netif_tx_stop_all_queues(netdev); |
528 | } |
529 | |
530 | /** |
531 | * fm10k_update_stats - Update the board statistics counters. |
532 | * @interface: board private structure |
533 | **/ |
534 | void fm10k_update_stats(struct fm10k_intfc *interface) |
535 | { |
536 | struct net_device_stats *net_stats = &interface->netdev->stats; |
537 | struct fm10k_hw *hw = &interface->hw; |
538 | u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0; |
539 | u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0; |
540 | u64 rx_link_errors = 0; |
541 | u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0; |
542 | u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0; |
543 | u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0; |
544 | u64 tx_bytes_nic = 0, tx_pkts_nic = 0; |
545 | u64 bytes, pkts; |
546 | int i; |
547 | |
548 | /* ensure only one thread updates stats at a time */ |
549 | if (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state)) |
550 | return; |
551 | |
552 | /* do not allow stats update via service task for next second */ |
553 | interface->next_stats_update = jiffies + HZ; |
554 | |
555 | /* gather some stats to the interface struct that are per queue */ |
556 | for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { |
557 | struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]); |
558 | |
559 | if (!tx_ring) |
560 | continue; |
561 | |
562 | restart_queue += tx_ring->tx_stats.restart_queue; |
563 | tx_busy += tx_ring->tx_stats.tx_busy; |
564 | tx_csum_errors += tx_ring->tx_stats.csum_err; |
565 | bytes += tx_ring->stats.bytes; |
566 | pkts += tx_ring->stats.packets; |
567 | hw_csum_tx_good += tx_ring->tx_stats.csum_good; |
568 | } |
569 | |
570 | interface->restart_queue = restart_queue; |
571 | interface->tx_busy = tx_busy; |
572 | net_stats->tx_bytes = bytes; |
573 | net_stats->tx_packets = pkts; |
574 | interface->tx_csum_errors = tx_csum_errors; |
575 | interface->hw_csum_tx_good = hw_csum_tx_good; |
576 | |
577 | /* gather some stats to the interface struct that are per queue */ |
578 | for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { |
579 | struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]); |
580 | |
581 | if (!rx_ring) |
582 | continue; |
583 | |
584 | bytes += rx_ring->stats.bytes; |
585 | pkts += rx_ring->stats.packets; |
586 | alloc_failed += rx_ring->rx_stats.alloc_failed; |
587 | rx_csum_errors += rx_ring->rx_stats.csum_err; |
588 | rx_errors += rx_ring->rx_stats.errors; |
589 | hw_csum_rx_good += rx_ring->rx_stats.csum_good; |
590 | rx_switch_errors += rx_ring->rx_stats.switch_errors; |
591 | rx_drops += rx_ring->rx_stats.drops; |
592 | rx_pp_errors += rx_ring->rx_stats.pp_errors; |
593 | rx_link_errors += rx_ring->rx_stats.link_errors; |
594 | rx_length_errors += rx_ring->rx_stats.length_errors; |
595 | } |
596 | |
597 | net_stats->rx_bytes = bytes; |
598 | net_stats->rx_packets = pkts; |
599 | interface->alloc_failed = alloc_failed; |
600 | interface->rx_csum_errors = rx_csum_errors; |
601 | interface->hw_csum_rx_good = hw_csum_rx_good; |
602 | interface->rx_switch_errors = rx_switch_errors; |
603 | interface->rx_drops = rx_drops; |
604 | interface->rx_pp_errors = rx_pp_errors; |
605 | interface->rx_link_errors = rx_link_errors; |
606 | interface->rx_length_errors = rx_length_errors; |
607 | |
608 | hw->mac.ops.update_hw_stats(hw, &interface->stats); |
609 | |
610 | for (i = 0; i < hw->mac.max_queues; i++) { |
611 | struct fm10k_hw_stats_q *q = &interface->stats.q[i]; |
612 | |
613 | tx_bytes_nic += q->tx_bytes.count; |
614 | tx_pkts_nic += q->tx_packets.count; |
615 | rx_bytes_nic += q->rx_bytes.count; |
616 | rx_pkts_nic += q->rx_packets.count; |
617 | rx_drops_nic += q->rx_drops.count; |
618 | } |
619 | |
620 | interface->tx_bytes_nic = tx_bytes_nic; |
621 | interface->tx_packets_nic = tx_pkts_nic; |
622 | interface->rx_bytes_nic = rx_bytes_nic; |
623 | interface->rx_packets_nic = rx_pkts_nic; |
624 | interface->rx_drops_nic = rx_drops_nic; |
625 | |
626 | /* Fill out the OS statistics structure */ |
627 | net_stats->rx_errors = rx_errors; |
628 | net_stats->rx_dropped = interface->stats.nodesc_drop.count; |
629 | |
630 | /* Update VF statistics */ |
631 | fm10k_iov_update_stats(interface); |
632 | |
633 | clear_bit(__FM10K_UPDATING_STATS, interface->state); |
634 | } |
635 | |
636 | /** |
637 | * fm10k_watchdog_flush_tx - flush queues on host not ready |
638 | * @interface: pointer to the device interface structure |
639 | **/ |
640 | static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) |
641 | { |
642 | int some_tx_pending = 0; |
643 | int i; |
644 | |
645 | /* nothing to do if carrier is up */ |
646 | if (netif_carrier_ok(interface->netdev)) |
647 | return; |
648 | |
649 | for (i = 0; i < interface->num_tx_queues; i++) { |
650 | struct fm10k_ring *tx_ring = interface->tx_ring[i]; |
651 | |
652 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
653 | some_tx_pending = 1; |
654 | break; |
655 | } |
656 | } |
657 | |
658 | /* We've lost link, so the controller stops DMA, but we've got |
659 | * queued Tx work that's never going to get done, so reset |
660 | * controller to flush Tx. |
661 | */ |
662 | if (some_tx_pending) |
663 | set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); |
664 | } |
665 | |
666 | /** |
667 | * fm10k_watchdog_subtask - check and bring link up |
668 | * @interface: pointer to the device interface structure |
669 | **/ |
670 | static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) |
671 | { |
672 | /* if interface is down do nothing */ |
673 | if (test_bit(__FM10K_DOWN, interface->state) || |
674 | test_bit(__FM10K_RESETTING, interface->state)) |
675 | return; |
676 | |
677 | if (interface->host_ready) |
678 | fm10k_watchdog_host_is_ready(interface); |
679 | else |
680 | fm10k_watchdog_host_not_ready(interface); |
681 | |
682 | /* update stats only once every second */ |
683 | if (time_is_before_jiffies(interface->next_stats_update)) |
684 | fm10k_update_stats(interface); |
685 | |
686 | /* flush any uncompleted work */ |
687 | fm10k_watchdog_flush_tx(interface); |
688 | } |
689 | |
690 | /** |
691 | * fm10k_check_hang_subtask - check for hung queues and dropped interrupts |
692 | * @interface: pointer to the device interface structure |
693 | * |
694 | * This function serves two purposes. First it strobes the interrupt lines |
695 | * in order to make certain interrupts are occurring. Secondly it sets the |
696 | * bits needed to check for TX hangs. As a result we should immediately |
697 | * determine if a hang has occurred. |
698 | */ |
699 | static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) |
700 | { |
701 | /* If we're down or resetting, just bail */ |
702 | if (test_bit(__FM10K_DOWN, interface->state) || |
703 | test_bit(__FM10K_RESETTING, interface->state)) |
704 | return; |
705 | |
706 | /* rate limit tx hang checks to only once every 2 seconds */ |
707 | if (time_is_after_eq_jiffies(interface->next_tx_hang_check)) |
708 | return; |
709 | interface->next_tx_hang_check = jiffies + (2 * HZ); |
710 | |
711 | if (netif_carrier_ok(interface->netdev)) { |
712 | int i; |
713 | |
714 | /* Force detection of hung controller */ |
715 | for (i = 0; i < interface->num_tx_queues; i++) |
716 | set_check_for_tx_hang(interface->tx_ring[i]); |
717 | |
718 | /* Rearm all in-use q_vectors for immediate firing */ |
719 | for (i = 0; i < interface->num_q_vectors; i++) { |
720 | struct fm10k_q_vector *qv = interface->q_vector[i]; |
721 | |
722 | if (!qv->tx.count && !qv->rx.count) |
723 | continue; |
724 | writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr); |
725 | } |
726 | } |
727 | } |
728 | |
729 | /** |
730 | * fm10k_service_task - manages and runs subtasks |
731 | * @work: pointer to work_struct containing our data |
732 | **/ |
733 | static void fm10k_service_task(struct work_struct *work) |
734 | { |
735 | struct fm10k_intfc *interface; |
736 | |
737 | interface = container_of(work, struct fm10k_intfc, service_task); |
738 | |
739 | /* Check whether we're detached first */ |
740 | fm10k_detach_subtask(interface); |
741 | |
742 | /* tasks run even when interface is down */ |
743 | fm10k_mbx_subtask(interface); |
744 | fm10k_reset_subtask(interface); |
745 | |
746 | /* tasks only run when interface is up */ |
747 | fm10k_watchdog_subtask(interface); |
748 | fm10k_check_hang_subtask(interface); |
749 | |
750 | /* release lock on service events to allow scheduling next event */ |
751 | fm10k_service_event_complete(interface); |
752 | } |
753 | |
754 | /** |
755 | * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager |
756 | * @work: pointer to work_struct containing our data |
757 | * |
758 | * This work item handles sending MAC/VLAN updates to the switch manager. When |
759 | * the interface is up, it will attempt to queue mailbox messages to the |
760 | * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the |
761 | * mailbox is full, it will reschedule itself to try again in a short while. |
762 | * This ensures that the driver does not overload the switch mailbox with too |
763 | * many simultaneous requests, causing an unnecessary reset. |
764 | **/ |
765 | static void fm10k_macvlan_task(struct work_struct *work) |
766 | { |
767 | struct fm10k_macvlan_request *item; |
768 | struct fm10k_intfc *interface; |
769 | struct delayed_work *dwork; |
770 | struct list_head *requests; |
771 | struct fm10k_hw *hw; |
772 | unsigned long flags; |
773 | |
774 | dwork = to_delayed_work(work); |
775 | interface = container_of(dwork, struct fm10k_intfc, macvlan_task); |
776 | hw = &interface->hw; |
777 | requests = &interface->macvlan_requests; |
778 | |
779 | do { |
780 | /* Pop the first item off the list */ |
781 | spin_lock_irqsave(&interface->macvlan_lock, flags); |
782 | item = list_first_entry_or_null(requests, |
783 | struct fm10k_macvlan_request, |
784 | list); |
785 | if (item) |
786 | list_del_init(&item->list); |
787 | |
788 | spin_unlock_irqrestore(&interface->macvlan_lock, flags); |
789 | |
790 | /* We have no more items to process */ |
791 | if (!item) |
792 | goto done; |
793 | |
794 | fm10k_mbx_lock(interface); |
795 | |
796 | /* Check that we have plenty of space to send the message. We |
797 | * want to ensure that the mailbox stays low enough to avoid a |
798 | * change in the host state, otherwise we may see spurious |
799 | * link up / link down notifications. |
800 | */ |
801 | if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) { |
802 | hw->mbx.ops.process(hw, &hw->mbx); |
803 | set_bit(__FM10K_MACVLAN_REQUEST, interface->state); |
804 | fm10k_mbx_unlock(interface); |
805 | |
806 | /* Put the request back on the list */ |
807 | spin_lock_irqsave(&interface->macvlan_lock, flags); |
808 | list_add(&item->list, requests); |
809 | spin_unlock_irqrestore(&interface->macvlan_lock, flags); |
810 | break; |
811 | } |
812 | |
813 | switch (item->type) { |
814 | case FM10K_MC_MAC_REQUEST: |
815 | hw->mac.ops.update_mc_addr(hw, |
816 | item->mac.glort, |
817 | item->mac.addr, |
818 | item->mac.vid, |
819 | item->set); |
820 | break; |
821 | case FM10K_UC_MAC_REQUEST: |
822 | hw->mac.ops.update_uc_addr(hw, |
823 | item->mac.glort, |
824 | item->mac.addr, |
825 | item->mac.vid, |
826 | item->set, |
827 | 0); |
828 | break; |
829 | case FM10K_VLAN_REQUEST: |
830 | hw->mac.ops.update_vlan(hw, |
831 | item->vlan.vid, |
832 | item->vlan.vsi, |
833 | item->set); |
834 | break; |
835 | default: |
836 | break; |
837 | } |
838 | |
839 | fm10k_mbx_unlock(interface); |
840 | |
841 | /* Free the item now that we've sent the update */ |
842 | kfree(item); |
843 | } while (true); |
844 | |
845 | done: |
846 | WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state)); |
847 | |
848 | /* flush memory to make sure state is correct */ |
849 | smp_mb__before_atomic(); |
850 | clear_bit(__FM10K_MACVLAN_SCHED, interface->state); |
851 | |
852 | /* If a MAC/VLAN request was scheduled since we started, we should |
853 | * re-schedule. However, there is no reason to re-schedule if there is |
854 | * no work to do. |
855 | */ |
856 | if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state)) |
857 | fm10k_macvlan_schedule(interface); |
858 | } |
859 | |
860 | /** |
861 | * fm10k_configure_tx_ring - Configure Tx ring after Reset |
862 | * @interface: board private structure |
863 | * @ring: structure containing ring specific data |
864 | * |
865 | * Configure the Tx descriptor ring after a reset. |
866 | **/ |
867 | static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, |
868 | struct fm10k_ring *ring) |
869 | { |
870 | struct fm10k_hw *hw = &interface->hw; |
871 | u64 tdba = ring->dma; |
872 | u32 size = ring->count * sizeof(struct fm10k_tx_desc); |
873 | u32 txint = FM10K_INT_MAP_DISABLE; |
874 | u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE; |
875 | u8 reg_idx = ring->reg_idx; |
876 | |
877 | /* disable queue to avoid issues while updating state */ |
878 | fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0); |
879 | fm10k_write_flush(hw); |
880 | |
881 | /* possible poll here to verify ring resources have been cleaned */ |
882 | |
883 | /* set location and size for descriptor ring */ |
884 | fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); |
885 | fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32); |
886 | fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size); |
887 | |
888 | /* reset head and tail pointers */ |
889 | fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0); |
890 | fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0); |
891 | |
892 | /* store tail pointer */ |
893 | ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; |
894 | |
895 | /* reset ntu and ntc to place SW in sync with hardware */ |
896 | ring->next_to_clean = 0; |
897 | ring->next_to_use = 0; |
898 | |
899 | /* Map interrupt */ |
900 | if (ring->q_vector) { |
901 | txint = ring->q_vector->v_idx + NON_Q_VECTORS; |
902 | txint |= FM10K_INT_MAP_TIMER0; |
903 | } |
904 | |
905 | fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint); |
906 | |
907 | /* enable use of FTAG bit in Tx descriptor, register is RO for VF */ |
908 | fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), |
909 | FM10K_PFVTCTL_FTAG_DESC_ENABLE); |
910 | |
911 | /* Initialize XPS */ |
912 | if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) && |
913 | ring->q_vector) |
914 | netif_set_xps_queue(ring->netdev, |
915 | &ring->q_vector->affinity_mask, |
916 | ring->queue_index); |
917 | |
918 | /* enable queue */ |
919 | fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); |
920 | } |
921 | |
922 | /** |
923 | * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration |
924 | * @interface: board private structure |
925 | * @ring: structure containing ring specific data |
926 | * |
927 | * Verify the Tx descriptor ring is ready for transmit. |
928 | **/ |
929 | static void fm10k_enable_tx_ring(struct fm10k_intfc *interface, |
930 | struct fm10k_ring *ring) |
931 | { |
932 | struct fm10k_hw *hw = &interface->hw; |
933 | int wait_loop = 10; |
934 | u32 txdctl; |
935 | u8 reg_idx = ring->reg_idx; |
936 | |
937 | /* if we are already enabled just exit */ |
938 | if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE) |
939 | return; |
940 | |
941 | /* poll to verify queue is enabled */ |
942 | do { |
943 | usleep_range(1000, 2000); |
944 | txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)); |
945 | } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop); |
946 | if (!wait_loop) |
947 | netif_err(interface, drv, interface->netdev, |
948 | "Could not enable Tx Queue %d\n", reg_idx); |
949 | } |
950 | |
951 | /** |
952 | * fm10k_configure_tx - Configure Transmit Unit after Reset |
953 | * @interface: board private structure |
954 | * |
955 | * Configure the Tx unit of the MAC after a reset. |
956 | **/ |
957 | static void fm10k_configure_tx(struct fm10k_intfc *interface) |
958 | { |
959 | int i; |
960 | |
961 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
962 | for (i = 0; i < interface->num_tx_queues; i++) |
963 | fm10k_configure_tx_ring(interface, interface->tx_ring[i]); |
964 | |
965 | /* poll here to verify that Tx rings are now enabled */ |
966 | for (i = 0; i < interface->num_tx_queues; i++) |
967 | fm10k_enable_tx_ring(interface, interface->tx_ring[i]); |
968 | } |
969 | |
970 | /** |
971 | * fm10k_configure_rx_ring - Configure Rx ring after Reset |
972 | * @interface: board private structure |
973 | * @ring: structure containing ring specific data |
974 | * |
975 | * Configure the Rx descriptor ring after a reset. |
976 | **/ |
977 | static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, |
978 | struct fm10k_ring *ring) |
979 | { |
980 | u64 rdba = ring->dma; |
981 | struct fm10k_hw *hw = &interface->hw; |
982 | u32 size = ring->count * sizeof(union fm10k_rx_desc); |
983 | u32 rxqctl, rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; |
984 | u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN; |
985 | u32 rxint = FM10K_INT_MAP_DISABLE; |
986 | u8 rx_pause = interface->rx_pause; |
987 | u8 reg_idx = ring->reg_idx; |
988 | |
989 | /* disable queue to avoid issues while updating state */ |
990 | rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx)); |
991 | rxqctl &= ~FM10K_RXQCTL_ENABLE; |
992 | fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl); |
993 | fm10k_write_flush(hw); |
994 | |
995 | /* possible poll here to verify ring resources have been cleaned */ |
996 | |
997 | /* set location and size for descriptor ring */ |
998 | fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); |
999 | fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32); |
1000 | fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size); |
1001 | |
1002 | /* reset head and tail pointers */ |
1003 | fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0); |
1004 | fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0); |
1005 | |
1006 | /* store tail pointer */ |
1007 | ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; |
1008 | |
1009 | /* reset ntu and ntc to place SW in sync with hardware */ |
1010 | ring->next_to_clean = 0; |
1011 | ring->next_to_use = 0; |
1012 | ring->next_to_alloc = 0; |
1013 | |
1014 | /* Configure the Rx buffer size for one buff without split */ |
1015 | srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; |
1016 | |
1017 | /* Configure the Rx ring to suppress loopback packets */ |
1018 | srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; |
1019 | fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); |
1020 | |
1021 | /* Enable drop on empty */ |
1022 | #ifdef CONFIG_DCB |
1023 | if (interface->pfc_en) |
1024 | rx_pause = interface->pfc_en; |
1025 | #endif |
1026 | if (!(rx_pause & BIT(ring->qos_pc))) |
1027 | rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; |
1028 | |
1029 | fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); |
1030 | |
1031 | /* assign default VLAN to queue */ |
1032 | ring->vid = hw->mac.default_vid; |
1033 | |
1034 | /* if we have an active VLAN, disable default VLAN ID */ |
1035 | if (test_bit(hw->mac.default_vid, interface->active_vlans)) |
1036 | ring->vid |= FM10K_VLAN_CLEAR; |
1037 | |
1038 | /* Map interrupt */ |
1039 | if (ring->q_vector) { |
1040 | rxint = ring->q_vector->v_idx + NON_Q_VECTORS; |
1041 | rxint |= FM10K_INT_MAP_TIMER1; |
1042 | } |
1043 | |
1044 | fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint); |
1045 | |
1046 | /* enable queue */ |
1047 | rxqctl = fm10k_read_reg(hw, FM10K_RXQCTL(reg_idx)); |
1048 | rxqctl |= FM10K_RXQCTL_ENABLE; |
1049 | fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl); |
1050 | |
1051 | /* place buffers on ring for receive data */ |
1052 | fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring)); |
1053 | } |
1054 | |
1055 | /** |
1056 | * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings |
1057 | * @interface: board private structure |
1058 | * |
1059 | * Configure the drop enable bits for the Rx rings. |
1060 | **/ |
1061 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface) |
1062 | { |
1063 | struct fm10k_hw *hw = &interface->hw; |
1064 | u8 rx_pause = interface->rx_pause; |
1065 | int i; |
1066 | |
1067 | #ifdef CONFIG_DCB |
1068 | if (interface->pfc_en) |
1069 | rx_pause = interface->pfc_en; |
1070 | |
1071 | #endif |
1072 | for (i = 0; i < interface->num_rx_queues; i++) { |
1073 | struct fm10k_ring *ring = interface->rx_ring[i]; |
1074 | u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; |
1075 | u8 reg_idx = ring->reg_idx; |
1076 | |
1077 | if (!(rx_pause & BIT(ring->qos_pc))) |
1078 | rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; |
1079 | |
1080 | fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); |
1081 | } |
1082 | } |
1083 | |
1084 | /** |
1085 | * fm10k_configure_dglort - Configure Receive DGLORT after reset |
1086 | * @interface: board private structure |
1087 | * |
1088 | * Configure the DGLORT description and RSS tables. |
1089 | **/ |
1090 | static void fm10k_configure_dglort(struct fm10k_intfc *interface) |
1091 | { |
1092 | struct fm10k_dglort_cfg dglort = { 0 }; |
1093 | struct fm10k_hw *hw = &interface->hw; |
1094 | int i; |
1095 | u32 mrqc; |
1096 | |
1097 | /* Fill out hash function seeds */ |
1098 | for (i = 0; i < FM10K_RSSRK_SIZE; i++) |
1099 | fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]); |
1100 | |
1101 | /* Write RETA table to hardware */ |
1102 | for (i = 0; i < FM10K_RETA_SIZE; i++) |
1103 | fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]); |
1104 | |
1105 | /* Generate RSS hash based on packet types, TCP/UDP |
1106 | * port numbers and/or IPv4/v6 src and dst addresses |
1107 | */ |
1108 | mrqc = FM10K_MRQC_IPV4 | |
1109 | FM10K_MRQC_TCP_IPV4 | |
1110 | FM10K_MRQC_IPV6 | |
1111 | FM10K_MRQC_TCP_IPV6; |
1112 | |
1113 | if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags)) |
1114 | mrqc |= FM10K_MRQC_UDP_IPV4; |
1115 | if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP, interface->flags)) |
1116 | mrqc |= FM10K_MRQC_UDP_IPV6; |
1117 | |
1118 | fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); |
1119 | |
1120 | /* configure default DGLORT mapping for RSS/DCB */ |
1121 | dglort.inner_rss = 1; |
1122 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); |
1123 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); |
1124 | hw->mac.ops.configure_dglort_map(hw, &dglort); |
1125 | |
1126 | /* assign GLORT per queue for queue mapped testing */ |
1127 | if (interface->glort_count > 64) { |
1128 | memset(&dglort, 0, sizeof(dglort)); |
1129 | dglort.inner_rss = 1; |
1130 | dglort.glort = interface->glort + 64; |
1131 | dglort.idx = fm10k_dglort_pf_queue; |
1132 | dglort.queue_l = fls(interface->num_rx_queues - 1); |
1133 | hw->mac.ops.configure_dglort_map(hw, &dglort); |
1134 | } |
1135 | |
1136 | /* assign glort value for RSS/DCB specific to this interface */ |
1137 | memset(&dglort, 0, sizeof(dglort)); |
1138 | dglort.inner_rss = 1; |
1139 | dglort.glort = interface->glort; |
1140 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); |
1141 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); |
1142 | /* configure DGLORT mapping for RSS/DCB */ |
1143 | dglort.idx = fm10k_dglort_pf_rss; |
1144 | if (interface->l2_accel) |
1145 | dglort.shared_l = fls(interface->l2_accel->size); |
1146 | hw->mac.ops.configure_dglort_map(hw, &dglort); |
1147 | } |
1148 | |
1149 | /** |
1150 | * fm10k_configure_rx - Configure Receive Unit after Reset |
1151 | * @interface: board private structure |
1152 | * |
1153 | * Configure the Rx unit of the MAC after a reset. |
1154 | **/ |
1155 | static void fm10k_configure_rx(struct fm10k_intfc *interface) |
1156 | { |
1157 | int i; |
1158 | |
1159 | /* Configure SWPRI to PC map */ |
1160 | fm10k_configure_swpri_map(interface); |
1161 | |
1162 | /* Configure RSS and DGLORT map */ |
1163 | fm10k_configure_dglort(interface); |
1164 | |
1165 | /* Setup the HW Rx Head and Tail descriptor pointers */ |
1166 | for (i = 0; i < interface->num_rx_queues; i++) |
1167 | fm10k_configure_rx_ring(interface, interface->rx_ring[i]); |
1168 | |
1169 | /* possible poll here to verify that Rx rings are now enabled */ |
1170 | } |
1171 | |
1172 | static void fm10k_napi_enable_all(struct fm10k_intfc *interface) |
1173 | { |
1174 | struct fm10k_q_vector *q_vector; |
1175 | int q_idx; |
1176 | |
1177 | for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { |
1178 | q_vector = interface->q_vector[q_idx]; |
1179 | napi_enable(&q_vector->napi); |
1180 | } |
1181 | } |
1182 | |
1183 | static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) |
1184 | { |
1185 | struct fm10k_q_vector *q_vector = data; |
1186 | |
1187 | if (q_vector->rx.count || q_vector->tx.count) |
1188 | napi_schedule_irqoff(&q_vector->napi); |
1189 | |
1190 | return IRQ_HANDLED; |
1191 | } |
1192 | |
1193 | static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) |
1194 | { |
1195 | struct fm10k_intfc *interface = data; |
1196 | struct fm10k_hw *hw = &interface->hw; |
1197 | struct fm10k_mbx_info *mbx = &hw->mbx; |
1198 | |
1199 | /* re-enable mailbox interrupt and indicate 20us delay */ |
1200 | fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), |
1201 | (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | |
1202 | FM10K_ITR_ENABLE); |
1203 | |
1204 | /* service upstream mailbox */ |
1205 | if (fm10k_mbx_trylock(interface)) { |
1206 | mbx->ops.process(hw, mbx); |
1207 | fm10k_mbx_unlock(interface); |
1208 | } |
1209 | |
1210 | hw->mac.get_host_state = true; |
1211 | fm10k_service_event_schedule(interface); |
1212 | |
1213 | return IRQ_HANDLED; |
1214 | } |
1215 | |
1216 | #define FM10K_ERR_MSG(type) case (type): error = #type; break |
1217 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, |
1218 | struct fm10k_fault *fault) |
1219 | { |
1220 | struct pci_dev *pdev = interface->pdev; |
1221 | struct fm10k_hw *hw = &interface->hw; |
1222 | struct fm10k_iov_data *iov_data = interface->iov_data; |
1223 | char *error; |
1224 | |
1225 | switch (type) { |
1226 | case FM10K_PCA_FAULT: |
1227 | switch (fault->type) { |
1228 | default: |
1229 | error = "Unknown PCA error"; |
1230 | break; |
1231 | FM10K_ERR_MSG(PCA_NO_FAULT); |
1232 | FM10K_ERR_MSG(PCA_UNMAPPED_ADDR); |
1233 | FM10K_ERR_MSG(PCA_BAD_QACCESS_PF); |
1234 | FM10K_ERR_MSG(PCA_BAD_QACCESS_VF); |
1235 | FM10K_ERR_MSG(PCA_MALICIOUS_REQ); |
1236 | FM10K_ERR_MSG(PCA_POISONED_TLP); |
1237 | FM10K_ERR_MSG(PCA_TLP_ABORT); |
1238 | } |
1239 | break; |
1240 | case FM10K_THI_FAULT: |
1241 | switch (fault->type) { |
1242 | default: |
1243 | error = "Unknown THI error"; |
1244 | break; |
1245 | FM10K_ERR_MSG(THI_NO_FAULT); |
1246 | FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT); |
1247 | } |
1248 | break; |
1249 | case FM10K_FUM_FAULT: |
1250 | switch (fault->type) { |
1251 | default: |
1252 | error = "Unknown FUM error"; |
1253 | break; |
1254 | FM10K_ERR_MSG(FUM_NO_FAULT); |
1255 | FM10K_ERR_MSG(FUM_UNMAPPED_ADDR); |
1256 | FM10K_ERR_MSG(FUM_BAD_VF_QACCESS); |
1257 | FM10K_ERR_MSG(FUM_ADD_DECODE_ERR); |
1258 | FM10K_ERR_MSG(FUM_RO_ERROR); |
1259 | FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR); |
1260 | FM10K_ERR_MSG(FUM_CSR_TIMEOUT); |
1261 | FM10K_ERR_MSG(FUM_INVALID_TYPE); |
1262 | FM10K_ERR_MSG(FUM_INVALID_LENGTH); |
1263 | FM10K_ERR_MSG(FUM_INVALID_BE); |
1264 | FM10K_ERR_MSG(FUM_INVALID_ALIGN); |
1265 | } |
1266 | break; |
1267 | default: |
1268 | error = "Undocumented fault"; |
1269 | break; |
1270 | } |
1271 | |
1272 | dev_warn(&pdev->dev, |
1273 | "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", |
1274 | error, fault->address, fault->specinfo, |
1275 | PCI_SLOT(fault->func), PCI_FUNC(fault->func)); |
1276 | |
1277 | /* For VF faults, clear out the respective LPORT, reset the queue |
1278 | * resources, and then reconnect to the mailbox. This allows the |
1279 | * VF in question to resume behavior. For transient faults that are |
1280 | * the result of non-malicious behavior this will log the fault and |
1281 | * allow the VF to resume functionality. Obviously for malicious VFs |
1282 | * they will be able to attempt malicious behavior again. In this |
1283 | * case, the system administrator will need to step in and manually |
1284 | * remove or disable the VF in question. |
1285 | */ |
1286 | if (fault->func && iov_data) { |
1287 | int vf = fault->func - 1; |
1288 | struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf]; |
1289 | |
1290 | hw->iov.ops.reset_lport(hw, vf_info); |
1291 | hw->iov.ops.reset_resources(hw, vf_info); |
1292 | |
1293 | /* reset_lport disables the VF, so re-enable it */ |
1294 | hw->iov.ops.set_lport(hw, vf_info, vf, |
1295 | FM10K_VF_FLAG_MULTI_CAPABLE); |
1296 | |
1297 | /* reset_resources will disconnect from the mbx */ |
1298 | vf_info->mbx.ops.connect(hw, &vf_info->mbx); |
1299 | } |
1300 | } |
1301 | |
1302 | static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) |
1303 | { |
1304 | struct fm10k_hw *hw = &interface->hw; |
1305 | struct fm10k_fault fault = { 0 }; |
1306 | int type, err; |
1307 | |
1308 | for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT; |
1309 | eicr; |
1310 | eicr >>= 1, type += FM10K_FAULT_SIZE) { |
1311 | /* only check if there is an error reported */ |
1312 | if (!(eicr & 0x1)) |
1313 | continue; |
1314 | |
1315 | /* retrieve fault info */ |
1316 | err = hw->mac.ops.get_fault(hw, type, &fault); |
1317 | if (err) { |
1318 | dev_err(&interface->pdev->dev, |
1319 | "error reading fault\n"); |
1320 | continue; |
1321 | } |
1322 | |
1323 | fm10k_handle_fault(interface, type, &fault); |
1324 | } |
1325 | } |
1326 | |
1327 | static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) |
1328 | { |
1329 | struct fm10k_hw *hw = &interface->hw; |
1330 | const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; |
1331 | u32 maxholdq; |
1332 | int q; |
1333 | |
1334 | if (!(eicr & FM10K_EICR_MAXHOLDTIME)) |
1335 | return; |
1336 | |
1337 | maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7)); |
1338 | if (maxholdq) |
1339 | fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); |
1340 | for (q = 255;;) { |
1341 | if (maxholdq & BIT(31)) { |
1342 | if (q < FM10K_MAX_QUEUES_PF) { |
1343 | interface->rx_overrun_pf++; |
1344 | fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); |
1345 | } else { |
1346 | interface->rx_overrun_vf++; |
1347 | } |
1348 | } |
1349 | |
1350 | maxholdq *= 2; |
1351 | if (!maxholdq) |
1352 | q &= ~(32 - 1); |
1353 | |
1354 | if (!q) |
1355 | break; |
1356 | |
1357 | if (q-- % 32) |
1358 | continue; |
1359 | |
1360 | maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32)); |
1361 | if (maxholdq) |
1362 | fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq); |
1363 | } |
1364 | } |
1365 | |
1366 | static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) |
1367 | { |
1368 | struct fm10k_intfc *interface = data; |
1369 | struct fm10k_hw *hw = &interface->hw; |
1370 | struct fm10k_mbx_info *mbx = &hw->mbx; |
1371 | u32 eicr; |
1372 | |
1373 | /* unmask any set bits related to this interrupt */ |
1374 | eicr = fm10k_read_reg(hw, FM10K_EICR); |
1375 | fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX | |
1376 | FM10K_EICR_SWITCHREADY | |
1377 | FM10K_EICR_SWITCHNOTREADY)); |
1378 | |
1379 | /* report any faults found to the message log */ |
1380 | fm10k_report_fault(interface, eicr); |
1381 | |
1382 | /* reset any queues disabled due to receiver overrun */ |
1383 | fm10k_reset_drop_on_empty(interface, eicr); |
1384 | |
1385 | /* service mailboxes */ |
1386 | if (fm10k_mbx_trylock(interface)) { |
1387 | s32 err = mbx->ops.process(hw, mbx); |
1388 | |
1389 | if (err == FM10K_ERR_RESET_REQUESTED) |
1390 | set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); |
1391 | |
1392 | /* handle VFLRE events */ |
1393 | fm10k_iov_event(interface); |
1394 | fm10k_mbx_unlock(interface); |
1395 | } |
1396 | |
1397 | /* if switch toggled state we should reset GLORTs */ |
1398 | if (eicr & FM10K_EICR_SWITCHNOTREADY) { |
1399 | /* force link down for at least 4 seconds */ |
1400 | interface->link_down_event = jiffies + (4 * HZ); |
1401 | set_bit(__FM10K_LINK_DOWN, interface->state); |
1402 | |
1403 | /* reset dglort_map back to no config */ |
1404 | hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; |
1405 | } |
1406 | |
1407 | /* we should validate host state after interrupt event */ |
1408 | hw->mac.get_host_state = true; |
1409 | |
1410 | /* validate host state, and handle VF mailboxes in the service task */ |
1411 | fm10k_service_event_schedule(interface); |
1412 | |
1413 | /* re-enable mailbox interrupt and indicate 20us delay */ |
1414 | fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), |
1415 | (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | |
1416 | FM10K_ITR_ENABLE); |
1417 | |
1418 | return IRQ_HANDLED; |
1419 | } |
1420 | |
1421 | void fm10k_mbx_free_irq(struct fm10k_intfc *interface) |
1422 | { |
1423 | struct fm10k_hw *hw = &interface->hw; |
1424 | struct msix_entry *entry; |
1425 | int itr_reg; |
1426 | |
1427 | /* no mailbox IRQ to free if MSI-X is not enabled */ |
1428 | if (!interface->msix_entries) |
1429 | return; |
1430 | |
1431 | entry = &interface->msix_entries[FM10K_MBX_VECTOR]; |
1432 | |
1433 | /* disconnect the mailbox */ |
1434 | hw->mbx.ops.disconnect(hw, &hw->mbx); |
1435 | |
1436 | /* disable Mailbox cause */ |
1437 | if (hw->mac.type == fm10k_mac_pf) { |
1438 | fm10k_write_reg(hw, FM10K_EIMR, |
1439 | FM10K_EIMR_DISABLE(PCA_FAULT) | |
1440 | FM10K_EIMR_DISABLE(FUM_FAULT) | |
1441 | FM10K_EIMR_DISABLE(MAILBOX) | |
1442 | FM10K_EIMR_DISABLE(SWITCHREADY) | |
1443 | FM10K_EIMR_DISABLE(SWITCHNOTREADY) | |
1444 | FM10K_EIMR_DISABLE(SRAMERROR) | |
1445 | FM10K_EIMR_DISABLE(VFLR) | |
1446 | FM10K_EIMR_DISABLE(MAXHOLDTIME)); |
1447 | itr_reg = FM10K_ITR(FM10K_MBX_VECTOR); |
1448 | } else { |
1449 | itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR); |
1450 | } |
1451 | |
1452 | fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET); |
1453 | |
1454 | free_irq(entry->vector, interface); |
1455 | } |
1456 | |
1457 | static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, |
1458 | struct fm10k_mbx_info *mbx) |
1459 | { |
1460 | bool vlan_override = hw->mac.vlan_override; |
1461 | u16 default_vid = hw->mac.default_vid; |
1462 | struct fm10k_intfc *interface; |
1463 | s32 err; |
1464 | |
1465 | err = fm10k_msg_mac_vlan_vf(hw, results, mbx); |
1466 | if (err) |
1467 | return err; |
1468 | |
1469 | interface = container_of(hw, struct fm10k_intfc, hw); |
1470 | |
1471 | /* MAC was changed so we need reset */ |
1472 | if (is_valid_ether_addr(hw->mac.perm_addr) && |
1473 | !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr)) |
1474 | set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); |
1475 | |
1476 | /* VLAN override was changed, or default VLAN changed */ |
1477 | if ((vlan_override != hw->mac.vlan_override) || |
1478 | (default_vid != hw->mac.default_vid)) |
1479 | set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); |
1480 | |
1481 | return 0; |
1482 | } |
1483 | |
1484 | /* generic error handler for mailbox issues */ |
1485 | static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, |
1486 | struct fm10k_mbx_info __always_unused *mbx) |
1487 | { |
1488 | struct fm10k_intfc *interface; |
1489 | struct pci_dev *pdev; |
1490 | |
1491 | interface = container_of(hw, struct fm10k_intfc, hw); |
1492 | pdev = interface->pdev; |
1493 | |
1494 | dev_err(&pdev->dev, "Unknown message ID %u\n", |
1495 | **results & FM10K_TLV_ID_MASK); |
1496 | |
1497 | return 0; |
1498 | } |
1499 | |
1500 | static const struct fm10k_msg_data vf_mbx_data[] = { |
1501 | FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), |
1502 | FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), |
1503 | FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), |
1504 | FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), |
1505 | }; |
1506 | |
1507 | static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) |
1508 | { |
1509 | struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; |
1510 | struct net_device *dev = interface->netdev; |
1511 | struct fm10k_hw *hw = &interface->hw; |
1512 | int err; |
1513 | |
1514 | /* Use timer0 for interrupt moderation on the mailbox */ |
1515 | u32 itr = entry->entry | FM10K_INT_MAP_TIMER0; |
1516 | |
1517 | /* register mailbox handlers */ |
1518 | err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); |
1519 | if (err) |
1520 | return err; |
1521 | |
1522 | /* request the IRQ */ |
1523 | err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0, |
1524 | dev->name, interface); |
1525 | if (err) { |
1526 | netif_err(interface, probe, dev, |
1527 | "request_irq for msix_mbx failed: %d\n", err); |
1528 | return err; |
1529 | } |
1530 | |
1531 | /* map all of the interrupt sources */ |
1532 | fm10k_write_reg(hw, FM10K_VFINT_MAP, itr); |
1533 | |
1534 | /* enable interrupt */ |
1535 | fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE); |
1536 | |
1537 | return 0; |
1538 | } |
1539 | |
1540 | static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, |
1541 | struct fm10k_mbx_info *mbx) |
1542 | { |
1543 | struct fm10k_intfc *interface; |
1544 | u32 dglort_map = hw->mac.dglort_map; |
1545 | s32 err; |
1546 | |
1547 | interface = container_of(hw, struct fm10k_intfc, hw); |
1548 | |
1549 | err = fm10k_msg_err_pf(hw, results, mbx); |
1550 | if (!err && hw->swapi.status) { |
1551 | /* force link down for a reasonable delay */ |
1552 | interface->link_down_event = jiffies + (2 * HZ); |
1553 | set_bit(__FM10K_LINK_DOWN, interface->state); |
1554 | |
1555 | /* reset dglort_map back to no config */ |
1556 | hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; |
1557 | |
1558 | fm10k_service_event_schedule(interface); |
1559 | |
1560 | /* prevent overloading kernel message buffer */ |
1561 | if (interface->lport_map_failed) |
1562 | return 0; |
1563 | |
1564 | interface->lport_map_failed = true; |
1565 | |
1566 | if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED) |
dev_warn(&interface->pdev->dev,
"cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
dev_warn(&interface->pdev->dev,
"request logical port map failed: %d\n",
1571 | hw->swapi.status); |
1572 | |
1573 | return 0; |
1574 | } |
1575 | |
1576 | err = fm10k_msg_lport_map_pf(hw, results, mbx); |
1577 | if (err) |
1578 | return err; |
1579 | |
1580 | interface->lport_map_failed = false; |
1581 | |
1582 | /* we need to reset if port count was just updated */ |
1583 | if (dglort_map != hw->mac.dglort_map) |
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
1585 | |
1586 | return 0; |
1587 | } |
1588 | |
1589 | static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, |
1590 | struct fm10k_mbx_info __always_unused *mbx) |
1591 | { |
1592 | struct fm10k_intfc *interface; |
1593 | u16 glort, pvid; |
1594 | u32 pvid_update; |
1595 | s32 err; |
1596 | |
1597 | err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], |
1598 | &pvid_update); |
1599 | if (err) |
1600 | return err; |
1601 | |
1602 | /* extract values from the pvid update */ |
1603 | glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); |
1604 | pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); |
1605 | |
1606 | /* if glort is not valid return error */ |
1607 | if (!fm10k_glort_valid_pf(hw, glort)) |
1608 | return FM10K_ERR_PARAM; |
1609 | |
1610 | /* verify VLAN ID is valid */ |
1611 | if (pvid >= FM10K_VLAN_TABLE_VID_MAX) |
1612 | return FM10K_ERR_PARAM; |
1613 | |
1614 | interface = container_of(hw, struct fm10k_intfc, hw); |
1615 | |
1616 | /* check to see if this belongs to one of the VFs */ |
1617 | err = fm10k_iov_update_pvid(interface, glort, pvid); |
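/* a return of zero means the IOV code owned this GLORT and has
 * already applied the PVID update, so no PF-level action is needed
 */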
1618 | if (!err) |
1619 | return 0; |
1620 | |
1621 | /* we need to reset if default VLAN was just updated */ |
1622 | if (pvid != hw->mac.default_vid) |
set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
1624 | |
1625 | hw->mac.default_vid = pvid; |
1626 | |
1627 | return 0; |
1628 | } |
1629 | |
1630 | static const struct fm10k_msg_data pf_mbx_data[] = { |
1631 | FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), |
1632 | FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), |
1633 | FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map), |
1634 | FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), |
1635 | FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), |
1636 | FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), |
1637 | FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), |
1638 | }; |
1639 | |
1640 | static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) |
1641 | { |
1642 | struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; |
1643 | struct net_device *dev = interface->netdev; |
1644 | struct fm10k_hw *hw = &interface->hw; |
1645 | int err; |
1646 | |
1647 | /* Use timer0 for interrupt moderation on the mailbox */ |
1648 | u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0; |
1649 | u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE; |
1650 | |
1651 | /* register mailbox handlers */ |
1652 | err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); |
1653 | if (err) |
1654 | return err; |
1655 | |
1656 | /* request the IRQ */ |
err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
dev->name, interface);
1659 | if (err) { |
netif_err(interface, probe, dev,
"request_irq for msix_mbx failed: %d\n", err);
1662 | return err; |
1663 | } |
1664 | |
1665 | /* Enable interrupts w/ no moderation for "other" interrupts */ |
1666 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr); |
1667 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr); |
1668 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr); |
1669 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr); |
1670 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr); |
1671 | |
1672 | /* Enable interrupts w/ moderation for mailbox */ |
1673 | fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr); |
1674 | |
1675 | /* Enable individual interrupt causes */ |
1676 | fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | |
1677 | FM10K_EIMR_ENABLE(FUM_FAULT) | |
1678 | FM10K_EIMR_ENABLE(MAILBOX) | |
1679 | FM10K_EIMR_ENABLE(SWITCHREADY) | |
1680 | FM10K_EIMR_ENABLE(SWITCHNOTREADY) | |
1681 | FM10K_EIMR_ENABLE(SRAMERROR) | |
1682 | FM10K_EIMR_ENABLE(VFLR) | |
1683 | FM10K_EIMR_ENABLE(MAXHOLDTIME)); |
1684 | |
1685 | /* enable interrupt */ |
1686 | fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE); |
1687 | |
1688 | return 0; |
1689 | } |
1690 | |
1691 | int fm10k_mbx_request_irq(struct fm10k_intfc *interface) |
1692 | { |
1693 | struct fm10k_hw *hw = &interface->hw; |
1694 | int err; |
1695 | |
1696 | /* enable Mailbox cause */ |
1697 | if (hw->mac.type == fm10k_mac_pf) |
1698 | err = fm10k_mbx_request_irq_pf(interface); |
1699 | else |
1700 | err = fm10k_mbx_request_irq_vf(interface); |
1701 | if (err) |
1702 | return err; |
1703 | |
1704 | /* connect mailbox */ |
1705 | err = hw->mbx.ops.connect(hw, &hw->mbx); |
1706 | |
1707 | /* if the mailbox failed to connect, then free IRQ */ |
1708 | if (err) |
1709 | fm10k_mbx_free_irq(interface); |
1710 | |
1711 | return err; |
1712 | } |
1713 | |
1714 | /** |
1715 | * fm10k_qv_free_irq - release interrupts associated with queue vectors |
1716 | * @interface: board private structure |
1717 | * |
1718 | * Release all interrupts associated with this interface |
1719 | **/ |
1720 | void fm10k_qv_free_irq(struct fm10k_intfc *interface) |
1721 | { |
1722 | int vector = interface->num_q_vectors; |
1723 | struct msix_entry *entry; |
1724 | |
1725 | entry = &interface->msix_entries[NON_Q_VECTORS + vector]; |
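/* entry starts one past the last queue vector so it can be decremented
 * in lockstep with vector below; only vectors with Tx or Rx rings ever
 * requested an IRQ, so the rest are skipped
 */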
1726 | |
1727 | while (vector) { |
1728 | struct fm10k_q_vector *q_vector; |
1729 | |
1730 | vector--; |
1731 | entry--; |
1732 | q_vector = interface->q_vector[vector]; |
1733 | |
1734 | if (!q_vector->tx.count && !q_vector->rx.count) |
1735 | continue; |
1736 | |
1737 | /* clear the affinity_mask in the IRQ descriptor */ |
irq_set_affinity_hint(entry->vector, NULL);
1739 | |
1740 | /* disable interrupts */ |
writel(FM10K_ITR_MASK_SET, q_vector->itr);
1742 | |
1743 | free_irq(entry->vector, q_vector); |
1744 | } |
1745 | } |
1746 | |
1747 | /** |
1748 | * fm10k_qv_request_irq - initialize interrupts for queue vectors |
1749 | * @interface: board private structure |
1750 | * |
1751 | * Attempts to configure interrupts using the best available |
1752 | * capabilities of the hardware and kernel. |
1753 | **/ |
1754 | int fm10k_qv_request_irq(struct fm10k_intfc *interface) |
1755 | { |
1756 | struct net_device *dev = interface->netdev; |
1757 | struct fm10k_hw *hw = &interface->hw; |
1758 | struct msix_entry *entry; |
1759 | unsigned int ri = 0, ti = 0; |
1760 | int vector, err; |
1761 | |
1762 | entry = &interface->msix_entries[NON_Q_VECTORS]; |
1763 | |
1764 | for (vector = 0; vector < interface->num_q_vectors; vector++) { |
1765 | struct fm10k_q_vector *q_vector = interface->q_vector[vector]; |
1766 | |
1767 | /* name the vector */ |
1768 | if (q_vector->tx.count && q_vector->rx.count) { |
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-TxRx-%u", dev->name, ri++);
ti++;
} else if (q_vector->rx.count) {
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-rx-%u", dev->name, ri++);
} else if (q_vector->tx.count) {
snprintf(q_vector->name, sizeof(q_vector->name),
"%s-tx-%u", dev->name, ti++);
1778 | } else { |
1779 | /* skip this unused q_vector */ |
1780 | continue; |
1781 | } |
1782 | |
1783 | /* Assign ITR register to q_vector */ |
1784 | q_vector->itr = (hw->mac.type == fm10k_mac_pf) ? |
1785 | &interface->uc_addr[FM10K_ITR(entry->entry)] : |
1786 | &interface->uc_addr[FM10K_VFITR(entry->entry)]; |
1787 | |
1788 | /* request the IRQ */ |
err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
q_vector->name, q_vector);
1791 | if (err) { |
netif_err(interface, probe, dev,
"request_irq failed for MSIX interrupt Error: %d\n",
1794 | err); |
1795 | goto err_out; |
1796 | } |
1797 | |
1798 | /* assign the mask for this irq */ |
irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
1800 | |
1801 | /* Enable q_vector */ |
writel(FM10K_ITR_ENABLE, q_vector->itr);
1803 | |
1804 | entry++; |
1805 | } |
1806 | |
1807 | return 0; |
1808 | |
1809 | err_out: |
1810 | /* wind through the ring freeing all entries and vectors */ |
1811 | while (vector) { |
1812 | struct fm10k_q_vector *q_vector; |
1813 | |
1814 | entry--; |
1815 | vector--; |
1816 | q_vector = interface->q_vector[vector]; |
1817 | |
1818 | if (!q_vector->tx.count && !q_vector->rx.count) |
1819 | continue; |
1820 | |
1821 | /* clear the affinity_mask in the IRQ descriptor */ |
irq_set_affinity_hint(entry->vector, NULL);
1823 | |
1824 | /* disable interrupts */ |
writel(FM10K_ITR_MASK_SET, q_vector->itr);
1826 | |
1827 | free_irq(entry->vector, q_vector); |
1828 | } |
1829 | |
1830 | return err; |
1831 | } |
1832 | |
1833 | void fm10k_up(struct fm10k_intfc *interface) |
1834 | { |
1835 | struct fm10k_hw *hw = &interface->hw; |
1836 | |
1837 | /* Enable Tx/Rx DMA */ |
1838 | hw->mac.ops.start_hw(hw); |
1839 | |
1840 | /* configure Tx descriptor rings */ |
1841 | fm10k_configure_tx(interface); |
1842 | |
1843 | /* configure Rx descriptor rings */ |
1844 | fm10k_configure_rx(interface); |
1845 | |
1846 | /* configure interrupts */ |
1847 | hw->mac.ops.update_int_moderator(hw); |
1848 | |
1849 | /* enable statistics capture again */ |
clear_bit(__FM10K_UPDATING_STATS, interface->state);
1851 | |
1852 | /* clear down bit to indicate we are ready to go */ |
clear_bit(__FM10K_DOWN, interface->state);
1854 | |
1855 | /* enable polling cleanups */ |
1856 | fm10k_napi_enable_all(interface); |
1857 | |
1858 | /* re-establish Rx filters */ |
1859 | fm10k_restore_rx_state(interface); |
1860 | |
1861 | /* enable transmits */ |
netif_tx_start_all_queues(interface->netdev);
1863 | |
/* request a host state check and kick off the service timer now */
1865 | hw->mac.get_host_state = true; |
mod_timer(&interface->service_timer, jiffies);
1867 | } |
1868 | |
1869 | static void fm10k_napi_disable_all(struct fm10k_intfc *interface) |
1870 | { |
1871 | struct fm10k_q_vector *q_vector; |
1872 | int q_idx; |
1873 | |
1874 | for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { |
1875 | q_vector = interface->q_vector[q_idx]; |
napi_disable(&q_vector->napi);
1877 | } |
1878 | } |
1879 | |
1880 | void fm10k_down(struct fm10k_intfc *interface) |
1881 | { |
1882 | struct net_device *netdev = interface->netdev; |
1883 | struct fm10k_hw *hw = &interface->hw; |
1884 | int err, i = 0, count = 0; |
1885 | |
1886 | /* signal that we are down to the interrupt handler and service task */ |
if (test_and_set_bit(__FM10K_DOWN, interface->state))
1888 | return; |
1889 | |
1890 | /* call carrier off first to avoid false dev_watchdog timeouts */ |
netif_carrier_off(netdev);
1892 | |
1893 | /* disable transmits */ |
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
1896 | |
1897 | /* reset Rx filters */ |
1898 | fm10k_reset_rx_state(interface); |
1899 | |
1900 | /* disable polling routines */ |
1901 | fm10k_napi_disable_all(interface); |
1902 | |
1903 | /* capture stats one last time before stopping interface */ |
1904 | fm10k_update_stats(interface); |
1905 | |
1906 | /* prevent updating statistics while we're down */ |
while (test_and_set_bit(__FM10K_UPDATING_STATS, interface->state))
usleep_range(1000, 2000);
1909 | |
1910 | /* skip waiting for TX DMA if we lost PCIe link */ |
1911 | if (FM10K_REMOVED(hw->hw_addr)) |
1912 | goto skip_tx_dma_drain; |
1913 | |
1914 | /* In some rare circumstances it can take a while for Tx queues to |
1915 | * quiesce and be fully disabled. Attempt to .stop_hw() first, and |
1916 | * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop |
* until the Tx queues have emptied, or a set number of retries has
* elapsed. If
1918 | * we fail to clear within the retry loop, we will issue a warning |
1919 | * indicating that Tx DMA is probably hung. Note this means we call |
1920 | * .stop_hw() twice but this shouldn't cause any problems. |
1921 | */ |
1922 | err = hw->mac.ops.stop_hw(hw); |
1923 | if (err != FM10K_ERR_REQUESTS_PENDING) |
1924 | goto skip_tx_dma_drain; |
1925 | |
1926 | #define TX_DMA_DRAIN_RETRIES 25 |
1927 | for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) { |
usleep_range(10000, 20000);
1929 | |
1930 | /* start checking at the last ring to have pending Tx */ |
1931 | for (; i < interface->num_tx_queues; i++) |
if (fm10k_get_tx_pending(interface->tx_ring[i], false))
1933 | break; |
1934 | |
1935 | /* if all the queues are drained, we can break now */ |
1936 | if (i == interface->num_tx_queues) |
1937 | break; |
1938 | } |
1939 | |
1940 | if (count >= TX_DMA_DRAIN_RETRIES) |
dev_err(&interface->pdev->dev,
"Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
1943 | count); |
1944 | skip_tx_dma_drain: |
1945 | /* Disable DMA engine for Tx/Rx */ |
1946 | err = hw->mac.ops.stop_hw(hw); |
1947 | if (err == FM10K_ERR_REQUESTS_PENDING) |
dev_err(&interface->pdev->dev,
"due to pending requests hw was not shut down gracefully\n");
1950 | else if (err) |
1951 | dev_err(&interface->pdev->dev, "stop_hw failed: %d\n" , err); |
1952 | |
1953 | /* free any buffers still on the rings */ |
1954 | fm10k_clean_all_tx_rings(interface); |
1955 | fm10k_clean_all_rx_rings(interface); |
1956 | } |
1957 | |
1958 | /** |
1959 | * fm10k_sw_init - Initialize general software structures |
1960 | * @interface: host interface private structure to initialize |
1961 | * @ent: PCI device ID entry |
1962 | * |
1963 | * fm10k_sw_init initializes the interface private data structure. |
1964 | * Fields are initialized based on PCI device information and |
1965 | * OS network device settings (MTU size). |
1966 | **/ |
1967 | static int fm10k_sw_init(struct fm10k_intfc *interface, |
1968 | const struct pci_device_id *ent) |
1969 | { |
1970 | const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; |
1971 | struct fm10k_hw *hw = &interface->hw; |
1972 | struct pci_dev *pdev = interface->pdev; |
1973 | struct net_device *netdev = interface->netdev; |
u32 rss_key[FM10K_RSSRK_SIZE];
unsigned int rss;
1976 | int err; |
1977 | |
1978 | /* initialize back pointer */ |
1979 | hw->back = interface; |
1980 | hw->hw_addr = interface->uc_addr; |
1981 | |
1982 | /* PCI config space info */ |
1983 | hw->vendor_id = pdev->vendor; |
1984 | hw->device_id = pdev->device; |
1985 | hw->revision_id = pdev->revision; |
1986 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
1987 | hw->subsystem_device_id = pdev->subsystem_device; |
1988 | |
1989 | /* Setup hw api */ |
1990 | memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops)); |
1991 | hw->mac.type = fi->mac; |
1992 | |
1993 | /* Setup IOV handlers */ |
1994 | if (fi->iov_ops) |
1995 | memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops)); |
1996 | |
1997 | /* Set common capability flags and settings */ |
1998 | rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus()); |
1999 | interface->ring_feature[RING_F_RSS].limit = rss; |
2000 | fi->get_invariants(hw); |
2001 | |
2002 | /* pick up the PCIe bus settings for reporting later */ |
2003 | if (hw->mac.ops.get_bus_info) |
2004 | hw->mac.ops.get_bus_info(hw); |
2005 | |
2006 | /* limit the usable DMA range */ |
2007 | if (hw->mac.ops.set_dma_mask) |
hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));
2009 | |
2010 | /* update netdev with DMA restrictions */ |
if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
2012 | netdev->features |= NETIF_F_HIGHDMA; |
2013 | netdev->vlan_features |= NETIF_F_HIGHDMA; |
2014 | } |
2015 | |
2016 | /* reset and initialize the hardware so it is in a known state */ |
2017 | err = hw->mac.ops.reset_hw(hw); |
2018 | if (err) { |
dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
2020 | return err; |
2021 | } |
2022 | |
2023 | err = hw->mac.ops.init_hw(hw); |
2024 | if (err) { |
dev_err(&pdev->dev, "init_hw failed: %d\n", err);
2026 | return err; |
2027 | } |
2028 | |
2029 | /* initialize hardware statistics */ |
2030 | hw->mac.ops.update_hw_stats(hw, &interface->stats); |
2031 | |
2032 | /* Set upper limit on IOV VFs that can be allocated */ |
pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);
2034 | |
2035 | /* Start with random Ethernet address */ |
eth_random_addr(hw->mac.addr);
2037 | |
2038 | /* Initialize MAC address from hardware */ |
2039 | err = hw->mac.ops.read_mac_addr(hw); |
2040 | if (err) { |
dev_warn(&pdev->dev,
"Failed to obtain MAC address defaulting to random\n");
2043 | /* tag address assignment as random */ |
2044 | netdev->addr_assign_type |= NET_ADDR_RANDOM; |
2045 | } |
2046 | |
eth_hw_addr_set(netdev, hw->mac.addr);
ether_addr_copy(netdev->perm_addr, hw->mac.addr);
2049 | |
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
2052 | return -EIO; |
2053 | } |
2054 | |
2055 | /* initialize DCBNL interface */ |
fm10k_dcbnl_set_ops(netdev);
2057 | |
2058 | /* set default ring sizes */ |
2059 | interface->tx_ring_count = FM10K_DEFAULT_TXD; |
2060 | interface->rx_ring_count = FM10K_DEFAULT_RXD; |
2061 | |
2062 | /* set default interrupt moderation */ |
2063 | interface->tx_itr = FM10K_TX_ITR_DEFAULT; |
2064 | interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; |
2065 | |
2066 | /* Initialize the MAC/VLAN queue */ |
INIT_LIST_HEAD(&interface->macvlan_requests);
2068 | |
netdev_rss_key_fill(rss_key, sizeof(rss_key));
2070 | memcpy(interface->rssrk, rss_key, sizeof(rss_key)); |
2071 | |
/* Initialize the mailbox and MAC/VLAN queue locks */
2073 | spin_lock_init(&interface->mbx_lock); |
2074 | spin_lock_init(&interface->macvlan_lock); |
2075 | |
2076 | /* Start off interface as being down */ |
set_bit(__FM10K_DOWN, interface->state);
set_bit(__FM10K_UPDATING_STATS, interface->state);
2079 | |
2080 | return 0; |
2081 | } |
2082 | |
2083 | /** |
2084 | * fm10k_probe - Device Initialization Routine |
2085 | * @pdev: PCI device information struct |
2086 | * @ent: entry in fm10k_pci_tbl |
2087 | * |
2088 | * Returns 0 on success, negative on failure |
2089 | * |
2090 | * fm10k_probe initializes an interface identified by a pci_dev structure. |
2091 | * The OS initialization, configuring of the interface private structure, |
2092 | * and a hardware reset occur. |
2093 | **/ |
2094 | static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2095 | { |
2096 | struct net_device *netdev; |
2097 | struct fm10k_intfc *interface; |
2098 | int err; |
2099 | |
2100 | if (pdev->error_state != pci_channel_io_normal) { |
dev_err(&pdev->dev,
"PCI device still in an error state. Unable to load...\n");
2103 | return -EIO; |
2104 | } |
2105 | |
err = pci_enable_device_mem(pdev);
2107 | if (err) { |
dev_err(&pdev->dev,
"PCI enable device failed: %d\n", err);
2110 | return err; |
2111 | } |
2112 | |
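/* Prefer 48-bit DMA addressing and fall back to a 32-bit mask if the
 * platform cannot support it
 */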
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2116 | if (err) { |
dev_err(&pdev->dev,
"DMA configuration failed: %d\n", err);
2119 | goto err_dma; |
2120 | } |
2121 | |
err = pci_request_mem_regions(pdev, fm10k_driver_name);
2123 | if (err) { |
dev_err(&pdev->dev,
"pci_request_selected_regions failed: %d\n", err);
2126 | goto err_pci_reg; |
2127 | } |
2128 | |
pci_set_master(pdev);
pci_save_state(pdev);
2131 | |
netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
2133 | if (!netdev) { |
2134 | err = -ENOMEM; |
2135 | goto err_alloc_netdev; |
2136 | } |
2137 | |
2138 | SET_NETDEV_DEV(netdev, &pdev->dev); |
2139 | |
interface = netdev_priv(netdev);
pci_set_drvdata(pdev, interface);
2142 | |
2143 | interface->netdev = netdev; |
2144 | interface->pdev = pdev; |
2145 | |
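/* map BAR 0, which holds the device register space */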
2146 | interface->uc_addr = ioremap(pci_resource_start(pdev, 0), |
2147 | FM10K_UC_ADDR_SIZE); |
2148 | if (!interface->uc_addr) { |
2149 | err = -EIO; |
2150 | goto err_ioremap; |
2151 | } |
2152 | |
2153 | err = fm10k_sw_init(interface, ent); |
2154 | if (err) |
2155 | goto err_sw_init; |
2156 | |
2157 | /* enable debugfs support */ |
2158 | fm10k_dbg_intfc_init(interface); |
2159 | |
2160 | err = fm10k_init_queueing_scheme(interface); |
2161 | if (err) |
2162 | goto err_sw_init; |
2163 | |
2164 | /* the mbx interrupt might attempt to schedule the service task, so we |
2165 | * must ensure it is disabled since we haven't yet requested the timer |
2166 | * or work item. |
2167 | */ |
set_bit(__FM10K_SERVICE_DISABLE, interface->state);
2169 | |
2170 | err = fm10k_mbx_request_irq(interface); |
2171 | if (err) |
2172 | goto err_mbx_interrupt; |
2173 | |
2174 | /* final check of hardware state before registering the interface */ |
2175 | err = fm10k_hw_ready(interface); |
2176 | if (err) |
2177 | goto err_register; |
2178 | |
err = register_netdev(netdev);
2180 | if (err) |
2181 | goto err_register; |
2182 | |
2183 | /* carrier off reporting is important to ethtool even BEFORE open */ |
netif_carrier_off(netdev);
2185 | |
2186 | /* stop all the transmit queues from transmitting until link is up */ |
netif_tx_stop_all_queues(netdev);
2188 | |
2189 | /* Initialize service timer and service task late in order to avoid |
2190 | * cleanup issues. |
2191 | */ |
2192 | timer_setup(&interface->service_timer, fm10k_service_timer, 0); |
2193 | INIT_WORK(&interface->service_task, fm10k_service_task); |
2194 | |
2195 | /* Setup the MAC/VLAN queue */ |
2196 | INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task); |
2197 | |
2198 | /* kick off service timer now, even when interface is down */ |
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
2200 | |
2201 | /* print warning for non-optimal configurations */ |
pcie_print_link_status(interface->pdev);
2203 | |
2204 | /* report MAC address for logging */ |
dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
2206 | |
2207 | /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ |
fm10k_iov_configure(pdev, 0);
2209 | |
2210 | /* clear the service task disable bit and kick off service task */ |
clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
2212 | fm10k_service_event_schedule(interface); |
2213 | |
2214 | return 0; |
2215 | |
2216 | err_register: |
2217 | fm10k_mbx_free_irq(interface); |
2218 | err_mbx_interrupt: |
2219 | fm10k_clear_queueing_scheme(interface); |
2220 | err_sw_init: |
2221 | if (interface->sw_addr) |
iounmap(interface->sw_addr);
iounmap(interface->uc_addr);
2224 | err_ioremap: |
free_netdev(netdev);
2226 | err_alloc_netdev: |
2227 | pci_release_mem_regions(pdev); |
2228 | err_pci_reg: |
2229 | err_dma: |
pci_disable_device(pdev);
2231 | return err; |
2232 | } |
2233 | |
2234 | /** |
2235 | * fm10k_remove - Device Removal Routine |
2236 | * @pdev: PCI device information struct |
2237 | * |
2238 | * fm10k_remove is called by the PCI subsystem to alert the driver |
* that it should release a PCI device. This could be caused by a
2240 | * Hot-Plug event, or because the driver is going to be removed from |
2241 | * memory. |
2242 | **/ |
2243 | static void fm10k_remove(struct pci_dev *pdev) |
2244 | { |
2245 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
2246 | struct net_device *netdev = interface->netdev; |
2247 | |
del_timer_sync(&interface->service_timer);
2249 | |
2250 | fm10k_stop_service_event(interface); |
2251 | fm10k_stop_macvlan_task(interface); |
2252 | |
2253 | /* Remove all pending MAC/VLAN requests */ |
fm10k_clear_macvlan_queue(interface, interface->glort, true);
2255 | |
2256 | /* free netdev, this may bounce the interrupts due to setup_tc */ |
2257 | if (netdev->reg_state == NETREG_REGISTERED) |
unregister_netdev(netdev);
2259 | |
2260 | /* release VFs */ |
2261 | fm10k_iov_disable(pdev); |
2262 | |
2263 | /* disable mailbox interrupt */ |
2264 | fm10k_mbx_free_irq(interface); |
2265 | |
2266 | /* free interrupts */ |
2267 | fm10k_clear_queueing_scheme(interface); |
2268 | |
2269 | /* remove any debugfs interfaces */ |
2270 | fm10k_dbg_intfc_exit(interface); |
2271 | |
2272 | if (interface->sw_addr) |
iounmap(interface->sw_addr);
iounmap(interface->uc_addr);
2275 | |
free_netdev(netdev);
2277 | |
2278 | pci_release_mem_regions(pdev); |
2279 | |
pci_disable_device(pdev);
2281 | } |
2282 | |
2283 | static void fm10k_prepare_suspend(struct fm10k_intfc *interface) |
2284 | { |
2285 | /* the watchdog task reads from registers, which might appear like |
2286 | * a surprise remove if the PCIe device is disabled while we're |
2287 | * stopped. We stop the watchdog task until after we resume software |
2288 | * activity. |
2289 | * |
2290 | * Note that the MAC/VLAN task will be stopped as part of preparing |
2291 | * for reset so we don't need to handle it here. |
2292 | */ |
2293 | fm10k_stop_service_event(interface); |
2294 | |
2295 | if (fm10k_prepare_for_reset(interface)) |
set_bit(__FM10K_RESET_SUSPENDED, interface->state);
2297 | } |
2298 | |
2299 | static int fm10k_handle_resume(struct fm10k_intfc *interface) |
2300 | { |
2301 | struct fm10k_hw *hw = &interface->hw; |
2302 | int err; |
2303 | |
2304 | /* Even if we didn't properly prepare for reset in |
* fm10k_prepare_suspend, we'll attempt to resume anyway.
2306 | */ |
if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
dev_warn(&interface->pdev->dev,
"Device was shut down as part of suspend... Attempting to recover\n");
2310 | |
2311 | /* reset statistics starting values */ |
2312 | hw->mac.ops.rebind_hw_stats(hw, &interface->stats); |
2313 | |
2314 | err = fm10k_handle_reset(interface); |
2315 | if (err) |
2316 | return err; |
2317 | |
2318 | /* assume host is not ready, to prevent race with watchdog in case we |
2319 | * actually don't have connection to the switch |
2320 | */ |
2321 | interface->host_ready = false; |
2322 | fm10k_watchdog_host_not_ready(interface); |
2323 | |
2324 | /* force link to stay down for a second to prevent link flutter */ |
2325 | interface->link_down_event = jiffies + (HZ); |
set_bit(__FM10K_LINK_DOWN, interface->state);
2327 | |
2328 | /* restart the service task */ |
2329 | fm10k_start_service_event(interface); |
2330 | |
/* Restart the MAC/VLAN request queue in case of outstanding events */
2332 | fm10k_macvlan_schedule(interface); |
2333 | |
2334 | return 0; |
2335 | } |
2336 | |
2337 | /** |
2338 | * fm10k_resume - Generic PM resume hook |
2339 | * @dev: generic device structure |
2340 | * |
2341 | * Generic PM hook used when waking the device from a low power state after |
2342 | * suspend or hibernation. This function does not need to handle lower PCIe |
2343 | * device state as the stack takes care of that for us. |
2344 | **/ |
2345 | static int __maybe_unused fm10k_resume(struct device *dev) |
2346 | { |
2347 | struct fm10k_intfc *interface = dev_get_drvdata(dev); |
2348 | struct net_device *netdev = interface->netdev; |
2349 | struct fm10k_hw *hw = &interface->hw; |
2350 | int err; |
2351 | |
2352 | /* refresh hw_addr in case it was dropped */ |
2353 | hw->hw_addr = interface->uc_addr; |
2354 | |
2355 | err = fm10k_handle_resume(interface); |
2356 | if (err) |
2357 | return err; |
2358 | |
netif_device_attach(netdev);
2360 | |
2361 | return 0; |
2362 | } |
2363 | |
2364 | /** |
2365 | * fm10k_suspend - Generic PM suspend hook |
2366 | * @dev: generic device structure |
2367 | * |
2368 | * Generic PM hook used when setting the device into a low power state for |
2369 | * system suspend or hibernation. This function does not need to handle lower |
2370 | * PCIe device state as the stack takes care of that for us. |
2371 | **/ |
2372 | static int __maybe_unused fm10k_suspend(struct device *dev) |
2373 | { |
2374 | struct fm10k_intfc *interface = dev_get_drvdata(dev); |
2375 | struct net_device *netdev = interface->netdev; |
2376 | |
netif_device_detach(netdev);
2378 | |
2379 | fm10k_prepare_suspend(interface); |
2380 | |
2381 | return 0; |
2382 | } |
2383 | |
2384 | /** |
2385 | * fm10k_io_error_detected - called when PCI error is detected |
2386 | * @pdev: Pointer to PCI device |
2387 | * @state: The current pci connection state |
2388 | * |
2389 | * This function is called after a PCI bus error affecting |
2390 | * this device has been detected. |
2391 | */ |
2392 | static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, |
2393 | pci_channel_state_t state) |
2394 | { |
2395 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
2396 | struct net_device *netdev = interface->netdev; |
2397 | |
netif_device_detach(netdev);
2399 | |
2400 | if (state == pci_channel_io_perm_failure) |
2401 | return PCI_ERS_RESULT_DISCONNECT; |
2402 | |
2403 | fm10k_prepare_suspend(interface); |
2404 | |
2405 | /* Request a slot reset. */ |
2406 | return PCI_ERS_RESULT_NEED_RESET; |
2407 | } |
2408 | |
2409 | /** |
2410 | * fm10k_io_slot_reset - called after the pci bus has been reset. |
2411 | * @pdev: Pointer to PCI device |
2412 | * |
2413 | * Restart the card from scratch, as if from a cold-boot. |
2414 | */ |
2415 | static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) |
2416 | { |
2417 | pci_ers_result_t result; |
2418 | |
2419 | if (pci_reenable_device(pdev)) { |
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
2422 | result = PCI_ERS_RESULT_DISCONNECT; |
2423 | } else { |
pci_set_master(pdev);
pci_restore_state(pdev);
2426 | |
2427 | /* After second error pci->state_saved is false, this |
2428 | * resets it so EEH doesn't break. |
2429 | */ |
pci_save_state(pdev);
2431 | |
pci_wake_from_d3(pdev, false);
2433 | |
2434 | result = PCI_ERS_RESULT_RECOVERED; |
2435 | } |
2436 | |
2437 | return result; |
2438 | } |
2439 | |
2440 | /** |
2441 | * fm10k_io_resume - called when traffic can start flowing again. |
2442 | * @pdev: Pointer to PCI device |
2443 | * |
2444 | * This callback is called when the error recovery driver tells us that |
* it's OK to resume normal operation.
2446 | */ |
2447 | static void fm10k_io_resume(struct pci_dev *pdev) |
2448 | { |
2449 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
2450 | struct net_device *netdev = interface->netdev; |
2451 | int err; |
2452 | |
2453 | err = fm10k_handle_resume(interface); |
2454 | |
2455 | if (err) |
dev_warn(&pdev->dev,
"%s failed: %d\n", __func__, err);
2458 | else |
netif_device_attach(netdev);
2460 | } |
2461 | |
2462 | /** |
2463 | * fm10k_io_reset_prepare - called when PCI function is about to be reset |
2464 | * @pdev: Pointer to PCI device |
2465 | * |
2466 | * This callback is called when the PCI function is about to be reset, |
2467 | * allowing the device driver to prepare for it. |
2468 | */ |
2469 | static void fm10k_io_reset_prepare(struct pci_dev *pdev) |
2470 | { |
/* warn in case we have any active VF devices */
if (pci_num_vf(pdev))
2473 | dev_warn(&pdev->dev, |
2474 | "PCIe FLR may cause issues for any active VF devices\n" ); |
fm10k_prepare_suspend(pci_get_drvdata(pdev));
2476 | } |
2477 | |
2478 | /** |
2479 | * fm10k_io_reset_done - called when PCI function has finished resetting |
2480 | * @pdev: Pointer to PCI device |
2481 | * |
2482 | * This callback is called just after the PCI function is reset, such as via |
2483 | * /sys/class/net/<enpX>/device/reset or similar. |
2484 | */ |
2485 | static void fm10k_io_reset_done(struct pci_dev *pdev) |
2486 | { |
2487 | struct fm10k_intfc *interface = pci_get_drvdata(pdev); |
2488 | int err = fm10k_handle_resume(interface); |
2489 | |
2490 | if (err) { |
dev_warn(&pdev->dev,
"%s failed: %d\n", __func__, err);
netif_device_detach(interface->netdev);
2494 | } |
2495 | } |
2496 | |
2497 | static const struct pci_error_handlers fm10k_err_handler = { |
2498 | .error_detected = fm10k_io_error_detected, |
2499 | .slot_reset = fm10k_io_slot_reset, |
2500 | .resume = fm10k_io_resume, |
2501 | .reset_prepare = fm10k_io_reset_prepare, |
2502 | .reset_done = fm10k_io_reset_done, |
2503 | }; |
2504 | |
2505 | static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume); |
2506 | |
2507 | static struct pci_driver fm10k_driver = { |
2508 | .name = fm10k_driver_name, |
2509 | .id_table = fm10k_pci_tbl, |
2510 | .probe = fm10k_probe, |
2511 | .remove = fm10k_remove, |
2512 | .driver = { |
2513 | .pm = &fm10k_pm_ops, |
2514 | }, |
2515 | .sriov_configure = fm10k_iov_configure, |
2516 | .err_handler = &fm10k_err_handler |
2517 | }; |
2518 | |
2519 | /** |
2520 | * fm10k_register_pci_driver - register driver interface |
2521 | * |
2522 | * This function is called on module load in order to register the driver. |
2523 | **/ |
2524 | int fm10k_register_pci_driver(void) |
2525 | { |
2526 | return pci_register_driver(&fm10k_driver); |
2527 | } |
2528 | |
2529 | /** |
2530 | * fm10k_unregister_pci_driver - unregister driver interface |
2531 | * |
2532 | * This function is called on module unload in order to remove the driver. |
2533 | **/ |
2534 | void fm10k_unregister_pci_driver(void) |
2535 | { |
pci_unregister_driver(&fm10k_driver);
2537 | } |
2538 | |