1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 1999 - 2018 Intel Corporation. */ |
3 | |
4 | #include <linux/types.h> |
5 | #include <linux/module.h> |
6 | #include <linux/pci.h> |
7 | #include <linux/netdevice.h> |
8 | #include <linux/vmalloc.h> |
9 | #include <linux/string.h> |
10 | #include <linux/in.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/ip.h> |
13 | #include <linux/tcp.h> |
14 | #include <linux/sctp.h> |
15 | #include <linux/pkt_sched.h> |
16 | #include <linux/ipv6.h> |
17 | #include <linux/slab.h> |
18 | #include <net/checksum.h> |
19 | #include <net/ip6_checksum.h> |
20 | #include <linux/etherdevice.h> |
21 | #include <linux/ethtool.h> |
22 | #include <linux/if.h> |
23 | #include <linux/if_vlan.h> |
24 | #include <linux/if_macvlan.h> |
25 | #include <linux/if_bridge.h> |
26 | #include <linux/prefetch.h> |
27 | #include <linux/bpf.h> |
28 | #include <linux/bpf_trace.h> |
29 | #include <linux/atomic.h> |
30 | #include <linux/numa.h> |
31 | #include <generated/utsrelease.h> |
32 | #include <scsi/fc/fc_fcoe.h> |
33 | #include <net/udp_tunnel.h> |
34 | #include <net/pkt_cls.h> |
35 | #include <net/tc_act/tc_gact.h> |
36 | #include <net/tc_act/tc_mirred.h> |
37 | #include <net/vxlan.h> |
38 | #include <net/mpls.h> |
39 | #include <net/netdev_queues.h> |
40 | #include <net/xdp_sock_drv.h> |
41 | #include <net/xfrm.h> |
42 | |
43 | #include "ixgbe.h" |
44 | #include "ixgbe_common.h" |
45 | #include "ixgbe_dcb_82599.h" |
46 | #include "ixgbe_phy.h" |
47 | #include "ixgbe_sriov.h" |
48 | #include "ixgbe_model.h" |
49 | #include "ixgbe_txrx_common.h" |
50 | |
51 | char ixgbe_driver_name[] = "ixgbe"; |
52 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | #ifdef IXGBE_FCOE |
55 | char ixgbe_default_device_descr[] = |
56 | "Intel(R) 10 Gigabit Network Connection"; |
57 | #else |
58 | static char ixgbe_default_device_descr[] = |
59 | "Intel(R) 10 Gigabit Network Connection"; |
60 | #endif |
61 | static const char ixgbe_copyright[] = |
62 | "Copyright (c) 1999-2016 Intel Corporation."; |
63 | |
64 | static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter"; |
65 | |
66 | static const struct ixgbe_info *ixgbe_info_tbl[] = { |
67 | [board_82598] = &ixgbe_82598_info, |
68 | [board_82599] = &ixgbe_82599_info, |
69 | [board_X540] = &ixgbe_X540_info, |
70 | [board_X550] = &ixgbe_X550_info, |
71 | [board_X550EM_x] = &ixgbe_X550EM_x_info, |
72 | [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info, |
73 | [board_x550em_a] = &ixgbe_x550em_a_info, |
74 | [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info, |
75 | }; |
76 | |
77 | /* ixgbe_pci_tbl - PCI Device ID Table |
78 | * |
79 | * Wildcard entries (PCI_ANY_ID) should come last |
80 | * Last entry must be all 0s |
81 | * |
82 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, |
83 | * Class, Class Mask, private data (not used) } |
84 | */ |
85 | static const struct pci_device_id ixgbe_pci_tbl[] = { |
86 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, |
87 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, |
88 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, |
89 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, |
90 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, |
91 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, |
92 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, |
93 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, |
94 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, |
95 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, |
96 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, |
97 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, |
98 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, |
99 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, |
100 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, |
101 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, |
102 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, |
103 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, |
104 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, |
105 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, |
106 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, |
107 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, |
108 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, |
109 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, |
110 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, |
111 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, |
112 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, |
113 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, |
114 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, |
115 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, |
116 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, |
117 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, |
118 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, |
119 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x}, |
120 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, |
121 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, |
122 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, |
123 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw}, |
124 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, |
125 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, |
126 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, |
127 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, |
128 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, |
129 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, |
130 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, |
131 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw }, |
132 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw }, |
133 | /* required last entry */ |
134 | {0, } |
135 | }; |
136 | MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); |
137 | |
138 | #ifdef CONFIG_IXGBE_DCA |
139 | static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, |
140 | void *p); |
141 | static struct notifier_block dca_notifier = { |
142 | .notifier_call = ixgbe_notify_dca, |
143 | .next = NULL, |
144 | .priority = 0 |
145 | }; |
146 | #endif |
147 | |
148 | #ifdef CONFIG_PCI_IOV |
149 | static unsigned int max_vfs; |
150 | module_param(max_vfs, uint, 0); |
151 | MODULE_PARM_DESC(max_vfs, |
152 | "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); |
153 | #endif /* CONFIG_PCI_IOV */ |
154 | |
155 | static bool allow_unsupported_sfp; |
156 | module_param(allow_unsupported_sfp, bool, 0); |
157 | MODULE_PARM_DESC(allow_unsupported_sfp, |
158 | "Allow unsupported and untested SFP+ modules on 82599-based adapters"); |
159 | |
160 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) |
161 | static int debug = -1; |
162 | module_param(debug, int, 0); |
163 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
164 | |
165 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
166 | MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); |
167 | MODULE_LICENSE("GPL v2"); |
168 | |
169 | DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); |
170 | EXPORT_SYMBOL(ixgbe_xdp_locking_key); |
171 | |
172 | static struct workqueue_struct *ixgbe_wq; |
173 | |
174 | static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); |
175 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); |
176 | |
177 | static const struct net_device_ops ixgbe_netdev_ops; |
178 | |
179 | static bool netif_is_ixgbe(struct net_device *dev) |
180 | { |
181 | return dev && (dev->netdev_ops == &ixgbe_netdev_ops); |
182 | } |
183 | |
184 | static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, |
185 | u32 reg, u16 *value) |
186 | { |
187 | struct pci_dev *parent_dev; |
188 | struct pci_bus *parent_bus; |
189 | |
190 | parent_bus = adapter->pdev->bus->parent; |
191 | if (!parent_bus) |
192 | return -1; |
193 | |
194 | parent_dev = parent_bus->self; |
195 | if (!parent_dev) |
196 | return -1; |
197 | |
198 | if (!pci_is_pcie(parent_dev)) |
199 | return -1; |
200 | |
201 | pcie_capability_read_word(parent_dev, reg, value); |
202 | if (*value == IXGBE_FAILED_READ_CFG_WORD && |
203 | ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) |
204 | return -1; |
205 | return 0; |
206 | } |
207 | |
208 | static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) |
209 | { |
210 | struct ixgbe_hw *hw = &adapter->hw; |
211 | u16 link_status = 0; |
212 | int err; |
213 | |
214 | hw->bus.type = ixgbe_bus_type_pci_express; |
215 | |
216 | /* Get the negotiated link width and speed from PCI config space of the |
217 | * parent, as this device is behind a switch |
218 | */ |
219 | err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); |
220 | |
221 | /* assume caller will handle error case */ |
222 | if (err) |
223 | return err; |
224 | |
225 | hw->bus.width = ixgbe_convert_bus_width(link_status); |
226 | hw->bus.speed = ixgbe_convert_bus_speed(link_status); |
227 | |
228 | return 0; |
229 | } |
230 | |
231 | /** |
232 | * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent |
233 | * @hw: hw specific details |
234 | * |
235 | * This function is used by probe to determine whether a device's PCI-Express |
236 | * bandwidth details should be gathered from the parent bus instead of from the |
237 | * device. Used to ensure that various locations all have the correct device ID |
238 | * checks. |
239 | */ |
240 | static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) |
241 | { |
242 | switch (hw->device_id) { |
243 | case IXGBE_DEV_ID_82599_SFP_SF_QP: |
244 | case IXGBE_DEV_ID_82599_QSFP_SF_QP: |
245 | return true; |
246 | default: |
247 | return false; |
248 | } |
249 | } |
250 | |
251 | static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, |
252 | int expected_gts) |
253 | { |
254 | struct ixgbe_hw *hw = &adapter->hw; |
255 | struct pci_dev *pdev; |
256 | |
257 | /* Some devices are not connected over PCIe and thus do not negotiate |
258 | * speed. These devices do not have valid bus info, and thus any report |
259 | * we generate may not be correct. |
260 | */ |
261 | if (hw->bus.type == ixgbe_bus_type_internal) |
262 | return; |
263 | |
264 | /* determine whether to use the parent device */ |
265 | if (ixgbe_pcie_from_parent(&adapter->hw)) |
266 | pdev = adapter->pdev->bus->parent->self; |
267 | else |
268 | pdev = adapter->pdev; |
269 | |
270 | pcie_print_link_status(pdev); |
271 | } |
272 | |
273 | static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) |
274 | { |
275 | if (!test_bit(__IXGBE_DOWN, &adapter->state) && |
276 | !test_bit(__IXGBE_REMOVING, &adapter->state) && |
277 | !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) |
278 | queue_work(ixgbe_wq, &adapter->service_task); |
279 | } |
280 | |
281 | static void ixgbe_remove_adapter(struct ixgbe_hw *hw) |
282 | { |
283 | struct ixgbe_adapter *adapter = hw->back; |
284 | |
285 | if (!hw->hw_addr) |
286 | return; |
287 | hw->hw_addr = NULL; |
288 | e_dev_err("Adapter removed\n"); |
289 | if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) |
290 | ixgbe_service_event_schedule(adapter); |
291 | } |
292 | |
293 | static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) |
294 | { |
295 | u8 __iomem *reg_addr; |
296 | u32 value; |
297 | int i; |
298 | |
299 | reg_addr = READ_ONCE(hw->hw_addr); |
300 | if (ixgbe_removed(reg_addr)) |
301 | return IXGBE_FAILED_READ_REG; |
302 | |
303 | /* Register read of 0xFFFFFFFF can indicate the adapter has been removed, |
304 | * so perform several status register reads to determine if the adapter |
305 | * has been removed. |
306 | */ |
307 | for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) { |
308 | value = readl(reg_addr + IXGBE_STATUS); |
309 | if (value != IXGBE_FAILED_READ_REG) |
310 | break; |
311 | mdelay(3); |
312 | } |
313 | |
314 | if (value == IXGBE_FAILED_READ_REG) |
315 | ixgbe_remove_adapter(hw); |
316 | else |
317 | value = readl(addr: reg_addr + reg); |
318 | return value; |
319 | } |
320 | |
321 | /** |
322 | * ixgbe_read_reg - Read from device register |
323 | * @hw: hw specific details |
324 | * @reg: offset of register to read |
325 | * |
326 | * Returns: value read or IXGBE_FAILED_READ_REG if removed |
327 | * |
328 | * This function is used to read device registers. It checks for device |
329 | * removal by confirming any read that returns all ones by checking the |
330 | * status register value for all ones. This function avoids reading from |
331 | * the hardware if a removal was previously detected in which case it |
332 | * returns IXGBE_FAILED_READ_REG (all ones). |
333 | */ |
334 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) |
335 | { |
336 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
337 | u32 value; |
338 | |
339 | if (ixgbe_removed(reg_addr)) |
340 | return IXGBE_FAILED_READ_REG; |
341 | if (unlikely(hw->phy.nw_mng_if_sel & |
342 | IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { |
343 | struct ixgbe_adapter *adapter; |
344 | int i; |
345 | |
346 | for (i = 0; i < 200; ++i) { |
347 | value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); |
348 | if (likely(!value)) |
349 | goto writes_completed; |
350 | if (value == IXGBE_FAILED_READ_REG) { |
351 | ixgbe_remove_adapter(hw); |
352 | return IXGBE_FAILED_READ_REG; |
353 | } |
354 | udelay(5); |
355 | } |
356 | |
357 | adapter = hw->back; |
358 | e_warn(hw, "register writes incomplete %08x\n", value); |
359 | } |
360 | |
361 | writes_completed: |
362 | value = readl(reg_addr + reg); |
363 | if (unlikely(value == IXGBE_FAILED_READ_REG)) |
364 | value = ixgbe_check_remove(hw, reg); |
365 | return value; |
366 | } |
367 | |
368 | static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) |
369 | { |
370 | u16 value; |
371 | |
372 | pci_read_config_word(pdev, PCI_VENDOR_ID, &value); |
373 | if (value == IXGBE_FAILED_READ_CFG_WORD) { |
374 | ixgbe_remove_adapter(hw); |
375 | return true; |
376 | } |
377 | return false; |
378 | } |
379 | |
380 | u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) |
381 | { |
382 | struct ixgbe_adapter *adapter = hw->back; |
383 | u16 value; |
384 | |
385 | if (ixgbe_removed(hw->hw_addr)) |
386 | return IXGBE_FAILED_READ_CFG_WORD; |
387 | pci_read_config_word(adapter->pdev, reg, &value); |
388 | if (value == IXGBE_FAILED_READ_CFG_WORD && |
389 | ixgbe_check_cfg_remove(hw, adapter->pdev)) |
390 | return IXGBE_FAILED_READ_CFG_WORD; |
391 | return value; |
392 | } |
393 | |
394 | #ifdef CONFIG_PCI_IOV |
395 | static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) |
396 | { |
397 | struct ixgbe_adapter *adapter = hw->back; |
398 | u32 value; |
399 | |
400 | if (ixgbe_removed(hw->hw_addr)) |
401 | return IXGBE_FAILED_READ_CFG_DWORD; |
402 | pci_read_config_dword(adapter->pdev, reg, &value); |
403 | if (value == IXGBE_FAILED_READ_CFG_DWORD && |
404 | ixgbe_check_cfg_remove(hw, adapter->pdev)) |
405 | return IXGBE_FAILED_READ_CFG_DWORD; |
406 | return value; |
407 | } |
408 | #endif /* CONFIG_PCI_IOV */ |
409 | |
410 | void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) |
411 | { |
412 | struct ixgbe_adapter *adapter = hw->back; |
413 | |
414 | if (ixgbe_removed(hw->hw_addr)) |
415 | return; |
416 | pci_write_config_word(adapter->pdev, reg, value); |
417 | } |
418 | |
419 | static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) |
420 | { |
421 | BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); |
422 | |
423 | /* flush memory to make sure state is correct before next watchdog */ |
424 | smp_mb__before_atomic(); |
425 | clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); |
426 | } |
427 | |
428 | struct ixgbe_reg_info { |
429 | u32 ofs; |
430 | char *name; |
431 | }; |
432 | |
433 | static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { |
434 | |
435 | /* General Registers */ |
436 | {IXGBE_CTRL, "CTRL"}, |
437 | {IXGBE_STATUS, "STATUS"}, |
438 | {IXGBE_CTRL_EXT, "CTRL_EXT"}, |
439 | |
440 | /* Interrupt Registers */ |
441 | {IXGBE_EICR, "EICR"}, |
442 | |
443 | /* RX Registers */ |
444 | {IXGBE_SRRCTL(0), "SRRCTL"}, |
445 | {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, |
446 | {IXGBE_RDLEN(0), "RDLEN"}, |
447 | {IXGBE_RDH(0), "RDH"}, |
448 | {IXGBE_RDT(0), "RDT"}, |
449 | {IXGBE_RXDCTL(0), "RXDCTL"}, |
450 | {IXGBE_RDBAL(0), "RDBAL"}, |
451 | {IXGBE_RDBAH(0), "RDBAH"}, |
452 | |
453 | /* TX Registers */ |
454 | {IXGBE_TDBAL(0), "TDBAL"}, |
455 | {IXGBE_TDBAH(0), "TDBAH"}, |
456 | {IXGBE_TDLEN(0), "TDLEN"}, |
457 | {IXGBE_TDH(0), "TDH"}, |
458 | {IXGBE_TDT(0), "TDT"}, |
459 | {IXGBE_TXDCTL(0), "TXDCTL"}, |
460 | |
461 | /* List Terminator */ |
462 | { .name = NULL } |
463 | }; |
464 | |
465 | |
466 | /* |
467 | * ixgbe_regdump - register printout routine |
468 | */ |
469 | static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) |
470 | { |
471 | int i; |
472 | char rname[16]; |
473 | u32 regs[64]; |
474 | |
475 | switch (reginfo->ofs) { |
476 | case IXGBE_SRRCTL(0): |
477 | for (i = 0; i < 64; i++) |
478 | regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); |
479 | break; |
480 | case IXGBE_DCA_RXCTRL(0): |
481 | for (i = 0; i < 64; i++) |
482 | regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); |
483 | break; |
484 | case IXGBE_RDLEN(0): |
485 | for (i = 0; i < 64; i++) |
486 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); |
487 | break; |
488 | case IXGBE_RDH(0): |
489 | for (i = 0; i < 64; i++) |
490 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); |
491 | break; |
492 | case IXGBE_RDT(0): |
493 | for (i = 0; i < 64; i++) |
494 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); |
495 | break; |
496 | case IXGBE_RXDCTL(0): |
497 | for (i = 0; i < 64; i++) |
498 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); |
499 | break; |
500 | case IXGBE_RDBAL(0): |
501 | for (i = 0; i < 64; i++) |
502 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); |
503 | break; |
504 | case IXGBE_RDBAH(0): |
505 | for (i = 0; i < 64; i++) |
506 | regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); |
507 | break; |
508 | case IXGBE_TDBAL(0): |
509 | for (i = 0; i < 64; i++) |
510 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); |
511 | break; |
512 | case IXGBE_TDBAH(0): |
513 | for (i = 0; i < 64; i++) |
514 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); |
515 | break; |
516 | case IXGBE_TDLEN(0): |
517 | for (i = 0; i < 64; i++) |
518 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); |
519 | break; |
520 | case IXGBE_TDH(0): |
521 | for (i = 0; i < 64; i++) |
522 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); |
523 | break; |
524 | case IXGBE_TDT(0): |
525 | for (i = 0; i < 64; i++) |
526 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); |
527 | break; |
528 | case IXGBE_TXDCTL(0): |
529 | for (i = 0; i < 64; i++) |
530 | regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); |
531 | break; |
532 | default: |
533 | pr_info("%-15s %08x\n", |
534 | reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); |
535 | return; |
536 | } |
537 | |
538 | i = 0; |
539 | while (i < 64) { |
540 | int j; |
541 | char buf[9 * 8 + 1]; |
542 | char *p = buf; |
543 | |
544 | snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7); |
545 | for (j = 0; j < 8; j++) |
546 | p += sprintf(p, " %08x", regs[i++]); |
547 | pr_err("%-15s%s\n", rname, buf); |
548 | } |
549 | |
550 | } |
551 | |
552 | static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) |
553 | { |
554 | struct ixgbe_tx_buffer *tx_buffer; |
555 | |
556 | tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; |
557 | pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", |
558 | n, ring->next_to_use, ring->next_to_clean, |
559 | (u64)dma_unmap_addr(tx_buffer, dma), |
560 | dma_unmap_len(tx_buffer, len), |
561 | tx_buffer->next_to_watch, |
562 | (u64)tx_buffer->time_stamp); |
563 | } |
564 | |
565 | /* |
566 | * ixgbe_dump - Print registers, tx-rings and rx-rings |
567 | */ |
568 | static void ixgbe_dump(struct ixgbe_adapter *adapter) |
569 | { |
570 | struct net_device *netdev = adapter->netdev; |
571 | struct ixgbe_hw *hw = &adapter->hw; |
572 | struct ixgbe_reg_info *reginfo; |
573 | int n = 0; |
574 | struct ixgbe_ring *ring; |
575 | struct ixgbe_tx_buffer *tx_buffer; |
576 | union ixgbe_adv_tx_desc *tx_desc; |
577 | struct my_u0 { u64 a; u64 b; } *u0; |
578 | struct ixgbe_ring *rx_ring; |
579 | union ixgbe_adv_rx_desc *rx_desc; |
580 | struct ixgbe_rx_buffer *rx_buffer_info; |
581 | int i = 0; |
582 | |
583 | if (!netif_msg_hw(adapter)) |
584 | return; |
585 | |
586 | /* Print netdevice Info */ |
587 | if (netdev) { |
588 | dev_info(&adapter->pdev->dev, "Net device Info\n"); |
589 | pr_info("Device Name state " |
590 | "trans_start\n"); |
591 | pr_info("%-15s %016lX %016lX\n", |
592 | netdev->name, |
593 | netdev->state, |
594 | dev_trans_start(netdev)); |
595 | } |
596 | |
597 | /* Print Registers */ |
598 | dev_info(&adapter->pdev->dev, "Register Dump\n"); |
599 | pr_info(" Register Name Value\n"); |
600 | for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; |
601 | reginfo->name; reginfo++) { |
602 | ixgbe_regdump(hw, reginfo); |
603 | } |
604 | |
605 | /* Print TX Ring Summary */ |
606 | if (!netdev || !netif_running(netdev)) |
607 | return; |
608 | |
609 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); |
610 | pr_info(" %s %s %s %s\n", |
611 | "Queue [NTU] [NTC] [bi(ntc)->dma ]", |
612 | "leng", "ntw", "timestamp"); |
613 | for (n = 0; n < adapter->num_tx_queues; n++) { |
614 | ring = adapter->tx_ring[n]; |
615 | ixgbe_print_buffer(ring, n); |
616 | } |
617 | |
618 | for (n = 0; n < adapter->num_xdp_queues; n++) { |
619 | ring = adapter->xdp_ring[n]; |
620 | ixgbe_print_buffer(ring, n); |
621 | } |
622 | |
623 | /* Print TX Rings */ |
624 | if (!netif_msg_tx_done(adapter)) |
625 | goto rx_ring_summary; |
626 | |
627 | dev_info(&adapter->pdev->dev, "TX Rings Dump\n" ); |
628 | |
629 | /* Transmit Descriptor Formats |
630 | * |
631 | * 82598 Advanced Transmit Descriptor |
632 | * +--------------------------------------------------------------+ |
633 | * 0 | Buffer Address [63:0] | |
634 | * +--------------------------------------------------------------+ |
635 | * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | |
636 | * +--------------------------------------------------------------+ |
637 | * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 |
638 | * |
639 | * 82598 Advanced Transmit Descriptor (Write-Back Format) |
640 | * +--------------------------------------------------------------+ |
641 | * 0 | RSV [63:0] | |
642 | * +--------------------------------------------------------------+ |
643 | * 8 | RSV | STA | NXTSEQ | |
644 | * +--------------------------------------------------------------+ |
645 | * 63 36 35 32 31 0 |
646 | * |
647 | * 82599+ Advanced Transmit Descriptor |
648 | * +--------------------------------------------------------------+ |
649 | * 0 | Buffer Address [63:0] | |
650 | * +--------------------------------------------------------------+ |
651 | * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | |
652 | * +--------------------------------------------------------------+ |
653 | * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 |
654 | * |
655 | * 82599+ Advanced Transmit Descriptor (Write-Back Format) |
656 | * +--------------------------------------------------------------+ |
657 | * 0 | RSV [63:0] | |
658 | * +--------------------------------------------------------------+ |
659 | * 8 | RSV | STA | RSV | |
660 | * +--------------------------------------------------------------+ |
661 | * 63 36 35 32 31 0 |
662 | */ |
663 | |
664 | for (n = 0; n < adapter->num_tx_queues; n++) { |
665 | ring = adapter->tx_ring[n]; |
666 | pr_info("------------------------------------\n"); |
667 | pr_info("TX QUEUE INDEX = %d\n", ring->queue_index); |
668 | pr_info("------------------------------------\n"); |
669 | pr_info("%s%s %s %s %s %s\n", |
670 | "T [desc] [address 63:0 ] ", |
671 | "[PlPOIdStDDt Ln] [bi->dma ] ", |
672 | "leng", "ntw", "timestamp", "bi->skb"); |
673 | |
674 | for (i = 0; ring->desc && (i < ring->count); i++) { |
675 | tx_desc = IXGBE_TX_DESC(ring, i); |
676 | tx_buffer = &ring->tx_buffer_info[i]; |
677 | u0 = (struct my_u0 *)tx_desc; |
678 | if (dma_unmap_len(tx_buffer, len) > 0) { |
679 | const char *ring_desc; |
680 | |
681 | if (i == ring->next_to_use && |
682 | i == ring->next_to_clean) |
683 | ring_desc = " NTC/U" ; |
684 | else if (i == ring->next_to_use) |
685 | ring_desc = " NTU" ; |
686 | else if (i == ring->next_to_clean) |
687 | ring_desc = " NTC" ; |
688 | else |
689 | ring_desc = "" ; |
690 | pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s" , |
691 | i, |
692 | le64_to_cpu((__force __le64)u0->a), |
693 | le64_to_cpu((__force __le64)u0->b), |
694 | (u64)dma_unmap_addr(tx_buffer, dma), |
695 | dma_unmap_len(tx_buffer, len), |
696 | tx_buffer->next_to_watch, |
697 | (u64)tx_buffer->time_stamp, |
698 | tx_buffer->skb, |
699 | ring_desc); |
700 | |
701 | if (netif_msg_pktdata(adapter) && |
702 | tx_buffer->skb) |
703 | print_hex_dump(KERN_INFO, "", |
704 | DUMP_PREFIX_ADDRESS, 16, 1, |
705 | tx_buffer->skb->data, |
706 | dma_unmap_len(tx_buffer, len), |
707 | true); |
708 | } |
709 | } |
710 | } |
711 | |
712 | /* Print RX Rings Summary */ |
713 | rx_ring_summary: |
714 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); |
715 | pr_info("Queue [NTU] [NTC]\n"); |
716 | for (n = 0; n < adapter->num_rx_queues; n++) { |
717 | rx_ring = adapter->rx_ring[n]; |
718 | pr_info("%5d %5X %5X\n", |
719 | n, rx_ring->next_to_use, rx_ring->next_to_clean); |
720 | } |
721 | |
722 | /* Print RX Rings */ |
723 | if (!netif_msg_rx_status(adapter)) |
724 | return; |
725 | |
726 | dev_info(&adapter->pdev->dev, "RX Rings Dump\n" ); |
727 | |
728 | /* Receive Descriptor Formats |
729 | * |
730 | * 82598 Advanced Receive Descriptor (Read) Format |
731 | * 63 1 0 |
732 | * +-----------------------------------------------------+ |
733 | * 0 | Packet Buffer Address [63:1] |A0/NSE| |
734 | * +----------------------------------------------+------+ |
735 | * 8 | Header Buffer Address [63:1] | DD | |
736 | * +-----------------------------------------------------+ |
737 | * |
738 | * |
739 | * 82598 Advanced Receive Descriptor (Write-Back) Format |
740 | * |
741 | * 63 48 47 32 31 30 21 20 16 15 4 3 0 |
742 | * +------------------------------------------------------+ |
743 | * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | |
744 | * | Packet | IP | | | | Type | Type | |
745 | * | Checksum | Ident | | | | | | |
746 | * +------------------------------------------------------+ |
747 | * 8 | VLAN Tag | Length | Extended Error | Extended Status | |
748 | * +------------------------------------------------------+ |
749 | * 63 48 47 32 31 20 19 0 |
750 | * |
751 | * 82599+ Advanced Receive Descriptor (Read) Format |
752 | * 63 1 0 |
753 | * +-----------------------------------------------------+ |
754 | * 0 | Packet Buffer Address [63:1] |A0/NSE| |
755 | * +----------------------------------------------+------+ |
756 | * 8 | Header Buffer Address [63:1] | DD | |
757 | * +-----------------------------------------------------+ |
758 | * |
759 | * |
760 | * 82599+ Advanced Receive Descriptor (Write-Back) Format |
761 | * |
762 | * 63 48 47 32 31 30 21 20 17 16 4 3 0 |
763 | * +------------------------------------------------------+ |
764 | * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | |
765 | * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | |
766 | * |/ Flow Dir Flt ID | | | | | | |
767 | * +------------------------------------------------------+ |
768 | * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | |
769 | * +------------------------------------------------------+ |
770 | * 63 48 47 32 31 20 19 0 |
771 | */ |
772 | |
773 | for (n = 0; n < adapter->num_rx_queues; n++) { |
774 | rx_ring = adapter->rx_ring[n]; |
775 | pr_info("------------------------------------\n"); |
776 | pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); |
777 | pr_info("------------------------------------\n"); |
778 | pr_info("%s%s%s\n", |
779 | "R [desc] [ PktBuf A0] ", |
780 | "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", |
781 | "<-- Adv Rx Read format"); |
782 | pr_info("%s%s%s\n", |
783 | "RWB[desc] [PcsmIpSHl PtRs] ", |
784 | "[vl er S cks ln] ---------------- [bi->skb ] ", |
785 | "<-- Adv Rx Write-Back format"); |
786 | |
787 | for (i = 0; i < rx_ring->count; i++) { |
788 | const char *ring_desc; |
789 | |
790 | if (i == rx_ring->next_to_use) |
791 | ring_desc = " NTU" ; |
792 | else if (i == rx_ring->next_to_clean) |
793 | ring_desc = " NTC" ; |
794 | else |
795 | ring_desc = "" ; |
796 | |
797 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
798 | rx_desc = IXGBE_RX_DESC(rx_ring, i); |
799 | u0 = (struct my_u0 *)rx_desc; |
800 | if (rx_desc->wb.upper.length) { |
801 | /* Descriptor Done */ |
802 | pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n" , |
803 | i, |
804 | le64_to_cpu((__force __le64)u0->a), |
805 | le64_to_cpu((__force __le64)u0->b), |
806 | rx_buffer_info->skb, |
807 | ring_desc); |
808 | } else { |
809 | pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n" , |
810 | i, |
811 | le64_to_cpu((__force __le64)u0->a), |
812 | le64_to_cpu((__force __le64)u0->b), |
813 | (u64)rx_buffer_info->dma, |
814 | rx_buffer_info->skb, |
815 | ring_desc); |
816 | |
817 | if (netif_msg_pktdata(adapter) && |
818 | rx_buffer_info->dma) { |
819 | print_hex_dump(KERN_INFO, "", |
820 | DUMP_PREFIX_ADDRESS, 16, 1, |
821 | page_address(rx_buffer_info->page) + |
822 | rx_buffer_info->page_offset, |
823 | ixgbe_rx_bufsz(rx_ring), true); |
824 | } |
825 | } |
826 | } |
827 | } |
828 | } |
829 | |
830 | static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) |
831 | { |
832 | u32 ctrl_ext; |
833 | |
834 | /* Let firmware take over control of h/w */ |
835 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
836 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
837 | ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); |
838 | } |
839 | |
840 | static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) |
841 | { |
842 | u32 ctrl_ext; |
843 | |
844 | /* Let firmware know the driver has taken over */ |
845 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
846 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, |
847 | ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); |
848 | } |
849 | |
850 | /** |
851 | * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors |
852 | * @adapter: pointer to adapter struct |
853 | * @direction: 0 for Rx, 1 for Tx, -1 for other causes |
854 | * @queue: queue to map the corresponding interrupt to |
855 | * @msix_vector: the vector to map to the corresponding queue |
856 | * |
857 | */ |
858 | static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, |
859 | u8 queue, u8 msix_vector) |
860 | { |
861 | u32 ivar, index; |
862 | struct ixgbe_hw *hw = &adapter->hw; |
863 | switch (hw->mac.type) { |
864 | case ixgbe_mac_82598EB: |
865 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
866 | if (direction == -1) |
867 | direction = 0; |
868 | index = (((direction * 64) + queue) >> 2) & 0x1F; |
869 | ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); |
870 | ivar &= ~(0xFF << (8 * (queue & 0x3))); |
871 | ivar |= (msix_vector << (8 * (queue & 0x3))); |
872 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); |
873 | break; |
874 | case ixgbe_mac_82599EB: |
875 | case ixgbe_mac_X540: |
876 | case ixgbe_mac_X550: |
877 | case ixgbe_mac_X550EM_x: |
878 | case ixgbe_mac_x550em_a: |
879 | if (direction == -1) { |
880 | /* other causes */ |
881 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
882 | index = ((queue & 1) * 8); |
883 | ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); |
884 | ivar &= ~(0xFF << index); |
885 | ivar |= (msix_vector << index); |
886 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); |
887 | break; |
888 | } else { |
889 | /* tx or rx causes */ |
890 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
891 | index = ((16 * (queue & 1)) + (8 * direction)); |
892 | ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); |
893 | ivar &= ~(0xFF << index); |
894 | ivar |= (msix_vector << index); |
895 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); |
896 | break; |
897 | } |
898 | default: |
899 | break; |
900 | } |
901 | } |
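 | |
 | /* Worked example for the IVAR mapping above (an illustrative sketch; |
 |  * it assumes IXGBE_IVAR_ALLOC_VAL is 0x80, as defined in ixgbe_type.h): |
 |  * mapping Rx queue 5 (direction 0) to MSI-X vector 3 on 82599 gives |
 |  * index = (16 * (5 & 1)) + (8 * 0) = 16, so the byte 0x83 is shifted |
 |  * into bits 23:16 of IVAR(5 >> 1) = IVAR(2), leaving the other three |
 |  * 8-bit entries of that register untouched. |
 |  */ |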
902 | |
903 | void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, |
904 | u64 qmask) |
905 | { |
906 | u32 mask; |
907 | |
908 | switch (adapter->hw.mac.type) { |
909 | case ixgbe_mac_82598EB: |
910 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
911 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
912 | break; |
913 | case ixgbe_mac_82599EB: |
914 | case ixgbe_mac_X540: |
915 | case ixgbe_mac_X550: |
916 | case ixgbe_mac_X550EM_x: |
917 | case ixgbe_mac_x550em_a: |
918 | mask = (qmask & 0xFFFFFFFF); |
919 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); |
920 | mask = (qmask >> 32); |
921 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); |
922 | break; |
923 | default: |
924 | break; |
925 | } |
926 | } |
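 | |
 | /* Illustrative use of the 64-bit qmask split above: qmask = |
 |  * BIT_ULL(35) | BIT_ULL(2) rearms queue 2 via EICS_EX(0) (the low |
 |  * 32 bits of the mask) and queue 35 via EICS_EX(1), where the value |
 |  * written is qmask >> 32 = BIT(3). |
 |  */ |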
927 | |
928 | static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) |
929 | { |
930 | struct ixgbe_hw *hw = &adapter->hw; |
931 | struct ixgbe_hw_stats *hwstats = &adapter->stats; |
932 | int i; |
933 | u32 data; |
934 | |
935 | if ((hw->fc.current_mode != ixgbe_fc_full) && |
936 | (hw->fc.current_mode != ixgbe_fc_rx_pause)) |
937 | return; |
938 | |
939 | switch (hw->mac.type) { |
940 | case ixgbe_mac_82598EB: |
941 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); |
942 | break; |
943 | default: |
944 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
945 | } |
946 | hwstats->lxoffrxc += data; |
947 | |
948 | /* refill credits (no tx hang) if we received xoff */ |
949 | if (!data) |
950 | return; |
951 | |
952 | for (i = 0; i < adapter->num_tx_queues; i++) |
953 | clear_bit(__IXGBE_HANG_CHECK_ARMED, |
954 | &adapter->tx_ring[i]->state); |
955 | |
956 | for (i = 0; i < adapter->num_xdp_queues; i++) |
957 | clear_bit(__IXGBE_HANG_CHECK_ARMED, |
958 | &adapter->xdp_ring[i]->state); |
959 | } |
960 | |
961 | static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) |
962 | { |
963 | struct ixgbe_hw *hw = &adapter->hw; |
964 | struct ixgbe_hw_stats *hwstats = &adapter->stats; |
965 | u32 xoff[8] = {0}; |
966 | u8 tc; |
967 | int i; |
968 | bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; |
969 | |
970 | if (adapter->ixgbe_ieee_pfc) |
971 | pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); |
972 | |
973 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { |
974 | ixgbe_update_xoff_rx_lfc(adapter); |
975 | return; |
976 | } |
977 | |
978 | /* update stats for each tc, only valid with PFC enabled */ |
979 | for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { |
980 | u32 pxoffrxc; |
981 | |
982 | switch (hw->mac.type) { |
983 | case ixgbe_mac_82598EB: |
984 | pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); |
985 | break; |
986 | default: |
987 | pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); |
988 | } |
989 | hwstats->pxoffrxc[i] += pxoffrxc; |
990 | /* Get the TC for given UP */ |
991 | tc = netdev_get_prio_tc_map(adapter->netdev, i); |
992 | xoff[tc] += pxoffrxc; |
993 | } |
994 | |
995 | /* disarm tx queues that have received xoff frames */ |
996 | for (i = 0; i < adapter->num_tx_queues; i++) { |
997 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
998 | |
999 | tc = tx_ring->dcb_tc; |
1000 | if (xoff[tc]) |
1001 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); |
1002 | } |
1003 | |
1004 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
1005 | struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; |
1006 | |
1007 | tc = xdp_ring->dcb_tc; |
1008 | if (xoff[tc]) |
1009 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state); |
1010 | } |
1011 | } |
1012 | |
1013 | static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) |
1014 | { |
1015 | return ring->stats.packets; |
1016 | } |
1017 | |
1018 | static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) |
1019 | { |
1020 | unsigned int head, tail; |
1021 | |
1022 | head = ring->next_to_clean; |
1023 | tail = ring->next_to_use; |
1024 | |
1025 | return ((head <= tail) ? tail : tail + ring->count) - head; |
1026 | } |
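 | |
 | /* Example of the wrap-around arithmetic above: on a 512-entry ring |
 |  * with next_to_clean = 500 and next_to_use = 10, head > tail, so the |
 |  * pending count is (10 + 512) - 500 = 22 descriptors. |
 |  */ |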
1027 | |
1028 | static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) |
1029 | { |
1030 | u32 tx_done = ixgbe_get_tx_completed(tx_ring); |
1031 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; |
1032 | u32 tx_pending = ixgbe_get_tx_pending(tx_ring); |
1033 | |
1034 | clear_check_for_tx_hang(tx_ring); |
1035 | |
1036 | /* |
1037 | * Check for a hung queue, but be thorough. This verifies |
1038 | * that a transmit has been completed since the previous |
1039 | * check AND there is at least one packet pending. The |
1040 | * ARMED bit is set to indicate a potential hang. The |
1041 | * bit is cleared if a pause frame is received to remove |
1042 | * false hang detection due to PFC or 802.3x frames. By |
1043 | * requiring this to fail twice we avoid races with |
1044 | * pfc clearing the ARMED bit and conditions where we |
1045 | * run the check_tx_hang logic with a transmit completion |
1046 | * pending but without time to complete it yet. |
1047 | */ |
1048 | if (tx_done_old == tx_done && tx_pending) |
1049 | /* make sure it is true for two checks in a row */ |
1050 | return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, |
1051 | &tx_ring->state); |
1052 | /* update completed stats and continue */ |
1053 | tx_ring->tx_stats.tx_done_old = tx_done; |
1054 | /* reset the countdown */ |
1055 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); |
1056 | |
1057 | return false; |
1058 | } |
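 | |
 | /* Illustrative sequence for the two-pass logic above: one service-task |
 |  * pass sees no new completions with packets pending and arms |
 |  * __IXGBE_HANG_CHECK_ARMED; only if the next pass still sees |
 |  * tx_done_old == tx_done does test_and_set_bit() return true and the |
 |  * queue get reported as hung. |
 |  */ |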
1059 | |
1060 | /** |
1061 | * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout |
1062 | * @adapter: driver private struct |
1063 | **/ |
1064 | static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) |
1065 | { |
1066 | |
1067 | /* Do the reset outside of interrupt context */ |
1068 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1069 | set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); |
1070 | e_warn(drv, "initiating reset due to tx timeout\n"); |
1071 | ixgbe_service_event_schedule(adapter); |
1072 | } |
1073 | } |
1074 | |
1075 | /** |
1076 | * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate |
1077 | * @netdev: network interface device structure |
1078 | * @queue_index: Tx queue to set |
1079 | * @maxrate: desired maximum transmit bitrate |
1080 | **/ |
1081 | static int ixgbe_tx_maxrate(struct net_device *netdev, |
1082 | int queue_index, u32 maxrate) |
1083 | { |
1084 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1085 | struct ixgbe_hw *hw = &adapter->hw; |
1086 | u32 bcnrc_val = ixgbe_link_mbps(adapter); |
1087 | |
1088 | if (!maxrate) |
1089 | return 0; |
1090 | |
1091 | /* Calculate the rate factor values to set */ |
1092 | bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; |
1093 | bcnrc_val /= maxrate; |
1094 | |
1095 | /* clear everything but the rate factor */ |
1096 | bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | |
1097 | IXGBE_RTTBCNRC_RF_DEC_MASK; |
1098 | |
1099 | /* enable the rate scheduler */ |
1100 | bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; |
1101 | |
1102 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); |
1103 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); |
1104 | |
1105 | return 0; |
1106 | } |
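 | |
 | /* Worked example of the rate-factor math above (a sketch assuming |
 |  * IXGBE_RTTBCNRC_RF_INT_SHIFT is 14, per ixgbe_type.h): on a |
 |  * 10000 Mbps link, capping a queue at 2500 Mbps yields |
 |  * bcnrc_val = (10000 << 14) / 2500 = 0x10000, i.e. a rate factor of |
 |  * 4.0 (link speed / max rate) in the hardware's fixed-point format. |
 |  */ |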
1107 | |
1108 | /** |
1109 | * ixgbe_clean_tx_irq - Reclaim resources after transmit completes |
1110 | * @q_vector: structure containing interrupt and ring information |
1111 | * @tx_ring: tx ring to clean |
1112 | * @napi_budget: Used to determine if we are in netpoll |
1113 | **/ |
1114 | static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, |
1115 | struct ixgbe_ring *tx_ring, int napi_budget) |
1116 | { |
1117 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1118 | struct ixgbe_tx_buffer *tx_buffer; |
1119 | union ixgbe_adv_tx_desc *tx_desc; |
1120 | unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; |
1121 | unsigned int budget = q_vector->tx.work_limit; |
1122 | unsigned int i = tx_ring->next_to_clean; |
1123 | struct netdev_queue *txq; |
1124 | |
1125 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
1126 | return true; |
1127 | |
1128 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
1129 | tx_desc = IXGBE_TX_DESC(tx_ring, i); |
1130 | i -= tx_ring->count; |
1131 | |
1132 | do { |
1133 | union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; |
1134 | |
1135 | /* if next_to_watch is not set then there is no work pending */ |
1136 | if (!eop_desc) |
1137 | break; |
1138 | |
1139 | /* prevent any other reads prior to eop_desc */ |
1140 | smp_rmb(); |
1141 | |
1142 | /* if DD is not set pending work has not been completed */ |
1143 | if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) |
1144 | break; |
1145 | |
1146 | /* clear next_to_watch to prevent false hangs */ |
1147 | tx_buffer->next_to_watch = NULL; |
1148 | |
1149 | /* update the statistics for this packet */ |
1150 | total_bytes += tx_buffer->bytecount; |
1151 | total_packets += tx_buffer->gso_segs; |
1152 | if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) |
1153 | total_ipsec++; |
1154 | |
1155 | /* free the skb */ |
1156 | if (ring_is_xdp(tx_ring)) |
1157 | xdp_return_frame(tx_buffer->xdpf); |
1158 | else |
1159 | napi_consume_skb(tx_buffer->skb, napi_budget); |
1160 | |
1161 | /* unmap skb header data */ |
1162 | dma_unmap_single(tx_ring->dev, |
1163 | dma_unmap_addr(tx_buffer, dma), |
1164 | dma_unmap_len(tx_buffer, len), |
1165 | DMA_TO_DEVICE); |
1166 | |
1167 | /* clear tx_buffer data */ |
1168 | dma_unmap_len_set(tx_buffer, len, 0); |
1169 | |
1170 | /* unmap remaining buffers */ |
1171 | while (tx_desc != eop_desc) { |
1172 | tx_buffer++; |
1173 | tx_desc++; |
1174 | i++; |
1175 | if (unlikely(!i)) { |
1176 | i -= tx_ring->count; |
1177 | tx_buffer = tx_ring->tx_buffer_info; |
1178 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
1179 | } |
1180 | |
1181 | /* unmap any remaining paged data */ |
1182 | if (dma_unmap_len(tx_buffer, len)) { |
1183 | dma_unmap_page(tx_ring->dev, |
1184 | dma_unmap_addr(tx_buffer, dma), |
1185 | dma_unmap_len(tx_buffer, len), |
1186 | DMA_TO_DEVICE); |
1187 | dma_unmap_len_set(tx_buffer, len, 0); |
1188 | } |
1189 | } |
1190 | |
1191 | /* move us one more past the eop_desc for start of next pkt */ |
1192 | tx_buffer++; |
1193 | tx_desc++; |
1194 | i++; |
1195 | if (unlikely(!i)) { |
1196 | i -= tx_ring->count; |
1197 | tx_buffer = tx_ring->tx_buffer_info; |
1198 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
1199 | } |
1200 | |
1201 | /* issue prefetch for next Tx descriptor */ |
1202 | prefetch(tx_desc); |
1203 | |
1204 | /* update budget accounting */ |
1205 | budget--; |
1206 | } while (likely(budget)); |
1207 | |
1208 | i += tx_ring->count; |
1209 | tx_ring->next_to_clean = i; |
1210 | u64_stats_update_begin(&tx_ring->syncp); |
1211 | tx_ring->stats.bytes += total_bytes; |
1212 | tx_ring->stats.packets += total_packets; |
1213 | u64_stats_update_end(&tx_ring->syncp); |
1214 | q_vector->tx.total_bytes += total_bytes; |
1215 | q_vector->tx.total_packets += total_packets; |
1216 | adapter->tx_ipsec += total_ipsec; |
1217 | |
1218 | if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { |
1219 | /* schedule immediate reset if we believe we hung */ |
1220 | struct ixgbe_hw *hw = &adapter->hw; |
1221 | e_err(drv, "Detected Tx Unit Hang %s\n" |
1222 | " Tx Queue <%d>\n" |
1223 | " TDH, TDT <%x>, <%x>\n" |
1224 | " next_to_use <%x>\n" |
1225 | " next_to_clean <%x>\n" |
1226 | "tx_buffer_info[next_to_clean]\n" |
1227 | " time_stamp <%lx>\n" |
1228 | " jiffies <%lx>\n" , |
1229 | ring_is_xdp(tx_ring) ? "(XDP)" : "" , |
1230 | tx_ring->queue_index, |
1231 | IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), |
1232 | IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), |
1233 | tx_ring->next_to_use, i, |
1234 | tx_ring->tx_buffer_info[i].time_stamp, jiffies); |
1235 | |
1236 | if (!ring_is_xdp(tx_ring)) |
1237 | netif_stop_subqueue(tx_ring->netdev, |
1238 | tx_ring->queue_index); |
1239 | |
1240 | e_info(probe, |
1241 | "tx hang %d detected on queue %d, resetting adapter\n" , |
1242 | adapter->tx_timeout_count + 1, tx_ring->queue_index); |
1243 | |
1244 | /* schedule immediate reset if we believe we hung */ |
1245 | ixgbe_tx_timeout_reset(adapter); |
1246 | |
1247 | /* the adapter is about to reset, no point in enabling stuff */ |
1248 | return true; |
1249 | } |
1250 | |
1251 | if (ring_is_xdp(tx_ring)) |
1252 | return !!budget; |
1253 | |
1254 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
1255 | txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); |
1256 | if (!__netif_txq_completed_wake(txq, total_packets, total_bytes, |
1257 | ixgbe_desc_unused(tx_ring), |
1258 | TX_WAKE_THRESHOLD, |
1259 | !netif_carrier_ok(tx_ring->netdev) || |
1260 | test_bit(__IXGBE_DOWN, &adapter->state))) |
1261 | ++tx_ring->tx_stats.restart_queue; |
1262 | |
1263 | return !!budget; |
1264 | } |
1265 | |
1266 | #ifdef CONFIG_IXGBE_DCA |
1267 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, |
1268 | struct ixgbe_ring *tx_ring, |
1269 | int cpu) |
1270 | { |
1271 | struct ixgbe_hw *hw = &adapter->hw; |
1272 | u32 txctrl = 0; |
1273 | u16 reg_offset; |
1274 | |
1275 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1276 | txctrl = dca3_get_tag(tx_ring->dev, cpu); |
1277 | |
1278 | switch (hw->mac.type) { |
1279 | case ixgbe_mac_82598EB: |
1280 | reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); |
1281 | break; |
1282 | case ixgbe_mac_82599EB: |
1283 | case ixgbe_mac_X540: |
1284 | reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); |
1285 | txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; |
1286 | break; |
1287 | default: |
1288 | /* for unknown hardware do not write register */ |
1289 | return; |
1290 | } |
1291 | |
1292 | /* |
1293 | * We can enable relaxed ordering for reads, but not writes when |
1294 | * DCA is enabled. This is due to a known issue in some chipsets |
1295 | * which will cause the DCA tag to be cleared. |
1296 | */ |
1297 | txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | |
1298 | IXGBE_DCA_TXCTRL_DATA_RRO_EN | |
1299 | IXGBE_DCA_TXCTRL_DESC_DCA_EN; |
1300 | |
1301 | IXGBE_WRITE_REG(hw, reg_offset, txctrl); |
1302 | } |
1303 | |
1304 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, |
1305 | struct ixgbe_ring *rx_ring, |
1306 | int cpu) |
1307 | { |
1308 | struct ixgbe_hw *hw = &adapter->hw; |
1309 | u32 rxctrl = 0; |
1310 | u8 reg_idx = rx_ring->reg_idx; |
1311 | |
1312 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1313 | rxctrl = dca3_get_tag(rx_ring->dev, cpu); |
1314 | |
1315 | switch (hw->mac.type) { |
1316 | case ixgbe_mac_82599EB: |
1317 | case ixgbe_mac_X540: |
1318 | rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; |
1319 | break; |
1320 | default: |
1321 | break; |
1322 | } |
1323 | |
1324 | /* |
1325 | * We can enable relaxed ordering for reads, but not writes when |
1326 | * DCA is enabled. This is due to a known issue in some chipsets |
1327 | * which will cause the DCA tag to be cleared. |
1328 | */ |
1329 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | |
1330 | IXGBE_DCA_RXCTRL_DATA_DCA_EN | |
1331 | IXGBE_DCA_RXCTRL_DESC_DCA_EN; |
1332 | |
1333 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); |
1334 | } |
1335 | |
1336 | static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) |
1337 | { |
1338 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1339 | struct ixgbe_ring *ring; |
1340 | int cpu = get_cpu(); |
1341 | |
1342 | if (q_vector->cpu == cpu) |
1343 | goto out_no_update; |
1344 | |
1345 | ixgbe_for_each_ring(ring, q_vector->tx) |
1346 | ixgbe_update_tx_dca(adapter, ring, cpu); |
1347 | |
1348 | ixgbe_for_each_ring(ring, q_vector->rx) |
1349 | ixgbe_update_rx_dca(adapter, ring, cpu); |
1350 | |
1351 | q_vector->cpu = cpu; |
1352 | out_no_update: |
1353 | put_cpu(); |
1354 | } |
1355 | |
1356 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) |
1357 | { |
1358 | int i; |
1359 | |
1360 | /* always use CB2 mode, difference is masked in the CB driver */ |
1361 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1362 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, |
1363 | IXGBE_DCA_CTRL_DCA_MODE_CB2); |
1364 | else |
1365 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, |
1366 | IXGBE_DCA_CTRL_DCA_DISABLE); |
1367 | |
1368 | for (i = 0; i < adapter->num_q_vectors; i++) { |
1369 | adapter->q_vector[i]->cpu = -1; |
1370 | ixgbe_update_dca(adapter->q_vector[i]); |
1371 | } |
1372 | } |
1373 | |
1374 | static int __ixgbe_notify_dca(struct device *dev, void *data) |
1375 | { |
1376 | struct ixgbe_adapter *adapter = dev_get_drvdata(dev); |
1377 | unsigned long event = *(unsigned long *)data; |
1378 | |
1379 | if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) |
1380 | return 0; |
1381 | |
1382 | switch (event) { |
1383 | case DCA_PROVIDER_ADD: |
1384 | /* if we're already enabled, don't do it again */ |
1385 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1386 | break; |
1387 | if (dca_add_requester(dev) == 0) { |
1388 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
1389 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, |
1390 | IXGBE_DCA_CTRL_DCA_MODE_CB2); |
1391 | break; |
1392 | } |
1393 | fallthrough; /* DCA is disabled. */ |
1394 | case DCA_PROVIDER_REMOVE: |
1395 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
1396 | dca_remove_requester(dev); |
1397 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; |
1398 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, |
1399 | IXGBE_DCA_CTRL_DCA_DISABLE); |
1400 | } |
1401 | break; |
1402 | } |
1403 | |
1404 | return 0; |
1405 | } |
1406 | |
1407 | #endif /* CONFIG_IXGBE_DCA */ |
1408 | |
1409 | #define IXGBE_RSS_L4_TYPES_MASK \ |
1410 | ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ |
1411 | (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ |
1412 | (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ |
1413 | (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) |
1414 | |
1415 | static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, |
1416 | union ixgbe_adv_rx_desc *rx_desc, |
1417 | struct sk_buff *skb) |
1418 | { |
1419 | u16 rss_type; |
1420 | |
1421 | if (!(ring->netdev->features & NETIF_F_RXHASH)) |
1422 | return; |
1423 | |
1424 | rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & |
1425 | IXGBE_RXDADV_RSSTYPE_MASK; |
1426 | |
1427 | if (!rss_type) |
1428 | return; |
1429 | |
1430 | skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), |
1431 | (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? |
1432 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); |
1433 | } |
1434 | |
1435 | #ifdef IXGBE_FCOE |
1436 | /** |
1437 | * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type |
1438 | * @ring: structure containing ring specific data |
1439 | * @rx_desc: advanced rx descriptor |
1440 | * |
1441 | * Returns: true if it is an FCoE packet |
1442 | */ |
1443 | static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, |
1444 | union ixgbe_adv_rx_desc *rx_desc) |
1445 | { |
1446 | __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
1447 | |
1448 | return test_bit(__IXGBE_RX_FCOE, &ring->state) && |
1449 | ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == |
1450 | (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << |
1451 | IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); |
1452 | } |
1453 | |
1454 | #endif /* IXGBE_FCOE */ |
1455 | /** |
1456 | * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum |
1457 | * @ring: structure containing ring specific data |
1458 | * @rx_desc: current Rx descriptor being processed |
1459 | * @skb: skb currently being received and modified |
1460 | **/ |
1461 | static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, |
1462 | union ixgbe_adv_rx_desc *rx_desc, |
1463 | struct sk_buff *skb) |
1464 | { |
1465 | __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
1466 | bool encap_pkt = false; |
1467 | |
1468 | skb_checksum_none_assert(skb); |
1469 | |
1470 | /* Rx csum disabled */ |
1471 | if (!(ring->netdev->features & NETIF_F_RXCSUM)) |
1472 | return; |
1473 | |
1474 | /* check for VXLAN and Geneve packets */ |
1475 | if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { |
1476 | encap_pkt = true; |
1477 | skb->encapsulation = 1; |
1478 | } |
1479 | |
1480 | /* if IP and error */ |
1481 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && |
1482 | ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { |
1483 | ring->rx_stats.csum_err++; |
1484 | return; |
1485 | } |
1486 | |
1487 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) |
1488 | return; |
1489 | |
1490 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { |
1491 | /* |
1492 | * 82599 errata, UDP frames with a 0 checksum can be marked as |
1493 | * checksum errors. |
1494 | */ |
1495 | if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && |
1496 | test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) |
1497 | return; |
1498 | |
1499 | ring->rx_stats.csum_err++; |
1500 | return; |
1501 | } |
1502 | |
1503 | /* It must be a TCP or UDP packet with a valid checksum */ |
1504 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1505 | if (encap_pkt) { |
1506 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) |
1507 | return; |
1508 | |
1509 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { |
1510 | skb->ip_summed = CHECKSUM_NONE; |
1511 | return; |
1512 | } |
1513 | /* If we checked the outer header let the stack know */ |
1514 | skb->csum_level = 1; |
1515 | } |
1516 | } |
1517 | |
1518 | static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) |
1519 | { |
1520 | return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; |
1521 | } |
1522 | |
1523 | static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, |
1524 | struct ixgbe_rx_buffer *bi) |
1525 | { |
1526 | struct page *page = bi->page; |
1527 | dma_addr_t dma; |
1528 | |
1529 | /* since we are recycling buffers we should seldom need to alloc */ |
1530 | if (likely(page)) |
1531 | return true; |
1532 | |
1533 | /* alloc new page for storage */ |
1534 | page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); |
1535 | if (unlikely(!page)) { |
1536 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1537 | return false; |
1538 | } |
1539 | |
1540 | /* map page for use */ |
1541 | dma = dma_map_page_attrs(rx_ring->dev, page, 0, |
1542 | ixgbe_rx_pg_size(rx_ring), |
1543 | DMA_FROM_DEVICE, |
1544 | IXGBE_RX_DMA_ATTR); |
1545 | |
1546 | /* |
1547 | * if mapping failed free memory back to system since |
1548 | * there isn't much point in holding memory we can't use |
1549 | */ |
1550 | if (dma_mapping_error(rx_ring->dev, dma)) { |
1551 | __free_pages(page, ixgbe_rx_pg_order(rx_ring)); |
1552 | |
1553 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1554 | return false; |
1555 | } |
1556 | |
1557 | bi->dma = dma; |
1558 | bi->page = page; |
1559 | bi->page_offset = rx_ring->rx_offset; |
1560 | page_ref_add(page, USHRT_MAX - 1); |
1561 | bi->pagecnt_bias = USHRT_MAX; |
1562 | rx_ring->rx_stats.alloc_rx_page++; |
1563 | |
1564 | return true; |
1565 | } |
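 | |
 | /* Note on the reference-counting scheme above: a freshly allocated page |
 |  * has refcount 1, so after page_ref_add(page, USHRT_MAX - 1) both the |
 |  * page refcount and pagecnt_bias equal USHRT_MAX. Buffers handed up the |
 |  * stack then decrement the cheap, non-atomic pagecnt_bias rather than |
 |  * the atomic page refcount; the two counts are reconciled only when the |
 |  * page is recycled or finally freed. |
 |  */ |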
1566 | |
1567 | /** |
1568 | * ixgbe_alloc_rx_buffers - Replace used receive buffers |
1569 | * @rx_ring: ring to place buffers on |
1570 | * @cleaned_count: number of buffers to replace |
1571 | **/ |
1572 | void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) |
1573 | { |
1574 | union ixgbe_adv_rx_desc *rx_desc; |
1575 | struct ixgbe_rx_buffer *bi; |
1576 | u16 i = rx_ring->next_to_use; |
1577 | u16 bufsz; |
1578 | |
1579 | /* nothing to do */ |
1580 | if (!cleaned_count) |
1581 | return; |
1582 | |
1583 | rx_desc = IXGBE_RX_DESC(rx_ring, i); |
1584 | bi = &rx_ring->rx_buffer_info[i]; |
1585 | i -= rx_ring->count; |
1586 | |
	bufsz = ixgbe_rx_bufsz(rx_ring);
1588 | |
1589 | do { |
1590 | if (!ixgbe_alloc_mapped_page(rx_ring, bi)) |
1591 | break; |
1592 | |
1593 | /* sync the buffer for use by the device */ |
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);
1597 | |
1598 | /* |
1599 | * Refresh the desc even if buffer_addrs didn't change |
1600 | * because each write-back erases this info. |
1601 | */ |
1602 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); |
1603 | |
1604 | rx_desc++; |
1605 | bi++; |
1606 | i++; |
1607 | if (unlikely(!i)) { |
1608 | rx_desc = IXGBE_RX_DESC(rx_ring, 0); |
1609 | bi = rx_ring->rx_buffer_info; |
1610 | i -= rx_ring->count; |
1611 | } |
1612 | |
1613 | /* clear the length for the next_to_use descriptor */ |
1614 | rx_desc->wb.upper.length = 0; |
1615 | |
1616 | cleaned_count--; |
1617 | } while (cleaned_count); |
1618 | |
1619 | i += rx_ring->count; |
1620 | |
1621 | if (rx_ring->next_to_use != i) { |
1622 | rx_ring->next_to_use = i; |
1623 | |
1624 | /* update next to alloc since we have filled the ring */ |
1625 | rx_ring->next_to_alloc = i; |
1626 | |
1627 | /* Force memory writes to complete before letting h/w |
1628 | * know there are new descriptors to fetch. (Only |
1629 | * applicable for weak-ordered memory model archs, |
1630 | * such as IA-64). |
1631 | */ |
1632 | wmb(); |
		writel(i, rx_ring->tail);
1634 | } |
1635 | } |
1636 | |
1637 | static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, |
1638 | struct sk_buff *skb) |
1639 | { |
1640 | u16 hdr_len = skb_headlen(skb); |
1641 | |
1642 | /* set gso_size to avoid messing up TCP MSS */ |
1643 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), |
1644 | IXGBE_CB(skb)->append_cnt); |
1645 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
1646 | } |
1647 | |
1648 | static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, |
1649 | struct sk_buff *skb) |
1650 | { |
1651 | /* if append_cnt is 0 then frame is not RSC */ |
1652 | if (!IXGBE_CB(skb)->append_cnt) |
1653 | return; |
1654 | |
1655 | rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; |
1656 | rx_ring->rx_stats.rsc_flush++; |
1657 | |
	ixgbe_set_rsc_gso_size(rx_ring, skb);
1659 | |
1660 | /* gso_size is computed using append_cnt so always clear it last */ |
1661 | IXGBE_CB(skb)->append_cnt = 0; |
1662 | } |
1663 | |
1664 | /** |
1665 | * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor |
1666 | * @rx_ring: rx descriptor ring packet is being transacted on |
1667 | * @rx_desc: pointer to the EOP Rx descriptor |
1668 | * @skb: pointer to current skb being populated |
1669 | * |
1670 | * This function checks the ring, descriptor, and packet information in |
1671 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and |
1672 | * other fields within the skb. |
1673 | **/ |
1674 | void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, |
1675 | union ixgbe_adv_rx_desc *rx_desc, |
1676 | struct sk_buff *skb) |
1677 | { |
1678 | struct net_device *dev = rx_ring->netdev; |
1679 | u32 flags = rx_ring->q_vector->adapter->flags; |
1680 | |
1681 | ixgbe_update_rsc_stats(rx_ring, skb); |
1682 | |
	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1686 | |
1687 | if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) |
1688 | ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); |
1689 | |
1690 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
1691 | ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { |
1692 | u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1694 | } |
1695 | |
1696 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) |
1697 | ixgbe_ipsec_rx(rx_ring, rx_desc, skb); |
1698 | |
1699 | /* record Rx queue, or update MACVLAN statistics */ |
1700 | if (netif_is_ixgbe(dev)) |
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN,
				 true, false);
1705 | |
1706 | skb->protocol = eth_type_trans(skb, dev); |
1707 | } |
1708 | |
1709 | void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, |
1710 | struct sk_buff *skb) |
1711 | { |
	napi_gro_receive(&q_vector->napi, skb);
1713 | } |
1714 | |
1715 | /** |
1716 | * ixgbe_is_non_eop - process handling of non-EOP buffers |
1717 | * @rx_ring: Rx ring being processed |
1718 | * @rx_desc: Rx descriptor for current buffer |
1719 | * @skb: Current socket buffer containing buffer in progress |
1720 | * |
1721 | * This function updates next to clean. If the buffer is an EOP buffer |
1722 | * this function exits returning false, otherwise it will place the |
1723 | * sk_buff in the next buffer to be chained and return true indicating |
1724 | * that this is in fact a non-EOP buffer. |
1725 | **/ |
1726 | static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, |
1727 | union ixgbe_adv_rx_desc *rx_desc, |
1728 | struct sk_buff *skb) |
1729 | { |
1730 | u32 ntc = rx_ring->next_to_clean + 1; |
1731 | |
1732 | /* fetch, update, and store next to clean */ |
1733 | ntc = (ntc < rx_ring->count) ? ntc : 0; |
1734 | rx_ring->next_to_clean = ntc; |
1735 | |
1736 | prefetch(IXGBE_RX_DESC(rx_ring, ntc)); |
1737 | |
1738 | /* update RSC append count if present */ |
1739 | if (ring_is_rsc_enabled(rx_ring)) { |
1740 | __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & |
1741 | cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); |
1742 | |
1743 | if (unlikely(rsc_enabled)) { |
1744 | u32 rsc_cnt = le32_to_cpu(rsc_enabled); |
1745 | |
1746 | rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; |
1747 | IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; |
1748 | |
1749 | /* update ntc based on RSC value */ |
1750 | ntc = le32_to_cpu(rx_desc->wb.upper.status_error); |
1751 | ntc &= IXGBE_RXDADV_NEXTP_MASK; |
1752 | ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; |
1753 | } |
1754 | } |
1755 | |
1756 | /* if we are the last buffer then there is nothing else to do */ |
1757 | if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) |
1758 | return false; |
1759 | |
1760 | /* place skb in next buffer to be received */ |
1761 | rx_ring->rx_buffer_info[ntc].skb = skb; |
1762 | rx_ring->rx_stats.non_eop_descs++; |
1763 | |
1764 | return true; |
1765 | } |
1766 | |
1767 | /** |
1768 | * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail |
1769 | * @rx_ring: rx descriptor ring packet is being transacted on |
1770 | * @skb: pointer to current skb being adjusted |
1771 | * |
1772 | * This function is an ixgbe specific version of __pskb_pull_tail. The |
1773 | * main difference between this version and the original function is that |
1774 | * this function can make several assumptions about the state of things |
1775 | * that allow for significant optimizations versus the standard function. |
1776 | * As a result we can do things like drop a frag and maintain an accurate |
1777 | * truesize for the skb. |
1778 | */ |
1779 | static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, |
1780 | struct sk_buff *skb) |
1781 | { |
1782 | skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; |
1783 | unsigned char *va; |
1784 | unsigned int pull_len; |
1785 | |
1786 | /* |
1787 | * it is valid to use page_address instead of kmap since we are |
1788 | * working with pages allocated out of the lomem pool per |
1789 | * alloc_page(GFP_ATOMIC) |
1790 | */ |
1791 | va = skb_frag_address(frag); |
1792 | |
1793 | /* |
1794 | * we need the header to contain the greater of either ETH_HLEN or |
1795 | * 60 bytes if the skb->len is less than 60 for skb_pad. |
1796 | */ |
	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1798 | |
1799 | /* align pull length to size of long to optimize memcpy performance */ |
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1801 | |
1802 | /* update all of the pointers */ |
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);
1805 | skb->data_len -= pull_len; |
1806 | skb->tail += pull_len; |
1807 | } |
1808 | |
1809 | /** |
1810 | * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB |
1811 | * @rx_ring: rx descriptor ring packet is being transacted on |
1812 | * @skb: pointer to current skb being updated |
1813 | * |
1814 | * This function provides a basic DMA sync up for the first fragment of an |
1815 | * skb. The reason for doing this is that the first fragment cannot be |
1816 | * unmapped until we have reached the end of packet descriptor for a buffer |
1817 | * chain. |
1818 | */ |
1819 | static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, |
1820 | struct sk_buff *skb) |
1821 | { |
1822 | if (ring_uses_build_skb(rx_ring)) { |
1823 | unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; |
1824 | unsigned long offset = (unsigned long)(skb->data) & mask; |
1825 | |
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      offset,
					      skb_headlen(skb),
					      DMA_FROM_DEVICE);
1831 | } else { |
1832 | skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; |
1833 | |
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      skb_frag_off(frag),
					      skb_frag_size(frag),
					      DMA_FROM_DEVICE);
1839 | } |
1840 | |
1841 | /* If the page was released, just unmap it. */ |
1842 | if (unlikely(IXGBE_CB(skb)->page_released)) { |
		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
				     ixgbe_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBE_RX_DMA_ATTR);
1847 | } |
1848 | } |
1849 | |
1850 | /** |
1851 | * ixgbe_cleanup_headers - Correct corrupted or empty headers |
1852 | * @rx_ring: rx descriptor ring packet is being transacted on |
1853 | * @rx_desc: pointer to the EOP Rx descriptor |
1854 | * @skb: pointer to current skb being fixed |
1855 | * |
 * Check if the skb is valid: in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to the next
 * descriptor.
1859 | * |
1860 | * Check for corrupted packet headers caused by senders on the local L2 |
1861 | * embedded NIC switch not setting up their Tx Descriptors right. These |
1862 | * should be very rare. |
1863 | * |
1864 | * Also address the case where we are pulling data in on pages only |
1865 | * and as such no data is present in the skb header. |
1866 | * |
1867 | * In addition if skb is not at least 60 bytes we need to pad it so that |
1868 | * it is large enough to qualify as a valid Ethernet frame. |
1869 | * |
1870 | * Returns true if an error was encountered and skb was freed. |
1871 | **/ |
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1873 | union ixgbe_adv_rx_desc *rx_desc, |
1874 | struct sk_buff *skb) |
1875 | { |
1876 | struct net_device *netdev = rx_ring->netdev; |
1877 | |
1878 | /* XDP packets use error pointer so abort at this point */ |
	if (IS_ERR(skb))
1880 | return true; |
1881 | |
1882 | /* Verify netdev is present, and that packet does not have any |
1883 | * errors that would be unacceptable to the netdev. |
1884 | */ |
1885 | if (!netdev || |
1886 | (unlikely(ixgbe_test_staterr(rx_desc, |
1887 | IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && |
1888 | !(netdev->features & NETIF_F_RXALL)))) { |
1889 | dev_kfree_skb_any(skb); |
1890 | return true; |
1891 | } |
1892 | |
1893 | /* place header in linear portion of buffer */ |
1894 | if (!skb_headlen(skb)) |
1895 | ixgbe_pull_tail(rx_ring, skb); |
1896 | |
1897 | #ifdef IXGBE_FCOE |
1898 | /* do not attempt to pad FCoE Frames as this will disrupt DDP */ |
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1900 | return false; |
1901 | |
1902 | #endif |
1903 | /* if eth_skb_pad returns an error the skb was freed */ |
1904 | if (eth_skb_pad(skb)) |
1905 | return true; |
1906 | |
1907 | return false; |
1908 | } |
1909 | |
1910 | /** |
1911 | * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring |
1912 | * @rx_ring: rx descriptor ring to store buffers on |
1913 | * @old_buff: donor buffer to have page reused |
1914 | * |
1915 | * Synchronizes page for reuse by the adapter |
1916 | **/ |
1917 | static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, |
1918 | struct ixgbe_rx_buffer *old_buff) |
1919 | { |
1920 | struct ixgbe_rx_buffer *new_buff; |
1921 | u16 nta = rx_ring->next_to_alloc; |
1922 | |
1923 | new_buff = &rx_ring->rx_buffer_info[nta]; |
1924 | |
1925 | /* update, and store next to alloc */ |
1926 | nta++; |
1927 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; |
1928 | |
1929 | /* Transfer page from old buffer to new buffer. |
1930 | * Move each member individually to avoid possible store |
1931 | * forwarding stalls and unnecessary copy of skb. |
1932 | */ |
1933 | new_buff->dma = old_buff->dma; |
1934 | new_buff->page = old_buff->page; |
1935 | new_buff->page_offset = old_buff->page_offset; |
1936 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; |
1937 | } |
1938 | |
1939 | static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, |
1940 | int rx_buffer_pgcnt) |
1941 | { |
1942 | unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; |
1943 | struct page *page = rx_buffer->page; |
1944 | |
1945 | /* avoid re-using remote and pfmemalloc pages */ |
1946 | if (!dev_page_is_reusable(page)) |
1947 | return false; |
1948 | |
1949 | #if (PAGE_SIZE < 8192) |
1950 | /* if we are only owner of page we can reuse it */ |
1951 | if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) |
1952 | return false; |
1953 | #else |
1954 | /* The last offset is a bit aggressive in that we assume the |
1955 | * worst case of FCoE being enabled and using a 3K buffer. |
1956 | * However this should have minimal impact as the 1K extra is |
1957 | * still less than one buffer in size. |
1958 | */ |
1959 | #define IXGBE_LAST_OFFSET \ |
1960 | (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) |
1961 | if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) |
1962 | return false; |
1963 | #endif |
1964 | |
1965 | /* If we have drained the page fragment pool we need to update |
1966 | * the pagecnt_bias and page count so that we fully restock the |
1967 | * number of references the driver holds. |
1968 | */ |
1969 | if (unlikely(pagecnt_bias == 1)) { |
1970 | page_ref_add(page, USHRT_MAX - 1); |
1971 | rx_buffer->pagecnt_bias = USHRT_MAX; |
1972 | } |
1973 | |
1974 | return true; |
1975 | } |
1976 | |
1977 | /** |
1978 | * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff |
1979 | * @rx_ring: rx descriptor ring to transact packets on |
1980 | * @rx_buffer: buffer containing page to add |
1981 | * @skb: sk_buff to place the data into |
1982 | * @size: size of data in rx_buffer |
1983 | * |
1984 | * This function will add the data contained in rx_buffer->page to the skb. |
1985 | * This is done either through a direct copy if the data in the buffer is |
1986 | * less than the skb header size, otherwise it will just attach the page as |
1987 | * a frag to the skb. |
1988 | * |
1989 | * The function will then update the page offset if necessary and return |
1990 | * true if the buffer can be reused by the adapter. |
1991 | **/ |
1992 | static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, |
1993 | struct ixgbe_rx_buffer *rx_buffer, |
1994 | struct sk_buff *skb, |
1995 | unsigned int size) |
1996 | { |
1997 | #if (PAGE_SIZE < 8192) |
1998 | unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; |
1999 | #else |
2000 | unsigned int truesize = rx_ring->rx_offset ? |
2001 | SKB_DATA_ALIGN(rx_ring->rx_offset + size) : |
2002 | SKB_DATA_ALIGN(size); |
2003 | #endif |
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
2006 | #if (PAGE_SIZE < 8192) |
2007 | rx_buffer->page_offset ^= truesize; |
2008 | #else |
2009 | rx_buffer->page_offset += truesize; |
2010 | #endif |
2011 | } |
2012 | |
2013 | static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, |
2014 | union ixgbe_adv_rx_desc *rx_desc, |
2015 | struct sk_buff **skb, |
2016 | const unsigned int size, |
2017 | int *rx_buffer_pgcnt) |
2018 | { |
2019 | struct ixgbe_rx_buffer *rx_buffer; |
2020 | |
2021 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; |
2022 | *rx_buffer_pgcnt = |
2023 | #if (PAGE_SIZE < 8192) |
		page_count(rx_buffer->page);
2025 | #else |
2026 | 0; |
2027 | #endif |
	prefetchw(rx_buffer->page);
2029 | *skb = rx_buffer->skb; |
2030 | |
2031 | /* Delay unmapping of the first packet. It carries the header |
2032 | * information, HW may still access the header after the writeback. |
2033 | * Only unmap it when EOP is reached |
2034 | */ |
2035 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { |
2036 | if (!*skb) |
2037 | goto skip_sync; |
2038 | } else { |
2039 | if (*skb) |
			ixgbe_dma_sync_frag(rx_ring, *skb);
2041 | } |
2042 | |
2043 | /* we are reusing so sync this buffer for CPU use */ |
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);
2049 | skip_sync: |
2050 | rx_buffer->pagecnt_bias--; |
2051 | |
2052 | return rx_buffer; |
2053 | } |
2054 | |
2055 | static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, |
2056 | struct ixgbe_rx_buffer *rx_buffer, |
2057 | struct sk_buff *skb, |
2058 | int rx_buffer_pgcnt) |
2059 | { |
2060 | if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { |
2061 | /* hand second half of page back to the ring */ |
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2063 | } else { |
		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2065 | /* the page has been released from the ring */ |
2066 | IXGBE_CB(skb)->page_released = true; |
2067 | } else { |
2068 | /* we are not reusing the buffer so unmap it */ |
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbe_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBE_RX_DMA_ATTR);
2073 | } |
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
2076 | } |
2077 | |
2078 | /* clear contents of rx_buffer */ |
2079 | rx_buffer->page = NULL; |
2080 | rx_buffer->skb = NULL; |
2081 | } |
2082 | |
2083 | static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, |
2084 | struct ixgbe_rx_buffer *rx_buffer, |
2085 | struct xdp_buff *xdp, |
2086 | union ixgbe_adv_rx_desc *rx_desc) |
2087 | { |
2088 | unsigned int size = xdp->data_end - xdp->data; |
2089 | #if (PAGE_SIZE < 8192) |
2090 | unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; |
2091 | #else |
2092 | unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - |
2093 | xdp->data_hard_start); |
2094 | #endif |
2095 | struct sk_buff *skb; |
2096 | |
2097 | /* prefetch first cache line of first page */ |
	net_prefetch(xdp->data);
2099 | |
2100 | /* Note, we get here by enabling legacy-rx via: |
2101 | * |
2102 | * ethtool --set-priv-flags <dev> legacy-rx on |
2103 | * |
2104 | * In this mode, we currently get 0 extra XDP headroom as |
2105 | * opposed to having legacy-rx off, where we process XDP |
2106 | * packets going to stack via ixgbe_build_skb(). The latter |
2107 | * provides us currently with 192 bytes of headroom. |
2108 | * |
2109 | * For ixgbe_construct_skb() mode it means that the |
2110 | * xdp->data_meta will always point to xdp->data, since |
2111 | * the helper cannot expand the head. Should this ever |
2112 | * change in future for legacy-rx mode on, then lets also |
2113 | * add xdp->data_meta handling here. |
2114 | */ |
2115 | |
2116 | /* allocate a skb to store the frags */ |
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2118 | if (unlikely(!skb)) |
2119 | return NULL; |
2120 | |
2121 | if (size > IXGBE_RX_HDR_SIZE) { |
2122 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) |
2123 | IXGBE_CB(skb)->dma = rx_buffer->dma; |
2124 | |
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				xdp->data - page_address(rx_buffer->page),
				size, truesize);
2128 | #if (PAGE_SIZE < 8192) |
2129 | rx_buffer->page_offset ^= truesize; |
2130 | #else |
2131 | rx_buffer->page_offset += truesize; |
2132 | #endif |
2133 | } else { |
2134 | memcpy(__skb_put(skb, size), |
2135 | xdp->data, ALIGN(size, sizeof(long))); |
2136 | rx_buffer->pagecnt_bias++; |
2137 | } |
2138 | |
2139 | return skb; |
2140 | } |
2141 | |
2142 | static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, |
2143 | struct ixgbe_rx_buffer *rx_buffer, |
2144 | struct xdp_buff *xdp, |
2145 | union ixgbe_adv_rx_desc *rx_desc) |
2146 | { |
2147 | unsigned int metasize = xdp->data - xdp->data_meta; |
2148 | #if (PAGE_SIZE < 8192) |
2149 | unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; |
2150 | #else |
2151 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
2152 | SKB_DATA_ALIGN(xdp->data_end - |
2153 | xdp->data_hard_start); |
2154 | #endif |
2155 | struct sk_buff *skb; |
2156 | |
2157 | /* Prefetch first cache line of first page. If xdp->data_meta |
	 * is unused, this points exactly at xdp->data, otherwise we
2159 | * likely have a consumer accessing first few bytes of meta |
2160 | * data, and then actual data. |
2161 | */ |
	net_prefetch(xdp->data_meta);
2163 | |
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
2166 | if (unlikely(!skb)) |
2167 | return NULL; |
2168 | |
2169 | /* update pointers within the skb to store the data */ |
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);
2174 | |
2175 | /* record DMA address if this is the start of a chain of buffers */ |
2176 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) |
2177 | IXGBE_CB(skb)->dma = rx_buffer->dma; |
2178 | |
2179 | /* update buffer offset */ |
2180 | #if (PAGE_SIZE < 8192) |
2181 | rx_buffer->page_offset ^= truesize; |
2182 | #else |
2183 | rx_buffer->page_offset += truesize; |
2184 | #endif |
2185 | |
2186 | return skb; |
2187 | } |
2188 | |
2189 | static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, |
2190 | struct ixgbe_ring *rx_ring, |
2191 | struct xdp_buff *xdp) |
2192 | { |
2193 | int err, result = IXGBE_XDP_PASS; |
2194 | struct bpf_prog *xdp_prog; |
2195 | struct ixgbe_ring *ring; |
2196 | struct xdp_frame *xdpf; |
2197 | u32 act; |
2198 | |
2199 | xdp_prog = READ_ONCE(rx_ring->xdp_prog); |
2200 | |
2201 | if (!xdp_prog) |
2202 | goto xdp_out; |
2203 | |
	prefetchw(xdp->data_hard_start); /* xdp_frame write */
2205 | |
	act = bpf_prog_run_xdp(xdp_prog, xdp);
2207 | switch (act) { |
2208 | case XDP_PASS: |
2209 | break; |
2210 | case XDP_TX: |
2211 | xdpf = xdp_convert_buff_to_frame(xdp); |
2212 | if (unlikely(!xdpf)) |
2213 | goto out_failure; |
2214 | ring = ixgbe_determine_xdp_ring(adapter); |
2215 | if (static_branch_unlikely(&ixgbe_xdp_locking_key)) |
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
2220 | if (result == IXGBE_XDP_CONSUMED) |
2221 | goto out_failure; |
2222 | break; |
2223 | case XDP_REDIRECT: |
		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2225 | if (err) |
2226 | goto out_failure; |
2227 | result = IXGBE_XDP_REDIR; |
2228 | break; |
2229 | default: |
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2231 | fallthrough; |
2232 | case XDP_ABORTED: |
2233 | out_failure: |
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2235 | fallthrough; /* handle aborts by dropping packet */ |
2236 | case XDP_DROP: |
2237 | result = IXGBE_XDP_CONSUMED; |
2238 | break; |
2239 | } |
2240 | xdp_out: |
	return ERR_PTR(-result);
2242 | } |
2243 | |
2244 | static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring, |
2245 | unsigned int size) |
2246 | { |
2247 | unsigned int truesize; |
2248 | |
2249 | #if (PAGE_SIZE < 8192) |
2250 | truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ |
2251 | #else |
2252 | truesize = rx_ring->rx_offset ? |
2253 | SKB_DATA_ALIGN(rx_ring->rx_offset + size) + |
2254 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : |
2255 | SKB_DATA_ALIGN(size); |
2256 | #endif |
2257 | return truesize; |
2258 | } |
2259 | |
2260 | static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring, |
2261 | struct ixgbe_rx_buffer *rx_buffer, |
2262 | unsigned int size) |
2263 | { |
2264 | unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size); |
2265 | #if (PAGE_SIZE < 8192) |
2266 | rx_buffer->page_offset ^= truesize; |
2267 | #else |
2268 | rx_buffer->page_offset += truesize; |
2269 | #endif |
2270 | } |
2271 | |
2272 | /** |
2273 | * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf |
2274 | * @q_vector: structure containing interrupt and ring information |
2275 | * @rx_ring: rx descriptor ring to transact packets on |
2276 | * @budget: Total limit on number of packets to process |
2277 | * |
2278 | * This function provides a "bounce buffer" approach to Rx interrupt |
2279 | * processing. The advantage to this is that on systems that have |
2280 | * expensive overhead for IOMMU access this provides a means of avoiding |
 * it by maintaining the mapping of the page to the system.
2282 | * |
2283 | * Returns amount of work completed |
2284 | **/ |
2285 | static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
2286 | struct ixgbe_ring *rx_ring, |
2287 | const int budget) |
2288 | { |
2289 | unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; |
2290 | struct ixgbe_adapter *adapter = q_vector->adapter; |
2291 | #ifdef IXGBE_FCOE |
2292 | int ddp_bytes; |
2293 | unsigned int mss = 0; |
2294 | #endif /* IXGBE_FCOE */ |
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2296 | unsigned int offset = rx_ring->rx_offset; |
2297 | unsigned int xdp_xmit = 0; |
2298 | struct xdp_buff xdp; |
2299 | |
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
2301 | #if (PAGE_SIZE < 8192) |
	frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2303 | #endif |
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2305 | |
2306 | while (likely(total_rx_packets < budget)) { |
2307 | union ixgbe_adv_rx_desc *rx_desc; |
2308 | struct ixgbe_rx_buffer *rx_buffer; |
2309 | struct sk_buff *skb; |
2310 | int rx_buffer_pgcnt; |
2311 | unsigned int size; |
2312 | |
2313 | /* return some buffers to hardware, one at a time is too slow */ |
2314 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { |
2315 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); |
2316 | cleaned_count = 0; |
2317 | } |
2318 | |
2319 | rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); |
2320 | size = le16_to_cpu(rx_desc->wb.upper.length); |
2321 | if (!size) |
2322 | break; |
2323 | |
2324 | /* This memory barrier is needed to keep us from reading |
2325 | * any other fields out of the rx_desc until we know the |
2326 | * descriptor has been written back |
2327 | */ |
2328 | dma_rmb(); |
2329 | |
		rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb,
						size, &rx_buffer_pgcnt);
2331 | |
2332 | /* retrieve a buffer from the ring */ |
2333 | if (!skb) { |
2334 | unsigned char *hard_start; |
2335 | |
2336 | hard_start = page_address(rx_buffer->page) + |
2337 | rx_buffer->page_offset - offset; |
			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
			xdp_buff_clear_frags_flag(&xdp);
2340 | #if (PAGE_SIZE > 4096) |
			/* At larger PAGE_SIZE, frame_sz depends on the frame length */
2342 | xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); |
2343 | #endif |
			skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2345 | } |
2346 | |
		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);
2349 | |
2350 | if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { |
2351 | xdp_xmit |= xdp_res; |
2352 | ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size); |
2353 | } else { |
2354 | rx_buffer->pagecnt_bias++; |
2355 | } |
2356 | total_rx_packets++; |
2357 | total_rx_bytes += size; |
2358 | } else if (skb) { |
2359 | ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); |
2360 | } else if (ring_uses_build_skb(rx_ring)) { |
			skb = ixgbe_build_skb(rx_ring, rx_buffer,
					      &xdp, rx_desc);
2363 | } else { |
			skb = ixgbe_construct_skb(rx_ring, rx_buffer,
						  &xdp, rx_desc);
2366 | } |
2367 | |
2368 | /* exit if we failed to retrieve a buffer */ |
2369 | if (!skb) { |
2370 | rx_ring->rx_stats.alloc_rx_buff_failed++; |
2371 | rx_buffer->pagecnt_bias++; |
2372 | break; |
2373 | } |
2374 | |
2375 | ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); |
2376 | cleaned_count++; |
2377 | |
2378 | /* place incomplete frames back on ring for completion */ |
2379 | if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) |
2380 | continue; |
2381 | |
2382 | /* verify the packet layout is correct */ |
2383 | if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) |
2384 | continue; |
2385 | |
2386 | /* probably a little skewed due to removing CRC */ |
2387 | total_rx_bytes += skb->len; |
2388 | |
2389 | /* populate checksum, timestamp, VLAN, and protocol */ |
2390 | ixgbe_process_skb_fields(rx_ring, rx_desc, skb); |
2391 | |
2392 | #ifdef IXGBE_FCOE |
2393 | /* if ddp, not passing to ULD unless for FCP_RSP or error */ |
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2395 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); |
2396 | /* include DDPed FCoE data */ |
2397 | if (ddp_bytes > 0) { |
2398 | if (!mss) { |
2399 | mss = rx_ring->netdev->mtu - |
2400 | sizeof(struct fcoe_hdr) - |
2401 | sizeof(struct fc_frame_header) - |
2402 | sizeof(struct fcoe_crc_eof); |
2403 | if (mss > 512) |
2404 | mss &= ~511; |
2405 | } |
2406 | total_rx_bytes += ddp_bytes; |
2407 | total_rx_packets += DIV_ROUND_UP(ddp_bytes, |
2408 | mss); |
2409 | } |
2410 | if (!ddp_bytes) { |
2411 | dev_kfree_skb_any(skb); |
2412 | continue; |
2413 | } |
2414 | } |
2415 | |
2416 | #endif /* IXGBE_FCOE */ |
2417 | ixgbe_rx_skb(q_vector, skb); |
2418 | |
2419 | /* update budget accounting */ |
2420 | total_rx_packets++; |
2421 | } |
2422 | |
2423 | if (xdp_xmit & IXGBE_XDP_REDIR) |
2424 | xdp_do_flush(); |
2425 | |
2426 | if (xdp_xmit & IXGBE_XDP_TX) { |
2427 | struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); |
2428 | |
2429 | ixgbe_xdp_ring_update_tail_locked(ring); |
2430 | } |
2431 | |
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
2436 | q_vector->rx.total_packets += total_rx_packets; |
2437 | q_vector->rx.total_bytes += total_rx_bytes; |
2438 | |
2439 | return total_rx_packets; |
2440 | } |
2441 | |
2442 | /** |
2443 | * ixgbe_configure_msix - Configure MSI-X hardware |
2444 | * @adapter: board private structure |
2445 | * |
2446 | * ixgbe_configure_msix sets up the hardware to properly generate MSI-X |
2447 | * interrupts. |
2448 | **/ |
2449 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
2450 | { |
2451 | struct ixgbe_q_vector *q_vector; |
2452 | int v_idx; |
2453 | u32 mask; |
2454 | |
2455 | /* Populate MSIX to EITR Select */ |
2456 | if (adapter->num_vfs > 32) { |
2457 | u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; |
2458 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); |
2459 | } |
2460 | |
2461 | /* |
2462 | * Populate the IVAR table and set the ITR values to the |
2463 | * corresponding register. |
2464 | */ |
2465 | for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { |
2466 | struct ixgbe_ring *ring; |
2467 | q_vector = adapter->q_vector[v_idx]; |
2468 | |
2469 | ixgbe_for_each_ring(ring, q_vector->rx) |
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2471 | |
2472 | ixgbe_for_each_ring(ring, q_vector->tx) |
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2474 | |
2475 | ixgbe_write_eitr(q_vector); |
2476 | } |
2477 | |
2478 | switch (adapter->hw.mac.type) { |
2479 | case ixgbe_mac_82598EB: |
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
2482 | break; |
2483 | case ixgbe_mac_82599EB: |
2484 | case ixgbe_mac_X540: |
2485 | case ixgbe_mac_X550: |
2486 | case ixgbe_mac_X550EM_x: |
2487 | case ixgbe_mac_x550em_a: |
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
2489 | break; |
2490 | default: |
2491 | break; |
2492 | } |
2493 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
2494 | |
2495 | /* set up to autoclear timer, and the vectors */ |
2496 | mask = IXGBE_EIMS_ENABLE_MASK; |
2497 | mask &= ~(IXGBE_EIMS_OTHER | |
2498 | IXGBE_EIMS_MAILBOX | |
2499 | IXGBE_EIMS_LSC); |
2500 | |
2501 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); |
2502 | } |
2503 | |
2504 | /** |
2505 | * ixgbe_update_itr - update the dynamic ITR value based on statistics |
2506 | * @q_vector: structure containing interrupt and ring information |
2507 | * @ring_container: structure containing ring performance data |
2508 | * |
2509 | * Stores a new ITR value based on packets and byte |
2510 | * counts during the last interrupt. The advantage of per interrupt |
2511 | * computation is faster updates and more accurate ITR for the current |
2512 | * traffic pattern. Constants in this function were computed |
2513 | * based on theoretical maximum wire speed and thresholds were set based |
2514 | * on testing data as well as attempting to minimize response time |
2515 | * while increasing bulk throughput. |
2516 | **/ |
2517 | static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, |
2518 | struct ixgbe_ring_container *ring_container) |
2519 | { |
2520 | unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS | |
2521 | IXGBE_ITR_ADAPTIVE_LATENCY; |
2522 | unsigned int avg_wire_size, packets, bytes; |
2523 | unsigned long next_update = jiffies; |
2524 | |
2525 | /* If we don't have any rings just leave ourselves set for maximum |
2526 | * possible latency so we take ourselves out of the equation. |
2527 | */ |
2528 | if (!ring_container->ring) |
2529 | return; |
2530 | |
2531 | /* If we didn't update within up to 1 - 2 jiffies we can assume |
2532 | * that either packets are coming in so slow there hasn't been |
2533 | * any work, or that there is so much work that NAPI is dealing |
2534 | * with interrupt moderation and we don't need to do anything. |
2535 | */ |
2536 | if (time_after(next_update, ring_container->next_update)) |
2537 | goto clear_counts; |
2538 | |
2539 | packets = ring_container->total_packets; |
2540 | |
2541 | /* We have no packets to actually measure against. This means |
2542 | * either one of the other queues on this vector is active or |
2543 | * we are a Tx queue doing TSO with too high of an interrupt rate. |
2544 | * |
2545 | * When this occurs just tick up our delay by the minimum value |
2546 | * and hope that this extra delay will prevent us from being called |
2547 | * without any work on our queue. |
2548 | */ |
2549 | if (!packets) { |
2550 | itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; |
2551 | if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) |
2552 | itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; |
2553 | itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; |
2554 | goto clear_counts; |
2555 | } |
2556 | |
2557 | bytes = ring_container->total_bytes; |
2558 | |
2559 | /* If packets are less than 4 or bytes are less than 9000 assume |
2560 | * insufficient data to use bulk rate limiting approach. We are |
2561 | * likely latency driven. |
2562 | */ |
2563 | if (packets < 4 && bytes < 9000) { |
2564 | itr = IXGBE_ITR_ADAPTIVE_LATENCY; |
2565 | goto adjust_by_size; |
2566 | } |
2567 | |
2568 | /* Between 4 and 48 we can assume that our current interrupt delay |
2569 | * is only slightly too low. As such we should increase it by a small |
2570 | * fixed amount. |
2571 | */ |
2572 | if (packets < 48) { |
2573 | itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; |
2574 | if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) |
2575 | itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; |
2576 | goto clear_counts; |
2577 | } |
2578 | |
2579 | /* Between 48 and 96 is our "goldilocks" zone where we are working |
2580 | * out "just right". Just report that our current ITR is good for us. |
2581 | */ |
2582 | if (packets < 96) { |
2583 | itr = q_vector->itr >> 2; |
2584 | goto clear_counts; |
2585 | } |
2586 | |
2587 | /* If packet count is 96 or greater we are likely looking at a slight |
2588 | * overrun of the delay we want. Try halving our delay to see if that |
2589 | * will cut the number of packets in half per interrupt. |
2590 | */ |
2591 | if (packets < 256) { |
2592 | itr = q_vector->itr >> 3; |
2593 | if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS) |
2594 | itr = IXGBE_ITR_ADAPTIVE_MIN_USECS; |
2595 | goto clear_counts; |
2596 | } |
2597 | |
2598 | /* The paths below assume we are dealing with a bulk ITR since number |
2599 | * of packets is 256 or greater. We are just going to have to compute |
2600 | * a value and try to bring the count under control, though for smaller |
2601 | * packet sizes there isn't much we can do as NAPI polling will likely |
2602 | * be kicking in sooner rather than later. |
2603 | */ |
2604 | itr = IXGBE_ITR_ADAPTIVE_BULK; |
2605 | |
2606 | adjust_by_size: |
2607 | /* If packet counts are 256 or greater we can assume we have a gross |
2608 | * overestimation of what the rate should be. Instead of trying to fine |
2609 | * tune it just use the formula below to try and dial in an exact value |
	 * given the current packet size of the frame.
2611 | */ |
2612 | avg_wire_size = bytes / packets; |
2613 | |
2614 | /* The following is a crude approximation of: |
2615 | * wmem_default / (size + overhead) = desired_pkts_per_int |
2616 | * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate |
2617 | * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value |
2618 | * |
2619 | * Assuming wmem_default is 212992 and overhead is 640 bytes per |
2620 | * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the |
2621 | * formula down to |
2622 | * |
2623 | * (170 * (size + 24)) / (size + 640) = ITR |
2624 | * |
2625 | * We first do some math on the packet size and then finally bitshift |
2626 | * by 8 after rounding up. We also have to account for PCIe link speed |
2627 | * difference as ITR scales based on this. |
2628 | */ |
2629 | if (avg_wire_size <= 60) { |
2630 | /* Start at 50k ints/sec */ |
2631 | avg_wire_size = 5120; |
2632 | } else if (avg_wire_size <= 316) { |
2633 | /* 50K ints/sec to 16K ints/sec */ |
2634 | avg_wire_size *= 40; |
2635 | avg_wire_size += 2720; |
2636 | } else if (avg_wire_size <= 1084) { |
2637 | /* 16K ints/sec to 9.2K ints/sec */ |
2638 | avg_wire_size *= 15; |
2639 | avg_wire_size += 11452; |
2640 | } else if (avg_wire_size < 1968) { |
2641 | /* 9.2K ints/sec to 8K ints/sec */ |
2642 | avg_wire_size *= 5; |
2643 | avg_wire_size += 22420; |
2644 | } else { |
2645 | /* plateau at a limit of 8K ints/sec */ |
2646 | avg_wire_size = 32256; |
2647 | } |
2648 | |
	/* If we are in low latency mode, halve our delay, which doubles the
	 * rate to somewhere between 100K and 16K ints/sec
2651 | */ |
2652 | if (itr & IXGBE_ITR_ADAPTIVE_LATENCY) |
2653 | avg_wire_size >>= 1; |
2654 | |
2655 | /* Resultant value is 256 times larger than it needs to be. This |
2656 | * gives us room to adjust the value as needed to either increase |
2657 | * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. |
2658 | * |
2659 | * Use addition as we have already recorded the new latency flag |
2660 | * for the ITR value. |
2661 | */ |
2662 | switch (q_vector->adapter->link_speed) { |
2663 | case IXGBE_LINK_SPEED_10GB_FULL: |
2664 | case IXGBE_LINK_SPEED_100_FULL: |
2665 | default: |
2666 | itr += DIV_ROUND_UP(avg_wire_size, |
2667 | IXGBE_ITR_ADAPTIVE_MIN_INC * 256) * |
2668 | IXGBE_ITR_ADAPTIVE_MIN_INC; |
2669 | break; |
2670 | case IXGBE_LINK_SPEED_2_5GB_FULL: |
2671 | case IXGBE_LINK_SPEED_1GB_FULL: |
2672 | case IXGBE_LINK_SPEED_10_FULL: |
2673 | if (avg_wire_size > 8064) |
2674 | avg_wire_size = 8064; |
2675 | itr += DIV_ROUND_UP(avg_wire_size, |
2676 | IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * |
2677 | IXGBE_ITR_ADAPTIVE_MIN_INC; |
2678 | break; |
2679 | } |
2680 | |
2681 | clear_counts: |
2682 | /* write back value */ |
2683 | ring_container->itr = itr; |
2684 | |
2685 | /* next update should occur within next jiffy */ |
2686 | ring_container->next_update = next_update + 1; |
2687 | |
2688 | ring_container->total_bytes = 0; |
2689 | ring_container->total_packets = 0; |
2690 | } |
2691 | |
2692 | /** |
2693 | * ixgbe_write_eitr - write EITR register in hardware specific way |
2694 | * @q_vector: structure containing interrupt and ring information |
2695 | * |
2696 | * This function is made to be called by ethtool and by the driver |
2697 | * when it needs to update EITR registers at runtime. Hardware |
2698 | * specific quirks/differences are taken care of here. |
2699 | */ |
2700 | void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) |
2701 | { |
2702 | struct ixgbe_adapter *adapter = q_vector->adapter; |
2703 | struct ixgbe_hw *hw = &adapter->hw; |
2704 | int v_idx = q_vector->v_idx; |
2705 | u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; |
2706 | |
2707 | switch (adapter->hw.mac.type) { |
2708 | case ixgbe_mac_82598EB: |
2709 | /* must write high and low 16 bits to reset counter */ |
2710 | itr_reg |= (itr_reg << 16); |
2711 | break; |
2712 | case ixgbe_mac_82599EB: |
2713 | case ixgbe_mac_X540: |
2714 | case ixgbe_mac_X550: |
2715 | case ixgbe_mac_X550EM_x: |
2716 | case ixgbe_mac_x550em_a: |
2717 | /* |
2718 | * set the WDIS bit to not clear the timer bits and cause an |
2719 | * immediate assertion of the interrupt |
2720 | */ |
2721 | itr_reg |= IXGBE_EITR_CNT_WDIS; |
2722 | break; |
2723 | default: |
2724 | break; |
2725 | } |
2726 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); |
2727 | } |
2728 | |
2729 | static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) |
2730 | { |
2731 | u32 new_itr; |
2732 | |
	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);
2735 | |
2736 | /* use the smallest value of new ITR delay calculations */ |
2737 | new_itr = min(q_vector->rx.itr, q_vector->tx.itr); |
2738 | |
2739 | /* Clear latency flag if set, shift into correct position */ |
2740 | new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY; |
2741 | new_itr <<= 2; |
2742 | |
2743 | if (new_itr != q_vector->itr) { |
2744 | /* save the algorithm value here */ |
2745 | q_vector->itr = new_itr; |
2746 | |
2747 | ixgbe_write_eitr(q_vector); |
2748 | } |
2749 | } |
2750 | |
2751 | /** |
2752 | * ixgbe_check_overtemp_subtask - check for over temperature |
2753 | * @adapter: pointer to adapter |
2754 | **/ |
2755 | static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) |
2756 | { |
2757 | struct ixgbe_hw *hw = &adapter->hw; |
2758 | u32 eicr = adapter->interrupt_event; |
2759 | s32 rc; |
2760 | |
2761 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
2762 | return; |
2763 | |
2764 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) |
2765 | return; |
2766 | |
2767 | adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; |
2768 | |
2769 | switch (hw->device_id) { |
2770 | case IXGBE_DEV_ID_82599_T3_LOM: |
2771 | /* |
2772 | * Since the warning interrupt is for both ports |
2773 | * we don't have to check if: |
2774 | * - This interrupt wasn't for our port. |
2775 | * - We may have missed the interrupt so always have to |
2776 | * check if we got a LSC |
2777 | */ |
2778 | if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && |
2779 | !(eicr & IXGBE_EICR_LSC)) |
2780 | return; |
2781 | |
2782 | if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { |
2783 | u32 speed; |
2784 | bool link_up = false; |
2785 | |
2786 | hw->mac.ops.check_link(hw, &speed, &link_up, false); |
2787 | |
2788 | if (link_up) |
2789 | return; |
2790 | } |
2791 | |
2792 | /* Check if this is not due to overtemp */ |
2793 | if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) |
2794 | return; |
2795 | |
2796 | break; |
2797 | case IXGBE_DEV_ID_X550EM_A_1G_T: |
2798 | case IXGBE_DEV_ID_X550EM_A_1G_T_L: |
2799 | rc = hw->phy.ops.check_overtemp(hw); |
2800 | if (rc != IXGBE_ERR_OVERTEMP) |
2801 | return; |
2802 | break; |
2803 | default: |
2804 | if (adapter->hw.mac.type >= ixgbe_mac_X540) |
2805 | return; |
2806 | if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) |
2807 | return; |
2808 | break; |
2809 | } |
	e_crit(drv, "%s\n", ixgbe_overheat_msg);
2811 | |
2812 | adapter->interrupt_event = 0; |
2813 | } |
2814 | |
2815 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) |
2816 | { |
2817 | struct ixgbe_hw *hw = &adapter->hw; |
2818 | |
2819 | if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && |
2820 | (eicr & IXGBE_EICR_GPI_SDP1(hw))) { |
		e_crit(probe, "Fan has stopped, replace the adapter\n");
2822 | /* write to clear the interrupt */ |
2823 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); |
2824 | } |
2825 | } |
2826 | |
2827 | static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) |
2828 | { |
2829 | struct ixgbe_hw *hw = &adapter->hw; |
2830 | |
2831 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) |
2832 | return; |
2833 | |
2834 | switch (adapter->hw.mac.type) { |
2835 | case ixgbe_mac_82599EB: |
2836 | /* |
2837 | * Need to check link state so complete overtemp check |
2838 | * on service task |
2839 | */ |
2840 | if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || |
2841 | (eicr & IXGBE_EICR_LSC)) && |
2842 | (!test_bit(__IXGBE_DOWN, &adapter->state))) { |
2843 | adapter->interrupt_event = eicr; |
2844 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; |
2845 | ixgbe_service_event_schedule(adapter); |
2846 | return; |
2847 | } |
2848 | return; |
2849 | case ixgbe_mac_x550em_a: |
2850 | if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { |
2851 | adapter->interrupt_event = eicr; |
2852 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; |
2853 | ixgbe_service_event_schedule(adapter); |
2854 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, |
2855 | IXGBE_EICR_GPI_SDP0_X550EM_a); |
2856 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, |
2857 | IXGBE_EICR_GPI_SDP0_X550EM_a); |
2858 | } |
2859 | return; |
2860 | case ixgbe_mac_X550: |
2861 | case ixgbe_mac_X540: |
2862 | if (!(eicr & IXGBE_EICR_TS)) |
2863 | return; |
2864 | break; |
2865 | default: |
2866 | return; |
2867 | } |
2868 | |
	e_crit(drv, "%s\n", ixgbe_overheat_msg);
2870 | } |
2871 | |
2872 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) |
2873 | { |
2874 | switch (hw->mac.type) { |
2875 | case ixgbe_mac_82598EB: |
2876 | if (hw->phy.type == ixgbe_phy_nl) |
2877 | return true; |
2878 | return false; |
2879 | case ixgbe_mac_82599EB: |
2880 | case ixgbe_mac_X550EM_x: |
2881 | case ixgbe_mac_x550em_a: |
2882 | switch (hw->mac.ops.get_media_type(hw)) { |
2883 | case ixgbe_media_type_fiber: |
2884 | case ixgbe_media_type_fiber_qsfp: |
2885 | return true; |
2886 | default: |
2887 | return false; |
2888 | } |
2889 | default: |
2890 | return false; |
2891 | } |
2892 | } |
2893 | |
2894 | static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) |
2895 | { |
2896 | struct ixgbe_hw *hw = &adapter->hw; |
2897 | u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw); |
2898 | |
2899 | if (!ixgbe_is_sfp(hw)) |
2900 | return; |
2901 | |
	/* Later MACs use a different SDP */
2903 | if (hw->mac.type >= ixgbe_mac_X540) |
2904 | eicr_mask = IXGBE_EICR_GPI_SDP0_X540; |
2905 | |
2906 | if (eicr & eicr_mask) { |
2907 | /* Clear the interrupt */ |
2908 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); |
2909 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
2910 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
2911 | adapter->sfp_poll_time = 0; |
2912 | ixgbe_service_event_schedule(adapter); |
2913 | } |
2914 | } |
2915 | |
2916 | if (adapter->hw.mac.type == ixgbe_mac_82599EB && |
2917 | (eicr & IXGBE_EICR_GPI_SDP1(hw))) { |
2918 | /* Clear the interrupt */ |
2919 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); |
2920 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
2921 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; |
2922 | ixgbe_service_event_schedule(adapter); |
2923 | } |
2924 | } |
2925 | } |
2926 | |
2927 | static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) |
2928 | { |
2929 | struct ixgbe_hw *hw = &adapter->hw; |
2930 | |
2931 | adapter->lsc_int++; |
2932 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
2933 | adapter->link_check_timeout = jiffies; |
2934 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
2935 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); |
2936 | IXGBE_WRITE_FLUSH(hw); |
2937 | ixgbe_service_event_schedule(adapter); |
2938 | } |
2939 | } |
2940 | |
2941 | static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, |
2942 | u64 qmask) |
2943 | { |
2944 | u32 mask; |
2945 | struct ixgbe_hw *hw = &adapter->hw; |
2946 | |
2947 | switch (hw->mac.type) { |
2948 | case ixgbe_mac_82598EB: |
2949 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
2950 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); |
2951 | break; |
2952 | case ixgbe_mac_82599EB: |
2953 | case ixgbe_mac_X540: |
2954 | case ixgbe_mac_X550: |
2955 | case ixgbe_mac_X550EM_x: |
2956 | case ixgbe_mac_x550em_a: |
2957 | mask = (qmask & 0xFFFFFFFF); |
2958 | if (mask) |
2959 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); |
2960 | mask = (qmask >> 32); |
2961 | if (mask) |
2962 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); |
2963 | break; |
2964 | default: |
2965 | break; |
2966 | } |
2967 | /* skip the flush */ |
2968 | } |
2969 | |
2970 | /** |
2971 | * ixgbe_irq_enable - Enable default interrupt generation settings |
2972 | * @adapter: board private structure |
2973 | * @queues: enable irqs for queues |
2974 | * @flush: flush register write |
2975 | **/ |
2976 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, |
2977 | bool flush) |
2978 | { |
2979 | struct ixgbe_hw *hw = &adapter->hw; |
2980 | u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); |
2981 | |
2982 | /* don't reenable LSC while waiting for link */ |
2983 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) |
2984 | mask &= ~IXGBE_EIMS_LSC; |
2985 | |
2986 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) |
2987 | switch (adapter->hw.mac.type) { |
2988 | case ixgbe_mac_82599EB: |
2989 | mask |= IXGBE_EIMS_GPI_SDP0(hw); |
2990 | break; |
2991 | case ixgbe_mac_X540: |
2992 | case ixgbe_mac_X550: |
2993 | case ixgbe_mac_X550EM_x: |
2994 | case ixgbe_mac_x550em_a: |
2995 | mask |= IXGBE_EIMS_TS; |
2996 | break; |
2997 | default: |
2998 | break; |
2999 | } |
3000 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
3001 | mask |= IXGBE_EIMS_GPI_SDP1(hw); |
3002 | switch (adapter->hw.mac.type) { |
3003 | case ixgbe_mac_82599EB: |
3004 | mask |= IXGBE_EIMS_GPI_SDP1(hw); |
3005 | mask |= IXGBE_EIMS_GPI_SDP2(hw); |
3006 | fallthrough; |
3007 | case ixgbe_mac_X540: |
3008 | case ixgbe_mac_X550: |
3009 | case ixgbe_mac_X550EM_x: |
3010 | case ixgbe_mac_x550em_a: |
3011 | if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || |
3012 | adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || |
3013 | adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) |
3014 | mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); |
3015 | if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) |
3016 | mask |= IXGBE_EICR_GPI_SDP0_X540; |
3017 | mask |= IXGBE_EIMS_ECC; |
3018 | mask |= IXGBE_EIMS_MAILBOX; |
3019 | break; |
3020 | default: |
3021 | break; |
3022 | } |
3023 | |
3024 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && |
3025 | !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) |
3026 | mask |= IXGBE_EIMS_FLOW_DIR; |
3027 | |
3028 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
3029 | if (queues) |
		ixgbe_irq_enable_queues(adapter, ~0);
3031 | if (flush) |
3032 | IXGBE_WRITE_FLUSH(&adapter->hw); |
3033 | } |
3034 | |
3035 | static irqreturn_t ixgbe_msix_other(int irq, void *data) |
3036 | { |
3037 | struct ixgbe_adapter *adapter = data; |
3038 | struct ixgbe_hw *hw = &adapter->hw; |
3039 | u32 eicr; |
3040 | |
3041 | /* |
3042 | * Workaround for Silicon errata. Use clear-by-write instead |
3043 | * of clear-by-read. Reading with EICS will return the |
	 * interrupt causes without clearing, which is later done
3045 | * with the write to EICR. |
3046 | */ |
3047 | eicr = IXGBE_READ_REG(hw, IXGBE_EICS); |
3048 | |
3049 | /* The lower 16bits of the EICR register are for the queue interrupts |
3050 | * which should be masked here in order to not accidentally clear them if |
3051 | * the bits are high when ixgbe_msix_other is called. There is a race |
3052 | * condition otherwise which results in possible performance loss |
3053 | * especially if the ixgbe_msix_other interrupt is triggering |
3054 | * consistently (as it would when PPS is turned on for the X540 device) |
3055 | */ |
3056 | eicr &= 0xFFFF0000; |
3057 | |
3058 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); |
3059 | |
3060 | if (eicr & IXGBE_EICR_LSC) |
3061 | ixgbe_check_lsc(adapter); |
3062 | |
3063 | if (eicr & IXGBE_EICR_MAILBOX) |
3064 | ixgbe_msg_task(adapter); |
3065 | |
3066 | switch (hw->mac.type) { |
3067 | case ixgbe_mac_82599EB: |
3068 | case ixgbe_mac_X540: |
3069 | case ixgbe_mac_X550: |
3070 | case ixgbe_mac_X550EM_x: |
3071 | case ixgbe_mac_x550em_a: |
3072 | if (hw->phy.type == ixgbe_phy_x550em_ext_t && |
3073 | (eicr & IXGBE_EICR_GPI_SDP0_X540)) { |
3074 | adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; |
3075 | ixgbe_service_event_schedule(adapter); |
3076 | IXGBE_WRITE_REG(hw, IXGBE_EICR, |
3077 | IXGBE_EICR_GPI_SDP0_X540); |
3078 | } |
3079 | if (eicr & IXGBE_EICR_ECC) { |
			e_info(link, "Received ECC Err, initiating reset\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3082 | ixgbe_service_event_schedule(adapter); |
3083 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); |
3084 | } |
3085 | /* Handle Flow Director Full threshold interrupt */ |
3086 | if (eicr & IXGBE_EICR_FLOW_DIR) { |
3087 | int reinit_count = 0; |
3088 | int i; |
3089 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3090 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
3093 | reinit_count++; |
3094 | } |
3095 | if (reinit_count) { |
3096 | /* no more flow director interrupts until after init */ |
3097 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); |
3098 | adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; |
3099 | ixgbe_service_event_schedule(adapter); |
3100 | } |
3101 | } |
3102 | ixgbe_check_sfp_event(adapter, eicr); |
3103 | ixgbe_check_overtemp_event(adapter, eicr); |
3104 | break; |
3105 | default: |
3106 | break; |
3107 | } |
3108 | |
3109 | ixgbe_check_fan_failure(adapter, eicr); |
3110 | |
3111 | if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) |
3112 | ixgbe_ptp_check_pps_event(adapter); |
3113 | |
3114 | /* re-enable the original interrupt state, no lsc, no queues */ |
3115 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
		ixgbe_irq_enable(adapter, false, false);
3117 | |
3118 | return IRQ_HANDLED; |
3119 | } |
3120 | |
3121 | static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) |
3122 | { |
3123 | struct ixgbe_q_vector *q_vector = data; |
3124 | |
3125 | /* EIAM disabled interrupts (on this vector) for us */ |
3126 | |
3127 | if (q_vector->rx.ring || q_vector->tx.ring) |
		napi_schedule_irqoff(&q_vector->napi);
3129 | |
3130 | return IRQ_HANDLED; |
3131 | } |
3132 | |
3133 | /** |
3134 | * ixgbe_poll - NAPI Rx polling callback |
3135 | * @napi: structure for representing this polling device |
3136 | * @budget: how many packets driver is allowed to clean |
3137 | * |
 * This function is used for legacy and MSI interrupts in NAPI mode
3139 | **/ |
3140 | int ixgbe_poll(struct napi_struct *napi, int budget) |
3141 | { |
3142 | struct ixgbe_q_vector *q_vector = |
3143 | container_of(napi, struct ixgbe_q_vector, napi); |
3144 | struct ixgbe_adapter *adapter = q_vector->adapter; |
3145 | struct ixgbe_ring *ring; |
3146 | int per_ring_budget, work_done = 0; |
3147 | bool clean_complete = true; |
3148 | |
3149 | #ifdef CONFIG_IXGBE_DCA |
3150 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
3151 | ixgbe_update_dca(q_vector); |
3152 | #endif |
3153 | |
3154 | ixgbe_for_each_ring(ring, q_vector->tx) { |
		bool wd = ring->xsk_pool ?
			  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
			  ixgbe_clean_tx_irq(q_vector, ring, budget);
3158 | |
3159 | if (!wd) |
3160 | clean_complete = false; |
3161 | } |
3162 | |
3163 | /* Exit if we are called by netpoll */ |
3164 | if (budget <= 0) |
3165 | return budget; |
3166 | |
3167 | /* attempt to distribute budget to each queue fairly, but don't allow |
3168 | * the budget to go below 1 because we'll exit polling */ |
3169 | if (q_vector->rx.count > 1) |
3170 | per_ring_budget = max(budget/q_vector->rx.count, 1); |
3171 | else |
3172 | per_ring_budget = budget; |
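	/* Illustration: with the default NAPI budget of 64 and three Rx
	 * rings on this vector, each ring is allotted max(64 / 3, 1) = 21
	 * descriptors to clean per poll.
	 */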
3173 | |
3174 | ixgbe_for_each_ring(ring, q_vector->rx) { |
		int cleaned = ring->xsk_pool ?
			      ixgbe_clean_rx_irq_zc(q_vector, ring,
						    per_ring_budget) :
			      ixgbe_clean_rx_irq(q_vector, ring,
						 per_ring_budget);
3180 | |
3181 | work_done += cleaned; |
3182 | if (cleaned >= per_ring_budget) |
3183 | clean_complete = false; |
3184 | } |
3185 | |
3186 | /* If all work not completed, return budget and keep polling */ |
3187 | if (!clean_complete) |
3188 | return budget; |
3189 | |
3190 | /* all work done, exit the polling mode */ |
3191 | if (likely(napi_complete_done(napi, work_done))) { |
3192 | if (adapter->rx_itr_setting & 1) |
3193 | ixgbe_set_itr(q_vector); |
3194 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
3195 | ixgbe_irq_enable_queues(adapter, |
3196 | BIT_ULL(q_vector->v_idx)); |
3197 | } |
3198 | |
3199 | return min(work_done, budget - 1); |
3200 | } |
3201 | |
3202 | /** |
3203 | * ixgbe_request_msix_irqs - Initialize MSI-X interrupts |
3204 | * @adapter: board private structure |
3205 | * |
3206 | * ixgbe_request_msix_irqs allocates MSI-X vectors and requests |
3207 | * interrupts from the kernel. |
3208 | **/ |
3209 | static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) |
3210 | { |
3211 | struct net_device *netdev = adapter->netdev; |
3212 | unsigned int ri = 0, ti = 0; |
3213 | int vector, err; |
3214 | |
3215 | for (vector = 0; vector < adapter->num_q_vectors; vector++) { |
3216 | struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; |
3217 | struct msix_entry *entry = &adapter->msix_entries[vector]; |
3218 | |
3219 | if (q_vector->tx.ring && q_vector->rx.ring) { |
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", netdev->name, ri++);
3222 | ti++; |
3223 | } else if (q_vector->rx.ring) { |
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", netdev->name, ri++);
3226 | } else if (q_vector->tx.ring) { |
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", netdev->name, ti++);
3229 | } else { |
3230 | /* skip this unused q_vector */ |
3231 | continue; |
3232 | } |
		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
				  q_vector->name, q_vector);
3235 | if (err) { |
			e_err(probe, "request_irq failed for MSIX interrupt Error: %d\n",
			      err);
3238 | goto free_queue_irqs; |
3239 | } |
3240 | /* If Flow Director is enabled, set interrupt affinity */ |
3241 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
3242 | /* assign the mask for this irq */ |
			irq_update_affinity_hint(entry->vector,
						 &q_vector->affinity_mask);
3245 | } |
3246 | } |
3247 | |
	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_other, 0, netdev->name, adapter);
3250 | if (err) { |
		e_err(probe, "request_irq for msix_other failed: %d\n", err);
3252 | goto free_queue_irqs; |
3253 | } |
3254 | |
3255 | return 0; |
3256 | |
3257 | free_queue_irqs: |
3258 | while (vector) { |
3259 | vector--; |
		irq_update_affinity_hint(adapter->msix_entries[vector].vector,
					 NULL);
3262 | free_irq(adapter->msix_entries[vector].vector, |
3263 | adapter->q_vector[vector]); |
3264 | } |
3265 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
3268 | adapter->msix_entries = NULL; |
3269 | return err; |
3270 | } |
3271 | |
3272 | /** |
3273 | * ixgbe_intr - legacy mode Interrupt Handler |
3274 | * @irq: interrupt number |
3275 | * @data: pointer to a network interface device structure |
3276 | **/ |
3277 | static irqreturn_t ixgbe_intr(int irq, void *data) |
3278 | { |
3279 | struct ixgbe_adapter *adapter = data; |
3280 | struct ixgbe_hw *hw = &adapter->hw; |
3281 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
3282 | u32 eicr; |
3283 | |
3284 | /* |
3285 | * Workaround for silicon errata #26 on 82598. Mask the interrupt |
3286 | * before the read of EICR. |
3287 | */ |
3288 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); |
3289 | |
3290 | /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read |
3291 | * therefore no explicit interrupt disable is necessary */ |
3292 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); |
3293 | if (!eicr) { |
3294 | /* |
3295 | * shared interrupt alert! |
3296 | * make sure interrupts are enabled because the read will |
3297 | * have disabled interrupts due to EIAM |
3298 | * finish the workaround of silicon errata on 82598. Unmask |
3299 | * the interrupt that we masked before the EICR read. |
3300 | */ |
3301 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
			ixgbe_irq_enable(adapter, true, true);
3303 | return IRQ_NONE; /* Not our interrupt */ |
3304 | } |
3305 | |
3306 | if (eicr & IXGBE_EICR_LSC) |
3307 | ixgbe_check_lsc(adapter); |
3308 | |
3309 | switch (hw->mac.type) { |
3310 | case ixgbe_mac_82599EB: |
3311 | ixgbe_check_sfp_event(adapter, eicr); |
3312 | fallthrough; |
3313 | case ixgbe_mac_X540: |
3314 | case ixgbe_mac_X550: |
3315 | case ixgbe_mac_X550EM_x: |
3316 | case ixgbe_mac_x550em_a: |
3317 | if (eicr & IXGBE_EICR_ECC) { |
			e_info(link, "Received ECC Err, initiating reset\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3320 | ixgbe_service_event_schedule(adapter); |
3321 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); |
3322 | } |
3323 | ixgbe_check_overtemp_event(adapter, eicr); |
3324 | break; |
3325 | default: |
3326 | break; |
3327 | } |
3328 | |
3329 | ixgbe_check_fan_failure(adapter, eicr); |
3330 | if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) |
3331 | ixgbe_ptp_check_pps_event(adapter); |
3332 | |
3333 | /* would disable interrupts here but EIAM disabled it */ |
	napi_schedule_irqoff(&q_vector->napi);
3335 | |
3336 | /* |
3337 | * re-enable link(maybe) and non-queue interrupts, no flush. |
3338 | * ixgbe_poll will re-enable the queue interrupts |
3339 | */ |
3340 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
		ixgbe_irq_enable(adapter, false, false);
3342 | |
3343 | return IRQ_HANDLED; |
3344 | } |
3345 | |
3346 | /** |
3347 | * ixgbe_request_irq - initialize interrupts |
3348 | * @adapter: board private structure |
3349 | * |
3350 | * Attempts to configure interrupts using the best available |
3351 | * capabilities of the hardware and kernel. |
3352 | **/ |
3353 | static int ixgbe_request_irq(struct ixgbe_adapter *adapter) |
3354 | { |
3355 | struct net_device *netdev = adapter->netdev; |
3356 | int err; |
3357 | |
3358 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3359 | err = ixgbe_request_msix_irqs(adapter); |
3360 | else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) |
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, adapter);
	else
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, adapter);
3366 | |
3367 | if (err) |
		e_err(probe, "request_irq failed, Error %d\n", err);
3369 | |
3370 | return err; |
3371 | } |
3372 | |
3373 | static void ixgbe_free_irq(struct ixgbe_adapter *adapter) |
3374 | { |
3375 | int vector; |
3376 | |
3377 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
3378 | free_irq(adapter->pdev->irq, adapter); |
3379 | return; |
3380 | } |
3381 | |
3382 | if (!adapter->msix_entries) |
3383 | return; |
3384 | |
3385 | for (vector = 0; vector < adapter->num_q_vectors; vector++) { |
3386 | struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; |
3387 | struct msix_entry *entry = &adapter->msix_entries[vector]; |
3388 | |
3389 | /* free only the irqs that were actually requested */ |
3390 | if (!q_vector->rx.ring && !q_vector->tx.ring) |
3391 | continue; |
3392 | |
3393 | /* clear the affinity_mask in the IRQ descriptor */ |
		irq_update_affinity_hint(entry->vector, NULL);
3395 | |
3396 | free_irq(entry->vector, q_vector); |
3397 | } |
3398 | |
3399 | free_irq(adapter->msix_entries[vector].vector, adapter); |
3400 | } |
3401 | |
3402 | /** |
3403 | * ixgbe_irq_disable - Mask off interrupt generation on the NIC |
3404 | * @adapter: board private structure |
3405 | **/ |
3406 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) |
3407 | { |
3408 | switch (adapter->hw.mac.type) { |
3409 | case ixgbe_mac_82598EB: |
3410 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); |
3411 | break; |
3412 | case ixgbe_mac_82599EB: |
3413 | case ixgbe_mac_X540: |
3414 | case ixgbe_mac_X550: |
3415 | case ixgbe_mac_X550EM_x: |
3416 | case ixgbe_mac_x550em_a: |
3417 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); |
3418 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); |
3419 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
3420 | break; |
3421 | default: |
3422 | break; |
3423 | } |
3424 | IXGBE_WRITE_FLUSH(&adapter->hw); |
3425 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
3426 | int vector; |
3427 | |
3428 | for (vector = 0; vector < adapter->num_q_vectors; vector++) |
			synchronize_irq(adapter->msix_entries[vector].vector);

		synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
3434 | } |
3435 | } |
3436 | |
3437 | /** |
3438 | * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts |
3439 | * @adapter: board private structure |
3440 | * |
3441 | **/ |
3442 | static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) |
3443 | { |
3444 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
3445 | |
3446 | ixgbe_write_eitr(q_vector); |
3447 | |
	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	e_info(hw, "Legacy interrupt IVAR setup done\n");
3452 | } |
3453 | |
3454 | /** |
3455 | * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset |
3456 | * @adapter: board private structure |
3457 | * @ring: structure containing ring specific data |
3458 | * |
3459 | * Configure the Tx descriptor ring after a reset. |
3460 | **/ |
3461 | void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, |
3462 | struct ixgbe_ring *ring) |
3463 | { |
3464 | struct ixgbe_hw *hw = &adapter->hw; |
3465 | u64 tdba = ring->dma; |
3466 | int wait_loop = 10; |
3467 | u32 txdctl = IXGBE_TXDCTL_ENABLE; |
3468 | u8 reg_idx = ring->reg_idx; |
3469 | |
3470 | ring->xsk_pool = NULL; |
3471 | if (ring_is_xdp(ring)) |
3472 | ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); |
3473 | |
3474 | /* disable queue to avoid issues while updating state */ |
3475 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); |
3476 | IXGBE_WRITE_FLUSH(hw); |
3477 | |
3478 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), |
3479 | (tdba & DMA_BIT_MASK(32))); |
3480 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); |
3481 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), |
3482 | ring->count * sizeof(union ixgbe_adv_tx_desc)); |
3483 | IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); |
3484 | IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); |
3485 | ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); |
3486 | |
3487 | /* |
3488 | * set WTHRESH to encourage burst writeback, it should not be set |
3489 | * higher than 1 when: |
3490 | * - ITR is 0 as it could cause false TX hangs |
3491 | * - ITR is set to > 100k int/sec and BQL is enabled |
3492 | * |
3493 | * In order to avoid issues WTHRESH + PTHRESH should always be equal |
3494 | * to or less than the number of on chip descriptors, which is |
3495 | * currently 40. |
3496 | */ |
3497 | if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) |
3498 | txdctl |= 1u << 16; /* WTHRESH = 1 */ |
3499 | else |
3500 | txdctl |= 8u << 16; /* WTHRESH = 8 */ |
3501 | |
3502 | /* |
3503 | * Setting PTHRESH to 32 both improves performance |
3504 | * and avoids a TX hang with DFP enabled |
3505 | */ |
3506 | txdctl |= (1u << 8) | /* HTHRESH = 1 */ |
3507 | 32; /* PTHRESH = 32 */ |
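	/* Per the shifts above, the thresholds land in TXDCTL as
	 * PTHRESH in the low byte, HTHRESH at bit 8 and WTHRESH at
	 * bit 16; e.g. with WTHRESH = 8 the value written below is
	 * IXGBE_TXDCTL_ENABLE | (8 << 16) | (1 << 8) | 32.
	 */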
3508 | |
3509 | /* reinitialize flowdirector state */ |
3510 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
3511 | ring->atr_sample_rate = adapter->atr_sample_rate; |
3512 | ring->atr_count = 0; |
		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3514 | } else { |
3515 | ring->atr_sample_rate = 0; |
3516 | } |
3517 | |
3518 | /* initialize XPS */ |
	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3520 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
3521 | |
3522 | if (q_vector) |
			netif_set_xps_queue(ring->netdev,
					    &q_vector->affinity_mask,
					    ring->queue_index);
3526 | } |
3527 | |
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3529 | |
3530 | /* reinitialize tx_buffer_info */ |
3531 | memset(ring->tx_buffer_info, 0, |
3532 | sizeof(struct ixgbe_tx_buffer) * ring->count); |
3533 | |
3534 | /* enable queue */ |
3535 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); |
3536 | |
3537 | /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ |
3538 | if (hw->mac.type == ixgbe_mac_82598EB && |
3539 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
3540 | return; |
3541 | |
3542 | /* poll to verify queue is enabled */ |
3543 | do { |
		usleep_range(1000, 2000);
3545 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
3546 | } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); |
3547 | if (!wait_loop) |
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3549 | } |
3550 | |
3551 | static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) |
3552 | { |
3553 | struct ixgbe_hw *hw = &adapter->hw; |
3554 | u32 rttdcs, mtqc; |
3555 | u8 tcs = adapter->hw_tcs; |
3556 | |
3557 | if (hw->mac.type == ixgbe_mac_82598EB) |
3558 | return; |
3559 | |
3560 | /* disable the arbiter while setting MTQC */ |
3561 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); |
3562 | rttdcs |= IXGBE_RTTDCS_ARBDIS; |
3563 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
3564 | |
3565 | /* set transmit pool layout */ |
3566 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
3567 | mtqc = IXGBE_MTQC_VT_ENA; |
3568 | if (tcs > 4) |
3569 | mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; |
3570 | else if (tcs > 1) |
3571 | mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; |
3572 | else if (adapter->ring_feature[RING_F_VMDQ].mask == |
3573 | IXGBE_82599_VMDQ_4Q_MASK) |
3574 | mtqc |= IXGBE_MTQC_32VF; |
3575 | else |
3576 | mtqc |= IXGBE_MTQC_64VF; |
3577 | } else { |
3578 | if (tcs > 4) { |
3579 | mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; |
3580 | } else if (tcs > 1) { |
3581 | mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; |
3582 | } else { |
3583 | u8 max_txq = adapter->num_tx_queues + |
3584 | adapter->num_xdp_queues; |
3585 | if (max_txq > 63) |
3586 | mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; |
3587 | else |
3588 | mtqc = IXGBE_MTQC_64Q_1PB; |
3589 | } |
3590 | } |
3591 | |
3592 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); |
3593 | |
3594 | /* Enable Security TX Buffer IFG for multiple pb */ |
3595 | if (tcs) { |
3596 | u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); |
3597 | sectx |= IXGBE_SECTX_DCB; |
3598 | IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); |
3599 | } |
3600 | |
3601 | /* re-enable the arbiter */ |
3602 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; |
3603 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
3604 | } |
3605 | |
3606 | /** |
3607 | * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset |
3608 | * @adapter: board private structure |
3609 | * |
3610 | * Configure the Tx unit of the MAC after a reset. |
3611 | **/ |
3612 | static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) |
3613 | { |
3614 | struct ixgbe_hw *hw = &adapter->hw; |
3615 | u32 dmatxctl; |
3616 | u32 i; |
3617 | |
3618 | ixgbe_setup_mtqc(adapter); |
3619 | |
3620 | if (hw->mac.type != ixgbe_mac_82598EB) { |
3621 | /* DMATXCTL.EN must be before Tx queues are enabled */ |
3622 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); |
3623 | dmatxctl |= IXGBE_DMATXCTL_TE; |
3624 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); |
3625 | } |
3626 | |
3627 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
3628 | for (i = 0; i < adapter->num_tx_queues; i++) |
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3632 | } |
3633 | |
3634 | static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, |
3635 | struct ixgbe_ring *ring) |
3636 | { |
3637 | struct ixgbe_hw *hw = &adapter->hw; |
3638 | u8 reg_idx = ring->reg_idx; |
3639 | u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); |
3640 | |
3641 | srrctl |= IXGBE_SRRCTL_DROP_EN; |
3642 | |
3643 | IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); |
3644 | } |
3645 | |
3646 | static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, |
3647 | struct ixgbe_ring *ring) |
3648 | { |
3649 | struct ixgbe_hw *hw = &adapter->hw; |
3650 | u8 reg_idx = ring->reg_idx; |
3651 | u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); |
3652 | |
3653 | srrctl &= ~IXGBE_SRRCTL_DROP_EN; |
3654 | |
3655 | IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); |
3656 | } |
3657 | |
3658 | #ifdef CONFIG_IXGBE_DCB |
3659 | void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) |
3660 | #else |
3661 | static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) |
3662 | #endif |
3663 | { |
3664 | int i; |
3665 | bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; |
3666 | |
3667 | if (adapter->ixgbe_ieee_pfc) |
3668 | pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); |
3669 | |
3670 | /* |
3671 | * We should set the drop enable bit if: |
3672 | * SR-IOV is enabled |
3673 | * or |
3674 | * Number of Rx queues > 1 and flow control is disabled |
3675 | * |
3676 | * This allows us to avoid head of line blocking for security |
3677 | * and performance reasons. |
3678 | */ |
3679 | if (adapter->num_vfs || (adapter->num_rx_queues > 1 && |
3680 | !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { |
3681 | for (i = 0; i < adapter->num_rx_queues; i++) |
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3686 | } |
3687 | } |
3688 | |
3689 | #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 |
3690 | |
3691 | static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, |
3692 | struct ixgbe_ring *rx_ring) |
3693 | { |
3694 | struct ixgbe_hw *hw = &adapter->hw; |
3695 | u32 srrctl; |
3696 | u8 reg_idx = rx_ring->reg_idx; |
3697 | |
3698 | if (hw->mac.type == ixgbe_mac_82598EB) { |
3699 | u16 mask = adapter->ring_feature[RING_F_RSS].mask; |
3700 | |
3701 | /* |
3702 | * if VMDq is not active we must program one srrctl register |
3703 | * per RSS queue since we have enabled RDRXCTL.MVMEN |
3704 | */ |
3705 | reg_idx &= mask; |
3706 | } |
3707 | |
3708 | /* configure header buffer length, needed for RSC */ |
3709 | srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; |
3710 | |
3711 | /* configure the packet buffer length */ |
3712 | if (rx_ring->xsk_pool) { |
		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
3714 | |
		/* If the MAC supports setting RXDCTL.RLPML, the
3716 | * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and |
3717 | * RXDCTL.RLPML is set to the actual UMEM buffer |
3718 | * size. If not, then we are stuck with a 1k buffer |
3719 | * size resolution. In this case frames larger than |
3720 | * the UMEM buffer size viewed in a 1k resolution will |
3721 | * be dropped. |
3722 | */ |
3723 | if (hw->mac.type != ixgbe_mac_82599EB) |
3724 | srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
3725 | else |
3726 | srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
3727 | } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { |
3728 | srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
3729 | } else { |
3730 | srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
3731 | } |
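	/* BSIZEPKT is written in the 1 KB granularity noted above
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is a shift of 10), so e.g.
	 * IXGBE_RXBUFFER_3K (3072 bytes) is programmed as 3.
	 */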
3732 | |
3733 | /* configure descriptor type */ |
3734 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
3735 | |
3736 | IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); |
3737 | } |
3738 | |
3739 | /** |
3740 | * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries |
3741 | * @adapter: device handle |
3742 | * |
3743 | * - 82598/82599/X540: 128 |
3744 | * - X550(non-SRIOV mode): 512 |
3745 | * - X550(SRIOV mode): 64 |
3746 | */ |
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3748 | { |
3749 | if (adapter->hw.mac.type < ixgbe_mac_X550) |
3750 | return 128; |
3751 | else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
3752 | return 64; |
3753 | else |
3754 | return 512; |
3755 | } |
3756 | |
3757 | /** |
3758 | * ixgbe_store_key - Write the RSS key to HW |
3759 | * @adapter: device handle |
3760 | * |
3761 | * Write the RSS key stored in adapter.rss_key to HW. |
3762 | */ |
3763 | void ixgbe_store_key(struct ixgbe_adapter *adapter) |
3764 | { |
3765 | struct ixgbe_hw *hw = &adapter->hw; |
3766 | int i; |
3767 | |
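	/* Ten 32-bit RSSRK writes cover the whole key; IXGBE_RSS_KEY_SIZE
	 * is presumably 10 * sizeof(u32) = 40 bytes.
	 */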
3768 | for (i = 0; i < 10; i++) |
3769 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); |
3770 | } |
3771 | |
3772 | /** |
3773 | * ixgbe_init_rss_key - Initialize adapter RSS key |
3774 | * @adapter: device handle |
3775 | * |
3776 | * Allocates and initializes the RSS key if it is not allocated. |
3777 | **/ |
static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
{
	u32 *rss_key;
3781 | |
3782 | if (!adapter->rss_key) { |
3783 | rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); |
3784 | if (unlikely(!rss_key)) |
3785 | return -ENOMEM; |
3786 | |
		netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3788 | adapter->rss_key = rss_key; |
3789 | } |
3790 | |
3791 | return 0; |
3792 | } |
3793 | |
3794 | /** |
3795 | * ixgbe_store_reta - Write the RETA table to HW |
3796 | * @adapter: device handle |
3797 | * |
3798 | * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. |
3799 | */ |
3800 | void ixgbe_store_reta(struct ixgbe_adapter *adapter) |
3801 | { |
3802 | u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); |
3803 | struct ixgbe_hw *hw = &adapter->hw; |
3804 | u32 reta = 0; |
3805 | u32 indices_multi; |
3806 | u8 *indir_tbl = adapter->rss_indir_tbl; |
3807 | |
3808 | /* Fill out the redirection table as follows: |
3809 | * - 82598: 8 bit wide entries containing pair of 4 bit RSS |
3810 | * indices. |
3811 | * - 82599/X540: 8 bit wide entries containing 4 bit RSS index |
3812 | * - X550: 8 bit wide entries containing 6 bit RSS index |
3813 | */ |
3814 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
3815 | indices_multi = 0x11; |
3816 | else |
3817 | indices_multi = 0x1; |
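	/* A multiplier of 0x11 replicates a 4-bit RSS index into both
	 * nibbles of the byte for the 82598's paired-index format, e.g.
	 * index 5 becomes 0x55.  The loop below then packs four such
	 * bytes into each 32-bit RETA register via the (i & 0x3) * 8
	 * shift.
	 */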
3818 | |
3819 | /* Write redirection table to HW */ |
3820 | for (i = 0; i < reta_entries; i++) { |
3821 | reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; |
3822 | if ((i & 3) == 3) { |
3823 | if (i < 128) |
3824 | IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); |
3825 | else |
3826 | IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), |
3827 | reta); |
3828 | reta = 0; |
3829 | } |
3830 | } |
3831 | } |
3832 | |
3833 | /** |
3834 | * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) |
3835 | * @adapter: device handle |
3836 | * |
3837 | * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. |
3838 | */ |
3839 | static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) |
3840 | { |
3841 | u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); |
3842 | struct ixgbe_hw *hw = &adapter->hw; |
3843 | u32 vfreta = 0; |
3844 | |
3845 | /* Write redirection table to HW */ |
3846 | for (i = 0; i < reta_entries; i++) { |
3847 | u16 pool = adapter->num_rx_pools; |
3848 | |
3849 | vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; |
3850 | if ((i & 3) != 3) |
3851 | continue; |
3852 | |
3853 | while (pool--) |
3854 | IXGBE_WRITE_REG(hw, |
3855 | IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), |
3856 | vfreta); |
3857 | vfreta = 0; |
3858 | } |
3859 | } |
3860 | |
3861 | static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) |
3862 | { |
3863 | u32 i, j; |
3864 | u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); |
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3866 | |
3867 | /* Program table for at least 4 queues w/ SR-IOV so that VFs can |
3868 | * make full use of any rings they may have. We will use the |
3869 | * PSRTYPE register to control how many rings we use within the PF. |
3870 | */ |
3871 | if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) |
3872 | rss_i = 4; |
3873 | |
3874 | /* Fill out hash function seeds */ |
3875 | ixgbe_store_key(adapter); |
3876 | |
3877 | /* Fill out redirection table */ |
3878 | memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); |
3879 | |
3880 | for (i = 0, j = 0; i < reta_entries; i++, j++) { |
3881 | if (j == rss_i) |
3882 | j = 0; |
3883 | |
3884 | adapter->rss_indir_tbl[i] = j; |
3885 | } |
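	/* The table now repeats queue indices modulo rss_i, e.g. with
	 * rss_i == 4: 0, 1, 2, 3, 0, 1, 2, 3, ...
	 */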
3886 | |
3887 | ixgbe_store_reta(adapter); |
3888 | } |
3889 | |
3890 | static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) |
3891 | { |
3892 | struct ixgbe_hw *hw = &adapter->hw; |
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3894 | int i, j; |
3895 | |
3896 | /* Fill out hash function seeds */ |
3897 | for (i = 0; i < 10; i++) { |
3898 | u16 pool = adapter->num_rx_pools; |
3899 | |
3900 | while (pool--) |
3901 | IXGBE_WRITE_REG(hw, |
3902 | IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), |
3903 | *(adapter->rss_key + i)); |
3904 | } |
3905 | |
3906 | /* Fill out the redirection table */ |
3907 | for (i = 0, j = 0; i < 64; i++, j++) { |
3908 | if (j == rss_i) |
3909 | j = 0; |
3910 | |
3911 | adapter->rss_indir_tbl[i] = j; |
3912 | } |
3913 | |
3914 | ixgbe_store_vfreta(adapter); |
3915 | } |
3916 | |
3917 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
3918 | { |
3919 | struct ixgbe_hw *hw = &adapter->hw; |
	u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3921 | u32 rxcsum; |
3922 | |
3923 | /* Disable indicating checksum in descriptor, enables RSS hash */ |
3924 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); |
3925 | rxcsum |= IXGBE_RXCSUM_PCSD; |
3926 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); |
3927 | |
3928 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
3929 | if (adapter->ring_feature[RING_F_RSS].mask) |
3930 | mrqc = IXGBE_MRQC_RSSEN; |
3931 | } else { |
3932 | u8 tcs = adapter->hw_tcs; |
3933 | |
3934 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
3935 | if (tcs > 4) |
3936 | mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ |
3937 | else if (tcs > 1) |
3938 | mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ |
3939 | else if (adapter->ring_feature[RING_F_VMDQ].mask == |
3940 | IXGBE_82599_VMDQ_4Q_MASK) |
3941 | mrqc = IXGBE_MRQC_VMDQRSS32EN; |
3942 | else |
3943 | mrqc = IXGBE_MRQC_VMDQRSS64EN; |
3944 | |
3945 | /* Enable L3/L4 for Tx Switched packets only for X550, |
3946 | * older devices do not support this feature |
3947 | */ |
3948 | if (hw->mac.type >= ixgbe_mac_X550) |
3949 | mrqc |= IXGBE_MRQC_L3L4TXSWEN; |
3950 | } else { |
3951 | if (tcs > 4) |
3952 | mrqc = IXGBE_MRQC_RTRSS8TCEN; |
3953 | else if (tcs > 1) |
3954 | mrqc = IXGBE_MRQC_RTRSS4TCEN; |
3955 | else |
3956 | mrqc = IXGBE_MRQC_RSSEN; |
3957 | } |
3958 | } |
3959 | |
3960 | /* Perform hash on these packet types */ |
3961 | rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | |
3962 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | |
3963 | IXGBE_MRQC_RSS_FIELD_IPV6 | |
3964 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; |
3965 | |
3966 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) |
3967 | rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; |
3968 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) |
3969 | rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; |
3970 | |
3971 | if ((hw->mac.type >= ixgbe_mac_X550) && |
3972 | (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { |
3973 | u16 pool = adapter->num_rx_pools; |
3974 | |
3975 | /* Enable VF RSS mode */ |
3976 | mrqc |= IXGBE_MRQC_MULTIPLE_RSS; |
3977 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
3978 | |
3979 | /* Setup RSS through the VF registers */ |
3980 | ixgbe_setup_vfreta(adapter); |
3981 | vfmrqc = IXGBE_MRQC_RSSEN; |
3982 | vfmrqc |= rss_field; |
3983 | |
3984 | while (pool--) |
3985 | IXGBE_WRITE_REG(hw, |
3986 | IXGBE_PFVFMRQC(VMDQ_P(pool)), |
3987 | vfmrqc); |
3988 | } else { |
3989 | ixgbe_setup_reta(adapter); |
3990 | mrqc |= rss_field; |
3991 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
3992 | } |
3993 | } |
3994 | |
3995 | /** |
3996 | * ixgbe_configure_rscctl - enable RSC for the indicated ring |
3997 | * @adapter: address of board private structure |
3998 | * @ring: structure containing ring specific data |
3999 | **/ |
4000 | static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, |
4001 | struct ixgbe_ring *ring) |
4002 | { |
4003 | struct ixgbe_hw *hw = &adapter->hw; |
4004 | u32 rscctrl; |
4005 | u8 reg_idx = ring->reg_idx; |
4006 | |
4007 | if (!ring_is_rsc_enabled(ring)) |
4008 | return; |
4009 | |
4010 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); |
4011 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
4012 | /* |
4013 | * we must limit the number of descriptors so that the |
4014 | * total size of max desc * buf_len is not greater |
4015 | * than 65536 |
4016 | */ |
4017 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
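	/* e.g. 16 descriptors of 4 KB buffers reach the 65536 byte
	 * ceiling exactly; smaller buffer sizes stay safely below it.
	 */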
4018 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); |
4019 | } |
4020 | |
4021 | #define IXGBE_MAX_RX_DESC_POLL 10 |
4022 | static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, |
4023 | struct ixgbe_ring *ring) |
4024 | { |
4025 | struct ixgbe_hw *hw = &adapter->hw; |
4026 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; |
4027 | u32 rxdctl; |
4028 | u8 reg_idx = ring->reg_idx; |
4029 | |
	if (ixgbe_removed(hw->hw_addr))
4031 | return; |
4032 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ |
4033 | if (hw->mac.type == ixgbe_mac_82598EB && |
4034 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
4035 | return; |
4036 | |
4037 | do { |
		usleep_range(1000, 2000);
4039 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
4040 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); |
4041 | |
4042 | if (!wait_loop) { |
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
		      reg_idx);
4045 | } |
4046 | } |
4047 | |
4048 | void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, |
4049 | struct ixgbe_ring *ring) |
4050 | { |
4051 | struct ixgbe_hw *hw = &adapter->hw; |
4052 | union ixgbe_adv_rx_desc *rx_desc; |
4053 | u64 rdba = ring->dma; |
4054 | u32 rxdctl; |
4055 | u8 reg_idx = ring->reg_idx; |
4056 | |
	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4058 | ring->xsk_pool = ixgbe_xsk_pool(adapter, ring); |
4059 | if (ring->xsk_pool) { |
4060 | WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
4061 | MEM_TYPE_XSK_BUFF_POOL, |
4062 | NULL)); |
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4064 | } else { |
4065 | WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
4066 | MEM_TYPE_PAGE_SHARED, NULL)); |
4067 | } |
4068 | |
4069 | /* disable queue to avoid use of these values while updating state */ |
4070 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
4071 | rxdctl &= ~IXGBE_RXDCTL_ENABLE; |
4072 | |
4073 | /* write value back with RXDCTL.ENABLE bit cleared */ |
4074 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
4075 | IXGBE_WRITE_FLUSH(hw); |
4076 | |
4077 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); |
4078 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); |
4079 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), |
4080 | ring->count * sizeof(union ixgbe_adv_rx_desc)); |
4081 | /* Force flushing of IXGBE_RDLEN to prevent MDD */ |
4082 | IXGBE_WRITE_FLUSH(hw); |
4083 | |
4084 | IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); |
4085 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); |
4086 | ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); |
4087 | |
	ixgbe_configure_srrctl(adapter, ring);
4089 | ixgbe_configure_rscctl(adapter, ring); |
4090 | |
4091 | if (hw->mac.type == ixgbe_mac_82598EB) { |
4092 | /* |
4093 | * enable cache line friendly hardware writes: |
4094 | * PTHRESH=32 descriptors (half the internal cache), |
4095 | * this also removes ugly rx_no_buffer_count increment |
4096 | * HTHRESH=4 descriptors (to minimize latency on fetch) |
4097 | * WTHRESH=8 burst writeback up to two cache lines |
4098 | */ |
4099 | rxdctl &= ~0x3FFFFF; |
4100 | rxdctl |= 0x080420; |
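		/* 0x080420 == (8 << 16) | (4 << 8) | 0x20, i.e. exactly
		 * the WTHRESH/HTHRESH/PTHRESH values described above.
		 */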
4101 | #if (PAGE_SIZE < 8192) |
4102 | /* RXDCTL.RLPML does not work on 82599 */ |
4103 | } else if (hw->mac.type != ixgbe_mac_82599EB) { |
4104 | rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | |
4105 | IXGBE_RXDCTL_RLPML_EN); |
4106 | |
4107 | /* Limit the maximum frame size so we don't overrun the skb. |
4108 | * This can happen in SRIOV mode when the MTU of the VF is |
4109 | * higher than the MTU of the PF. |
4110 | */ |
4111 | if (ring_uses_build_skb(ring) && |
4112 | !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) |
4113 | rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | |
4114 | IXGBE_RXDCTL_RLPML_EN; |
4115 | #endif |
4116 | } |
4117 | |
	ring->rx_offset = ixgbe_rx_offset(ring);
4119 | |
4120 | if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { |
		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4122 | |
4123 | rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | |
4124 | IXGBE_RXDCTL_RLPML_EN); |
4125 | rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; |
4126 | |
4127 | ring->rx_buf_len = xsk_buf_len; |
4128 | } |
4129 | |
4130 | /* initialize rx_buffer_info */ |
4131 | memset(ring->rx_buffer_info, 0, |
4132 | sizeof(struct ixgbe_rx_buffer) * ring->count); |
4133 | |
4134 | /* initialize Rx descriptor 0 */ |
4135 | rx_desc = IXGBE_RX_DESC(ring, 0); |
4136 | rx_desc->wb.upper.length = 0; |
4137 | |
4138 | /* enable receive descriptor ring */ |
4139 | rxdctl |= IXGBE_RXDCTL_ENABLE; |
4140 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
4141 | |
4142 | ixgbe_rx_desc_queue_enable(adapter, ring); |
4143 | if (ring->xsk_pool) |
		ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
	else
		ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4147 | } |
4148 | |
4149 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) |
4150 | { |
4151 | struct ixgbe_hw *hw = &adapter->hw; |
	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4153 | u16 pool = adapter->num_rx_pools; |
4154 | |
4155 | /* PSRTYPE must be initialized in non 82598 adapters */ |
4156 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | |
4157 | IXGBE_PSRTYPE_UDPHDR | |
4158 | IXGBE_PSRTYPE_IPV4HDR | |
4159 | IXGBE_PSRTYPE_L2HDR | |
4160 | IXGBE_PSRTYPE_IPV6HDR; |
4161 | |
4162 | if (hw->mac.type == ixgbe_mac_82598EB) |
4163 | return; |
4164 | |
4165 | if (rss_i > 3) |
4166 | psrtype |= 2u << 29; |
4167 | else if (rss_i > 1) |
4168 | psrtype |= 1u << 29; |
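	/* Bits 30:29 of PSRTYPE presumably encode the number of RSS
	 * queues per pool as a power of two: 1 selects two queues and
	 * 2 selects four, matching the rss_i thresholds above.
	 */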
4169 | |
4170 | while (pool--) |
4171 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); |
4172 | } |
4173 | |
4174 | static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) |
4175 | { |
4176 | struct ixgbe_hw *hw = &adapter->hw; |
4177 | u16 pool = adapter->num_rx_pools; |
4178 | u32 reg_offset, vf_shift, vmolr; |
4179 | u32 gcr_ext, vmdctl; |
4180 | int i; |
4181 | |
4182 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) |
4183 | return; |
4184 | |
4185 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); |
4186 | vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; |
4187 | vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; |
4188 | vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; |
4189 | vmdctl |= IXGBE_VT_CTL_REPLEN; |
4190 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); |
4191 | |
4192 | /* accept untagged packets until a vlan tag is |
4193 | * specifically set for the VMDQ queue/pool |
4194 | */ |
4195 | vmolr = IXGBE_VMOLR_AUPE; |
4196 | while (pool--) |
4197 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); |
4198 | |
4199 | vf_shift = VMDQ_P(0) % 32; |
4200 | reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; |
4201 | |
4202 | /* Enable only the PF's pool for Tx/Rx */ |
4203 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); |
4204 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); |
4205 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); |
4206 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); |
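	/* GENMASK(31, vf_shift) enables the PF pool and every pool above
	 * it within the same register.  Since reg_offset is a u32,
	 * reg_offset - 1 underflows to 0xFFFFFFFF when the PF sits in the
	 * low register, enabling pools 32-63 in the other one, and writes
	 * 0 to it otherwise.
	 */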
4207 | if (adapter->bridge_mode == BRIDGE_MODE_VEB) |
4208 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); |
4209 | |
4210 | /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ |
4211 | hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); |
4212 | |
4213 | /* clear VLAN promisc flag so VFTA will be updated if necessary */ |
4214 | adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; |
4215 | |
4216 | /* |
4217 | * Set up VF register offsets for selected VT Mode, |
4218 | * i.e. 32 or 64 VFs for SR-IOV |
4219 | */ |
4220 | switch (adapter->ring_feature[RING_F_VMDQ].mask) { |
4221 | case IXGBE_82599_VMDQ_8Q_MASK: |
4222 | gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; |
4223 | break; |
4224 | case IXGBE_82599_VMDQ_4Q_MASK: |
4225 | gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; |
4226 | break; |
4227 | default: |
4228 | gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; |
4229 | break; |
4230 | } |
4231 | |
4232 | IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); |
4233 | |
4234 | for (i = 0; i < adapter->num_vfs; i++) { |
4235 | /* configure spoof checking */ |
		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
					  adapter->vfinfo[i].spoofchk_enabled);

		/* Enable/Disable RSS query feature */
		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
					      adapter->vfinfo[i].rss_query_enabled);
4242 | } |
4243 | } |
4244 | |
4245 | static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) |
4246 | { |
4247 | struct ixgbe_hw *hw = &adapter->hw; |
4248 | struct net_device *netdev = adapter->netdev; |
4249 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
4250 | struct ixgbe_ring *rx_ring; |
4251 | int i; |
4252 | u32 mhadd, hlreg0; |
4253 | |
4254 | #ifdef IXGBE_FCOE |
4255 | /* adjust max frame to be able to do baby jumbo for FCoE */ |
4256 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
4257 | (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) |
4258 | max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; |
4259 | |
4260 | #endif /* IXGBE_FCOE */ |
4261 | |
4262 | /* adjust max frame to be at least the size of a standard frame */ |
4263 | if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) |
4264 | max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); |
4265 | |
4266 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); |
4267 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { |
4268 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
4269 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; |
4270 | |
4271 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); |
4272 | } |
4273 | |
4274 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); |
4275 | /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ |
4276 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; |
4277 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); |
4278 | |
4279 | /* |
4280 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
4281 | * the Base and Length of the Rx Descriptor Ring |
4282 | */ |
4283 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4284 | rx_ring = adapter->rx_ring[i]; |
4285 | |
4286 | clear_ring_rsc_enabled(rx_ring); |
		clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
		clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4289 | |
4290 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
4291 | set_ring_rsc_enabled(rx_ring); |
4292 | |
4293 | if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) |
			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4295 | |
4296 | if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) |
4297 | continue; |
4298 | |
		set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4300 | |
4301 | #if (PAGE_SIZE < 8192) |
4302 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4304 | |
4305 | if (IXGBE_2K_TOO_SMALL_WITH_PADDING || |
4306 | (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) |
			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4308 | #endif |
4309 | } |
4310 | } |
4311 | |
4312 | static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) |
4313 | { |
4314 | struct ixgbe_hw *hw = &adapter->hw; |
4315 | u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
4316 | |
4317 | switch (hw->mac.type) { |
4318 | case ixgbe_mac_82598EB: |
4319 | /* |
4320 | * For VMDq support of different descriptor types or |
4321 | * buffer sizes through the use of multiple SRRCTL |
4322 | * registers, RDRXCTL.MVMEN must be set to 1 |
4323 | * |
4324 | * also, the manual doesn't mention it clearly but DCA hints |
4325 | * will only use queue 0's tags unless this bit is set. Side |
4326 | * effects of setting this bit are only that SRRCTL must be |
4327 | * fully programmed [0..15] |
4328 | */ |
4329 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; |
4330 | break; |
4331 | case ixgbe_mac_X550: |
4332 | case ixgbe_mac_X550EM_x: |
4333 | case ixgbe_mac_x550em_a: |
4334 | if (adapter->num_vfs) |
4335 | rdrxctl |= IXGBE_RDRXCTL_PSP; |
4336 | fallthrough; |
4337 | case ixgbe_mac_82599EB: |
4338 | case ixgbe_mac_X540: |
4339 | /* Disable RSC for ACK packets */ |
4340 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, |
4341 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); |
4342 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; |
4343 | /* hardware requires some bits to be set by default */ |
4344 | rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); |
4345 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; |
4346 | break; |
4347 | default: |
4348 | /* We should do nothing since we don't know this hardware */ |
4349 | return; |
4350 | } |
4351 | |
4352 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
4353 | } |
4354 | |
4355 | /** |
4356 | * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset |
4357 | * @adapter: board private structure |
4358 | * |
4359 | * Configure the Rx unit of the MAC after a reset. |
4360 | **/ |
4361 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) |
4362 | { |
4363 | struct ixgbe_hw *hw = &adapter->hw; |
4364 | int i; |
4365 | u32 rxctrl, rfctl; |
4366 | |
4367 | /* disable receives while setting up the descriptors */ |
4368 | hw->mac.ops.disable_rx(hw); |
4369 | |
4370 | ixgbe_setup_psrtype(adapter); |
4371 | ixgbe_setup_rdrxctl(adapter); |
4372 | |
4373 | /* RSC Setup */ |
4374 | rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); |
4375 | rfctl &= ~IXGBE_RFCTL_RSC_DIS; |
4376 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) |
4377 | rfctl |= IXGBE_RFCTL_RSC_DIS; |
4378 | |
4379 | /* disable NFS filtering */ |
4380 | rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); |
4381 | IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); |
4382 | |
4383 | /* Program registers for the distribution of queues */ |
4384 | ixgbe_setup_mrqc(adapter); |
4385 | |
4386 | /* set_rx_buffer_len must be called before ring initialization */ |
4387 | ixgbe_set_rx_buffer_len(adapter); |
4388 | |
4389 | /* |
4390 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
4391 | * the Base and Length of the Rx Descriptor Ring |
4392 | */ |
4393 | for (i = 0; i < adapter->num_rx_queues; i++) |
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4395 | |
4396 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
4397 | /* disable drop enable for 82598 parts */ |
4398 | if (hw->mac.type == ixgbe_mac_82598EB) |
4399 | rxctrl |= IXGBE_RXCTRL_DMBYPS; |
4400 | |
4401 | /* enable all receives */ |
4402 | rxctrl |= IXGBE_RXCTRL_RXEN; |
4403 | hw->mac.ops.enable_rx_dma(hw, rxctrl); |
4404 | } |
4405 | |
4406 | static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, |
4407 | __be16 proto, u16 vid) |
4408 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4410 | struct ixgbe_hw *hw = &adapter->hw; |
4411 | |
4412 | /* add VID to filter table */ |
4413 | if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
4414 | hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); |
4415 | |
	set_bit(vid, adapter->active_vlans);
4417 | |
4418 | return 0; |
4419 | } |
4420 | |
4421 | static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) |
4422 | { |
4423 | u32 vlvf; |
4424 | int idx; |
4425 | |
4426 | /* short cut the special case */ |
4427 | if (vlan == 0) |
4428 | return 0; |
4429 | |
4430 | /* Search for the vlan id in the VLVF entries */ |
4431 | for (idx = IXGBE_VLVF_ENTRIES; --idx;) { |
4432 | vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); |
4433 | if ((vlvf & VLAN_VID_MASK) == vlan) |
4434 | break; |
4435 | } |
4436 | |
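	/* Slot 0 is never scanned (the loop stops before it), so a return
	 * value of 0 doubles as "not found", consistent with the VLAN 0
	 * short cut above.
	 */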
4437 | return idx; |
4438 | } |
4439 | |
4440 | void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) |
4441 | { |
4442 | struct ixgbe_hw *hw = &adapter->hw; |
4443 | u32 bits, word; |
4444 | int idx; |
4445 | |
	idx = ixgbe_find_vlvf_entry(hw, vid);
4447 | if (!idx) |
4448 | return; |
4449 | |
4450 | /* See if any other pools are set for this VLAN filter |
4451 | * entry other than the PF. |
4452 | */ |
4453 | word = idx * 2 + (VMDQ_P(0) / 32); |
4454 | bits = ~BIT(VMDQ_P(0) % 32); |
4455 | bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); |
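	/* Each VLVF entry owns a 64-pool bitmap split across two VLVFB
	 * registers, hence word = idx * 2 + pool / 32 and bit pool % 32;
	 * e.g. entry 3 with the PF in pool 40 maps to word 7, bit 8.
	 */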
4456 | |
4457 | /* Disable the filter so this falls into the default pool. */ |
4458 | if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { |
4459 | if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
4460 | IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); |
4461 | IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); |
4462 | } |
4463 | } |
4464 | |
4465 | static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, |
4466 | __be16 proto, u16 vid) |
4467 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4469 | struct ixgbe_hw *hw = &adapter->hw; |
4470 | |
4471 | /* remove VID from filter table */ |
4472 | if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
4473 | hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); |
4474 | |
	clear_bit(vid, adapter->active_vlans);
4476 | |
4477 | return 0; |
4478 | } |
4479 | |
4480 | /** |
4481 | * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping |
4482 | * @adapter: driver data |
4483 | */ |
4484 | static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) |
4485 | { |
4486 | struct ixgbe_hw *hw = &adapter->hw; |
4487 | u32 vlnctrl; |
4488 | int i, j; |
4489 | |
4490 | switch (hw->mac.type) { |
4491 | case ixgbe_mac_82598EB: |
4492 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
4493 | vlnctrl &= ~IXGBE_VLNCTRL_VME; |
4494 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4495 | break; |
4496 | case ixgbe_mac_82599EB: |
4497 | case ixgbe_mac_X540: |
4498 | case ixgbe_mac_X550: |
4499 | case ixgbe_mac_X550EM_x: |
4500 | case ixgbe_mac_x550em_a: |
4501 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4502 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
4503 | |
			if (!netif_is_ixgbe(ring->netdev))
4505 | continue; |
4506 | |
4507 | j = ring->reg_idx; |
4508 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
4509 | vlnctrl &= ~IXGBE_RXDCTL_VME; |
4510 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); |
4511 | } |
4512 | break; |
4513 | default: |
4514 | break; |
4515 | } |
4516 | } |
4517 | |
4518 | /** |
4519 | * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping |
4520 | * @adapter: driver data |
4521 | */ |
4522 | static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) |
4523 | { |
4524 | struct ixgbe_hw *hw = &adapter->hw; |
4525 | u32 vlnctrl; |
4526 | int i, j; |
4527 | |
4528 | switch (hw->mac.type) { |
4529 | case ixgbe_mac_82598EB: |
4530 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
4531 | vlnctrl |= IXGBE_VLNCTRL_VME; |
4532 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4533 | break; |
4534 | case ixgbe_mac_82599EB: |
4535 | case ixgbe_mac_X540: |
4536 | case ixgbe_mac_X550: |
4537 | case ixgbe_mac_X550EM_x: |
4538 | case ixgbe_mac_x550em_a: |
4539 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4540 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
4541 | |
			if (!netif_is_ixgbe(ring->netdev))
4543 | continue; |
4544 | |
4545 | j = ring->reg_idx; |
4546 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
4547 | vlnctrl |= IXGBE_RXDCTL_VME; |
4548 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); |
4549 | } |
4550 | break; |
4551 | default: |
4552 | break; |
4553 | } |
4554 | } |
4555 | |
4556 | static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) |
4557 | { |
4558 | struct ixgbe_hw *hw = &adapter->hw; |
4559 | u32 vlnctrl, i; |
4560 | |
4561 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
4562 | |
4563 | if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { |
4564 | /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ |
4565 | vlnctrl |= IXGBE_VLNCTRL_VFE; |
4566 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4567 | } else { |
4568 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
4569 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4570 | return; |
4571 | } |
4572 | |
4573 | /* Nothing to do for 82598 */ |
4574 | if (hw->mac.type == ixgbe_mac_82598EB) |
4575 | return; |
4576 | |
4577 | /* We are already in VLAN promisc, nothing to do */ |
4578 | if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) |
4579 | return; |
4580 | |
4581 | /* Set flag so we don't redo unnecessary work */ |
4582 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; |
4583 | |
4584 | /* Add PF to all active pools */ |
4585 | for (i = IXGBE_VLVF_ENTRIES; --i;) { |
4586 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); |
4587 | u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); |
4588 | |
4589 | vlvfb |= BIT(VMDQ_P(0) % 32); |
4590 | IXGBE_WRITE_REG(hw, reg_offset, vlvfb); |
4591 | } |
4592 | |
4593 | /* Set all bits in the VLAN filter table array */ |
4594 | for (i = hw->mac.vft_size; i--;) |
4595 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); |
4596 | } |
4597 | |
4598 | #define VFTA_BLOCK_SIZE 8 |
4599 | static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) |
4600 | { |
4601 | struct ixgbe_hw *hw = &adapter->hw; |
4602 | u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; |
4603 | u32 vid_start = vfta_offset * 32; |
4604 | u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); |
4605 | u32 i, vid, word, bits; |
4606 | |
4607 | for (i = IXGBE_VLVF_ENTRIES; --i;) { |
4608 | u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); |
4609 | |
4610 | /* pull VLAN ID from VLVF */ |
4611 | vid = vlvf & VLAN_VID_MASK; |
4612 | |
		/* only concern ourselves with a certain range */
4614 | if (vid < vid_start || vid >= vid_end) |
4615 | continue; |
4616 | |
4617 | if (vlvf) { |
4618 | /* record VLAN ID in VFTA */ |
4619 | vfta[(vid - vid_start) / 32] |= BIT(vid % 32); |
4620 | |
4621 | /* if PF is part of this then continue */ |
4622 | if (test_bit(vid, adapter->active_vlans)) |
4623 | continue; |
4624 | } |
4625 | |
4626 | /* remove PF from the pool */ |
4627 | word = i * 2 + VMDQ_P(0) / 32; |
4628 | bits = ~BIT(VMDQ_P(0) % 32); |
4629 | bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); |
4630 | IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); |
4631 | } |
4632 | |
4633 | /* extract values from active_vlans and write back to VFTA */ |
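	/* e.g. on a 64-bit kernel with vfta_offset == 8 and i == 2 below,
	 * the block starts at VLAN ID 320, which is bit 0 of
	 * active_vlans[5].
	 */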
4634 | for (i = VFTA_BLOCK_SIZE; i--;) { |
4635 | vid = (vfta_offset + i) * 32; |
4636 | word = vid / BITS_PER_LONG; |
4637 | bits = vid % BITS_PER_LONG; |
4638 | |
4639 | vfta[i] |= adapter->active_vlans[word] >> bits; |
4640 | |
4641 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); |
4642 | } |
4643 | } |
4644 | |
4645 | static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) |
4646 | { |
4647 | struct ixgbe_hw *hw = &adapter->hw; |
4648 | u32 vlnctrl, i; |
4649 | |
4650 | /* Set VLAN filtering to enabled */ |
4651 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
4652 | vlnctrl |= IXGBE_VLNCTRL_VFE; |
4653 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4654 | |
4655 | if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || |
4656 | hw->mac.type == ixgbe_mac_82598EB) |
4657 | return; |
4658 | |
4659 | /* We are not in VLAN promisc, nothing to do */ |
4660 | if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
4661 | return; |
4662 | |
4663 | /* Set flag so we don't redo unnecessary work */ |
4664 | adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; |
4665 | |
4666 | for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) |
		ixgbe_scrub_vfta(adapter, i);
4668 | } |
4669 | |
4670 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) |
4671 | { |
4672 | u16 vid = 1; |
4673 | |
	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4678 | } |
4679 | |
4680 | /** |
4681 | * ixgbe_write_mc_addr_list - write multicast addresses to MTA |
4682 | * @netdev: network interface device structure |
4683 | * |
4684 | * Writes multicast address list to the MTA hash table. |
4685 | * Returns: -ENOMEM on failure |
4686 | * 0 on no addresses written |
4687 | * X on writing X addresses to MTA |
4688 | **/ |
4689 | static int ixgbe_write_mc_addr_list(struct net_device *netdev) |
4690 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
4692 | struct ixgbe_hw *hw = &adapter->hw; |
4693 | |
	if (!netif_running(netdev))
4695 | return 0; |
4696 | |
4697 | if (hw->mac.ops.update_mc_addr_list) |
4698 | hw->mac.ops.update_mc_addr_list(hw, netdev); |
4699 | else |
4700 | return -ENOMEM; |
4701 | |
4702 | #ifdef CONFIG_PCI_IOV |
4703 | ixgbe_restore_vf_multicasts(adapter); |
4704 | #endif |
4705 | |
4706 | return netdev_mc_count(netdev); |
4707 | } |
4708 | |
4709 | #ifdef CONFIG_PCI_IOV |
4710 | void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) |
4711 | { |
4712 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4713 | struct ixgbe_hw *hw = &adapter->hw; |
4714 | int i; |
4715 | |
4716 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4717 | mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; |
4718 | |
4719 | if (mac_table->state & IXGBE_MAC_STATE_IN_USE) |
4720 | hw->mac.ops.set_rar(hw, i, |
4721 | mac_table->addr, |
4722 | mac_table->pool, |
4723 | IXGBE_RAH_AV); |
4724 | else |
4725 | hw->mac.ops.clear_rar(hw, i); |
4726 | } |
4727 | } |
4728 | |
4729 | #endif |
4730 | static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) |
4731 | { |
4732 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4733 | struct ixgbe_hw *hw = &adapter->hw; |
4734 | int i; |
4735 | |
4736 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4737 | if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) |
4738 | continue; |
4739 | |
4740 | mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; |
4741 | |
4742 | if (mac_table->state & IXGBE_MAC_STATE_IN_USE) |
4743 | hw->mac.ops.set_rar(hw, i, |
4744 | mac_table->addr, |
4745 | mac_table->pool, |
4746 | IXGBE_RAH_AV); |
4747 | else |
4748 | hw->mac.ops.clear_rar(hw, i); |
4749 | } |
4750 | } |
4751 | |
4752 | static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) |
4753 | { |
4754 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4755 | struct ixgbe_hw *hw = &adapter->hw; |
4756 | int i; |
4757 | |
4758 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4759 | mac_table->state |= IXGBE_MAC_STATE_MODIFIED; |
4760 | mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; |
4761 | } |
4762 | |
4763 | ixgbe_sync_mac_table(adapter); |
4764 | } |
4765 | |
4766 | static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) |
4767 | { |
4768 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4769 | struct ixgbe_hw *hw = &adapter->hw; |
4770 | int i, count = 0; |
4771 | |
4772 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4773 | /* do not count default RAR as available */ |
4774 | if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) |
4775 | continue; |
4776 | |
4777 | /* only count unused and addresses that belong to us */ |
4778 | if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { |
4779 | if (mac_table->pool != pool) |
4780 | continue; |
4781 | } |
4782 | |
4783 | count++; |
4784 | } |
4785 | |
4786 | return count; |
4787 | } |
4788 | |
4789 | /* this function destroys the first RAR entry */ |
4790 | static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) |
4791 | { |
4792 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4793 | struct ixgbe_hw *hw = &adapter->hw; |
4794 | |
4795 | memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN); |
4796 | mac_table->pool = VMDQ_P(0); |
4797 | |
4798 | mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; |
4799 | |
4800 | hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, |
4801 | IXGBE_RAH_AV); |
4802 | } |
4803 | |
4804 | int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, |
4805 | const u8 *addr, u16 pool) |
4806 | { |
4807 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4808 | struct ixgbe_hw *hw = &adapter->hw; |
4809 | int i; |
4810 | |
4811 | if (is_zero_ether_addr(addr)) |
4812 | return -EINVAL; |
4813 | |
4814 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4815 | if (mac_table->state & IXGBE_MAC_STATE_IN_USE) |
4816 | continue; |
4817 | |
4818 | ether_addr_copy(mac_table->addr, addr);
4819 | mac_table->pool = pool; |
4820 | |
4821 | mac_table->state |= IXGBE_MAC_STATE_MODIFIED | |
4822 | IXGBE_MAC_STATE_IN_USE; |
4823 | |
4824 | ixgbe_sync_mac_table(adapter); |
4825 | |
4826 | return i; |
4827 | } |
4828 | |
4829 | return -ENOMEM; |
4830 | } |
4831 | |
4832 | int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, |
4833 | const u8 *addr, u16 pool) |
4834 | { |
4835 | struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; |
4836 | struct ixgbe_hw *hw = &adapter->hw; |
4837 | int i; |
4838 | |
4839 | if (is_zero_ether_addr(addr)) |
4840 | return -EINVAL; |
4841 | |
4842 | /* search table for addr, if found clear IN_USE flag and sync */ |
4843 | for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { |
4844 | /* we can only delete an entry if it is in use */ |
4845 | if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) |
4846 | continue; |
4847 | /* we only care about entries that belong to the given pool */ |
4848 | if (mac_table->pool != pool) |
4849 | continue; |
4850 | /* we only care about a specific MAC address */ |
4851 | if (!ether_addr_equal(addr, mac_table->addr))
4852 | continue; |
4853 | |
4854 | mac_table->state |= IXGBE_MAC_STATE_MODIFIED; |
4855 | mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; |
4856 | |
4857 | ixgbe_sync_mac_table(adapter); |
4858 | |
4859 | return 0; |
4860 | } |
4861 | |
4862 | return -ENOMEM; |
4863 | } |
4864 | |
4865 | static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) |
4866 | { |
4867 | struct ixgbe_adapter *adapter = netdev_priv(netdev);
4868 | int ret; |
4869 | |
4870 | ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); |
4871 | |
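     | /* ixgbe_add_mac_filter() returns the RAR index (>= 0) on success or a
     | * negative errno; clamp positive indices to 0 because the address sync
     | * core only expects 0 or an error code here.
     | */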
4872 | return min_t(int, ret, 0); |
4873 | } |
4874 | |
4875 | static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) |
4876 | { |
4877 | struct ixgbe_adapter *adapter = netdev_priv(netdev);
4878 | |
4879 | ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); |
4880 | |
4881 | return 0; |
4882 | } |
4883 | |
4884 | /** |
4885 | * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set |
4886 | * @netdev: network interface device structure |
4887 | * |
4888 | * The set_rx_method entry point is called whenever the unicast/multicast |
4889 | * address list or the network interface flags are updated. This routine is |
4890 | * responsible for configuring the hardware for proper unicast, multicast and |
4891 | * promiscuous mode. |
4892 | **/ |
4893 | void ixgbe_set_rx_mode(struct net_device *netdev) |
4894 | { |
4895 | struct ixgbe_adapter *adapter = netdev_priv(netdev);
4896 | struct ixgbe_hw *hw = &adapter->hw; |
4897 | u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; |
4898 | netdev_features_t features = netdev->features; |
4899 | int count; |
4900 | |
4901 | /* Check for Promiscuous and All Multicast modes */ |
4902 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
4903 | |
4904 | /* set all bits that we expect to always be set */ |
4905 | fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ |
4906 | fctrl |= IXGBE_FCTRL_BAM; |
4907 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ |
4908 | fctrl |= IXGBE_FCTRL_PMCF; |
4909 | |
4910 | /* clear the bits we are changing the status of */ |
4911 | fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
4912 | if (netdev->flags & IFF_PROMISC) { |
4913 | hw->addr_ctrl.user_set_promisc = true; |
4914 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
4915 | vmolr |= IXGBE_VMOLR_MPE; |
4916 | features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; |
4917 | } else { |
4918 | if (netdev->flags & IFF_ALLMULTI) { |
4919 | fctrl |= IXGBE_FCTRL_MPE; |
4920 | vmolr |= IXGBE_VMOLR_MPE; |
4921 | } |
4922 | hw->addr_ctrl.user_set_promisc = false; |
4923 | } |
4924 | |
4925 | /* |
4926 | * Write addresses to available RAR registers, if there is not |
4927 | * sufficient space to store all the addresses then enable |
4928 | * unicast promiscuous mode |
4929 | */ |
4930 | if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4931 | fctrl |= IXGBE_FCTRL_UPE; |
4932 | vmolr |= IXGBE_VMOLR_ROPE; |
4933 | } |
4934 | |
4935 | /* Write addresses to the MTA, if the attempt fails |
4936 | * then we should just turn on promiscuous mode so |
4937 | * that we can at least receive multicast traffic |
4938 | */ |
4939 | count = ixgbe_write_mc_addr_list(netdev); |
4940 | if (count < 0) { |
4941 | fctrl |= IXGBE_FCTRL_MPE; |
4942 | vmolr |= IXGBE_VMOLR_MPE; |
4943 | } else if (count) { |
4944 | vmolr |= IXGBE_VMOLR_ROMPE; |
4945 | } |
4946 | |
4947 | if (hw->mac.type != ixgbe_mac_82598EB) { |
4948 | vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & |
4949 | ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | |
4950 | IXGBE_VMOLR_ROPE); |
4951 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); |
4952 | } |
4953 | |
4954 | /* This is useful for sniffing bad packets. */ |
4955 | if (features & NETIF_F_RXALL) { |
4956 | /* UPE and MPE will be handled by the normal PROMISC logic
4957 | * earlier in ixgbe_set_rx_mode */
4958 | fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ |
4959 | IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ |
4960 | IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ |
4961 | |
4962 | fctrl &= ~(IXGBE_FCTRL_DPF); |
4963 | /* NOTE: VLAN filtering is disabled by setting PROMISC */ |
4964 | } |
4965 | |
4966 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
4967 | |
4968 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
4969 | ixgbe_vlan_strip_enable(adapter); |
4970 | else |
4971 | ixgbe_vlan_strip_disable(adapter); |
4972 | |
4973 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) |
4974 | ixgbe_vlan_promisc_disable(adapter); |
4975 | else |
4976 | ixgbe_vlan_promisc_enable(adapter); |
4977 | } |
4978 | |
4979 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) |
4980 | { |
4981 | int q_idx; |
4982 | |
4983 | for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) |
4984 | napi_enable(&adapter->q_vector[q_idx]->napi);
4985 | } |
4986 | |
4987 | static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) |
4988 | { |
4989 | int q_idx; |
4990 | |
4991 | for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) |
4992 | napi_disable(&adapter->q_vector[q_idx]->napi);
4993 | } |
4994 | |
4995 | static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) |
4996 | { |
4997 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
4998 | struct ixgbe_hw *hw = &adapter->hw; |
4999 | struct udp_tunnel_info ti; |
5000 | |
5001 | udp_tunnel_nic_get_port(dev, table, 0, &ti);
5002 | if (ti.type == UDP_TUNNEL_TYPE_VXLAN) |
5003 | adapter->vxlan_port = ti.port; |
5004 | else |
5005 | adapter->geneve_port = ti.port; |
5006 | |
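     | /* Both offloaded ports share one register: the VXLAN port occupies the
     | * low 16 bits of VXLANCTRL and the GENEVE port sits above the
     | * GENEVE_UDPPORT shift (16 on the parts this driver targets).
     | */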
5007 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, |
5008 | ntohs(adapter->vxlan_port) | |
5009 | ntohs(adapter->geneve_port) << |
5010 | IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT); |
5011 | return 0; |
5012 | } |
5013 | |
5014 | static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = { |
5015 | .sync_table = ixgbe_udp_tunnel_sync, |
5016 | .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, |
5017 | .tables = { |
5018 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
5019 | }, |
5020 | }; |
5021 | |
5022 | static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = { |
5023 | .sync_table = ixgbe_udp_tunnel_sync, |
5024 | .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, |
5025 | .tables = { |
5026 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, |
5027 | { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, |
5028 | }, |
5029 | }; |
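     | /* For context: these tables are handed to the stack at probe time based
     | * on the MAC type, roughly:
     | *
     | *	netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
     | *
     | * after which the core calls ixgbe_udp_tunnel_sync() whenever an
     | * offloadable port is added or removed.
     | */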
5030 | |
5031 | #ifdef CONFIG_IXGBE_DCB |
5032 | /** |
5033 | * ixgbe_configure_dcb - Configure DCB hardware |
5034 | * @adapter: ixgbe adapter struct |
5035 | * |
5036 | * This is called by the driver on open to configure the DCB hardware. |
5037 | * This is also called by the gennetlink interface when reconfiguring |
5038 | * the DCB state. |
5039 | */ |
5040 | static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) |
5041 | { |
5042 | struct ixgbe_hw *hw = &adapter->hw; |
5043 | int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
5044 | |
5045 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { |
5046 | if (hw->mac.type == ixgbe_mac_82598EB) |
5047 | netif_set_tso_max_size(adapter->netdev, 65536);
5048 | return; |
5049 | } |
5050 | |
5051 | if (hw->mac.type == ixgbe_mac_82598EB) |
5052 | netif_set_tso_max_size(adapter->netdev, 32768);
5053 | |
5054 | #ifdef IXGBE_FCOE |
5055 | if (adapter->netdev->features & NETIF_F_FCOE_MTU) |
5056 | max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); |
5057 | #endif |
5058 | |
5059 | /* reconfigure the hardware */ |
5060 | if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { |
5061 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, |
5062 | DCB_TX_CONFIG); |
5063 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, |
5064 | DCB_RX_CONFIG); |
5065 | ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); |
5066 | } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { |
5067 | ixgbe_dcb_hw_ets(&adapter->hw,
5068 | adapter->ixgbe_ieee_ets,
5069 | max_frame);
5070 | ixgbe_dcb_hw_pfc_config(&adapter->hw,
5071 | adapter->ixgbe_ieee_pfc->pfc_en,
5072 | adapter->ixgbe_ieee_ets->prio_tc);
5073 | } |
5074 | |
5075 | /* Enable RSS Hash per TC */ |
5076 | if (hw->mac.type != ixgbe_mac_82598EB) { |
5077 | u32 msb = 0; |
5078 | u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5079 | |
5080 | while (rss_i) { |
5081 | msb++; |
5082 | rss_i >>= 1; |
5083 | } |
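     | /* Example: 16 RSS queues mean rss_i starts at 15, leaving msb = 4;
     | * 4 * 0x11111111 = 0x44444444 then places that exponent in every TC's
     | * 4-bit RQTC field below.
     | */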
5084 | |
5085 | /* write msb to all 8 TCs in one write */ |
5086 | IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); |
5087 | } |
5088 | } |
5089 | #endif |
5090 | |
5091 | /* Additional bittime to account for IXGBE framing */ |
5092 | #define IXGBE_ETH_FRAMING 20 |
5093 | |
5094 | /** |
5095 | * ixgbe_hpbthresh - calculate high water mark for flow control |
5096 | * |
5097 | * @adapter: board private structure to calculate for |
5098 | * @pb: packet buffer to calculate |
5099 | */ |
5100 | static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) |
5101 | { |
5102 | struct ixgbe_hw *hw = &adapter->hw; |
5103 | struct net_device *dev = adapter->netdev; |
5104 | int link, tc, kb, marker; |
5105 | u32 dv_id, rx_pba; |
5106 | |
5107 | /* Calculate max LAN frame size */ |
5108 | tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; |
5109 | |
5110 | #ifdef IXGBE_FCOE |
5111 | /* FCoE traffic class uses FCOE jumbo frames */ |
5112 | if ((dev->features & NETIF_F_FCOE_MTU) && |
5113 | (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && |
5114 | (pb == ixgbe_fcoe_get_tc(adapter))) |
5115 | tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; |
5116 | #endif |
5117 | |
5118 | /* Calculate delay value for device */ |
5119 | switch (hw->mac.type) { |
5120 | case ixgbe_mac_X540: |
5121 | case ixgbe_mac_X550: |
5122 | case ixgbe_mac_X550EM_x: |
5123 | case ixgbe_mac_x550em_a: |
5124 | dv_id = IXGBE_DV_X540(link, tc); |
5125 | break; |
5126 | default: |
5127 | dv_id = IXGBE_DV(link, tc); |
5128 | break; |
5129 | } |
5130 | |
5131 | /* Loopback switch introduces additional latency */ |
5132 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
5133 | dv_id += IXGBE_B2BT(tc); |
5134 | |
5135 | /* Delay value is calculated in bit times; convert to KB */
5136 | kb = IXGBE_BT2KB(dv_id); |
5137 | rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; |
5138 | |
5139 | marker = rx_pba - kb; |
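     | /* Worked example with illustrative numbers: a 512 KB packet buffer
     | * reads back as 0x80000, so rx_pba = 512; with kb = 70 the marker
     | * would sit at 442 KB.
     | */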
5140 | |
5141 | /* It is possible that the packet buffer is not large enough
5142 | * to provide required headroom. In this case throw an error
5143 | * to the user and do the best we can.
5144 | */
5145 | if (marker < 0) {
5146 | e_warn(drv, "Packet Buffer(%i) can not provide enough "
5147 | "headroom to support flow control. "
5148 | "Decrease MTU or number of traffic classes\n", pb);
5149 | marker = tc + 1; |
5150 | } |
5151 | |
5152 | return marker; |
5153 | } |
5154 | |
5155 | /** |
5156 | * ixgbe_lpbthresh - calculate low water mark for flow control |
5157 | * |
5158 | * @adapter: board private structure to calculate for |
5159 | * @pb: packet buffer to calculate |
5160 | */ |
5161 | static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) |
5162 | { |
5163 | struct ixgbe_hw *hw = &adapter->hw; |
5164 | struct net_device *dev = adapter->netdev; |
5165 | int tc; |
5166 | u32 dv_id; |
5167 | |
5168 | /* Calculate max LAN frame size */ |
5169 | tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; |
5170 | |
5171 | #ifdef IXGBE_FCOE |
5172 | /* FCoE traffic class uses FCOE jumbo frames */ |
5173 | if ((dev->features & NETIF_F_FCOE_MTU) && |
5174 | (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && |
5175 | (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5176 | tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; |
5177 | #endif |
5178 | |
5179 | /* Calculate delay value for device */ |
5180 | switch (hw->mac.type) { |
5181 | case ixgbe_mac_X540: |
5182 | case ixgbe_mac_X550: |
5183 | case ixgbe_mac_X550EM_x: |
5184 | case ixgbe_mac_x550em_a: |
5185 | dv_id = IXGBE_LOW_DV_X540(tc); |
5186 | break; |
5187 | default: |
5188 | dv_id = IXGBE_LOW_DV(tc); |
5189 | break; |
5190 | } |
5191 | |
5192 | /* Delay value is calculated in bit times; convert to KB */
5193 | return IXGBE_BT2KB(dv_id); |
5194 | } |
5195 | |
5196 | /* |
5197 |  * ixgbe_pbthresh_setup - calculate and set up high and low water marks
5198 | */ |
5199 | static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) |
5200 | { |
5201 | struct ixgbe_hw *hw = &adapter->hw; |
5202 | int num_tc = adapter->hw_tcs; |
5203 | int i; |
5204 | |
5205 | if (!num_tc) |
5206 | num_tc = 1; |
5207 | |
5208 | for (i = 0; i < num_tc; i++) { |
5209 | hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5210 | hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5211 | |
5212 | /* Low water marks must not be larger than high water marks */ |
5213 | if (hw->fc.low_water[i] > hw->fc.high_water[i]) |
5214 | hw->fc.low_water[i] = 0; |
5215 | } |
5216 | |
5217 | for (; i < MAX_TRAFFIC_CLASS; i++) |
5218 | hw->fc.high_water[i] = 0; |
5219 | } |
5220 | |
5221 | static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) |
5222 | { |
5223 | struct ixgbe_hw *hw = &adapter->hw; |
5224 | int hdrm; |
5225 | u8 tc = adapter->hw_tcs; |
5226 | |
5227 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
5228 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
5229 | hdrm = 32 << adapter->fdir_pballoc; |
5230 | else |
5231 | hdrm = 0; |
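     | /* e.g. an fdir_pballoc of IXGBE_FDIR_PBALLOC_64K (which encodes to 1
     | * here) reserves 32 << 1 = 64 KB of headroom for filters.
     | */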
5232 | |
5233 | hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); |
5234 | ixgbe_pbthresh_setup(adapter); |
5235 | } |
5236 | |
5237 | static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) |
5238 | { |
5239 | struct ixgbe_hw *hw = &adapter->hw; |
5240 | struct hlist_node *node2; |
5241 | struct ixgbe_fdir_filter *filter; |
5242 | u8 queue; |
5243 | |
5244 | spin_lock(&adapter->fdir_perfect_lock);
5245 | |
5246 | if (!hlist_empty(&adapter->fdir_filter_list))
5247 | ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5248 | |
5249 | hlist_for_each_entry_safe(filter, node2, |
5250 | &adapter->fdir_filter_list, fdir_node) { |
5251 | if (filter->action == IXGBE_FDIR_DROP_QUEUE) { |
5252 | queue = IXGBE_FDIR_DROP_QUEUE; |
5253 | } else { |
5254 | u32 ring = ethtool_get_flow_spec_ring(filter->action);
5255 | u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5256 | |
5257 | if (!vf && (ring >= adapter->num_rx_queues)) { |
5258 | e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5259 | ring); |
5260 | continue; |
5261 | } else if (vf && |
5262 | ((vf > adapter->num_vfs) || |
5263 | ring >= adapter->num_rx_queues_per_pool)) { |
5264 | e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5265 | vf, ring); |
5266 | continue; |
5267 | } |
5268 | |
5269 | /* Map the ring onto the absolute queue index */ |
5270 | if (!vf) |
5271 | queue = adapter->rx_ring[ring]->reg_idx; |
5272 | else |
5273 | queue = ((vf - 1) * |
5274 | adapter->num_rx_queues_per_pool) + ring; |
5275 | } |
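     | /* Example mapping: vf = 2, ring = 1 with 4 Rx queues per pool yields
     | * absolute queue (2 - 1) * 4 + 1 = 5.
     | */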
5276 | |
5277 | ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter,
5278 | filter->sw_idx, queue);
5279 | } |
5280 | |
5281 | spin_unlock(&adapter->fdir_perfect_lock);
5282 | } |
5283 | |
5284 | /** |
5285 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue |
5286 | * @rx_ring: ring to free buffers from |
5287 | **/ |
5288 | static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) |
5289 | { |
5290 | u16 i = rx_ring->next_to_clean; |
5291 | struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; |
5292 | |
5293 | if (rx_ring->xsk_pool) { |
5294 | ixgbe_xsk_clean_rx_ring(rx_ring); |
5295 | goto skip_free; |
5296 | } |
5297 | |
5298 | /* Free all the Rx ring sk_buffs */ |
5299 | while (i != rx_ring->next_to_alloc) { |
5300 | if (rx_buffer->skb) { |
5301 | struct sk_buff *skb = rx_buffer->skb; |
5302 | if (IXGBE_CB(skb)->page_released) |
5303 | dma_unmap_page_attrs(rx_ring->dev,
5304 | IXGBE_CB(skb)->dma,
5305 | ixgbe_rx_pg_size(rx_ring),
5306 | DMA_FROM_DEVICE,
5307 | IXGBE_RX_DMA_ATTR);
5308 | dev_kfree_skb(skb); |
5309 | } |
5310 | |
5311 | /* Invalidate cache lines that may have been written to by |
5312 | * device so that we avoid corrupting memory. |
5313 | */ |
5314 | dma_sync_single_range_for_cpu(rx_ring->dev,
5315 | rx_buffer->dma,
5316 | rx_buffer->page_offset,
5317 | ixgbe_rx_bufsz(rx_ring),
5318 | DMA_FROM_DEVICE);
5319 | |
5320 | /* free resources associated with mapping */ |
5321 | dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5322 | ixgbe_rx_pg_size(rx_ring),
5323 | DMA_FROM_DEVICE,
5324 | IXGBE_RX_DMA_ATTR);
5325 | __page_frag_cache_drain(rx_buffer->page,
5326 | rx_buffer->pagecnt_bias);
5327 | |
5328 | i++; |
5329 | rx_buffer++; |
5330 | if (i == rx_ring->count) { |
5331 | i = 0; |
5332 | rx_buffer = rx_ring->rx_buffer_info; |
5333 | } |
5334 | } |
5335 | |
5336 | skip_free: |
5337 | rx_ring->next_to_alloc = 0; |
5338 | rx_ring->next_to_clean = 0; |
5339 | rx_ring->next_to_use = 0; |
5340 | } |
5341 | |
5342 | static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, |
5343 | struct ixgbe_fwd_adapter *accel) |
5344 | { |
5345 | u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5346 | int num_tc = netdev_get_num_tc(adapter->netdev);
5347 | struct net_device *vdev = accel->netdev; |
5348 | int i, baseq, err; |
5349 | |
5350 | baseq = accel->pool * adapter->num_rx_queues_per_pool; |
5351 | netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5352 | accel->pool, adapter->num_rx_pools, |
5353 | baseq, baseq + adapter->num_rx_queues_per_pool); |
5354 | |
5355 | accel->rx_base_queue = baseq; |
5356 | accel->tx_base_queue = baseq; |
5357 | |
5358 | /* record configuration for macvlan interface in vdev */ |
5359 | for (i = 0; i < num_tc; i++) |
5360 | netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5361 | i, rss_i, baseq + (rss_i * i));
5362 | |
5363 | for (i = 0; i < adapter->num_rx_queues_per_pool; i++) |
5364 | adapter->rx_ring[baseq + i]->netdev = vdev; |
5365 | |
5366 | /* Guarantee all rings are updated before we update the |
5367 | * MAC address filter. |
5368 | */ |
5369 | wmb(); |
5370 | |
5371 | /* ixgbe_add_mac_filter will return an index if it succeeds, so we |
5372 | * need to only treat it as an error value if it is negative. |
5373 | */ |
5374 | err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5375 | VMDQ_P(accel->pool));
5376 | if (err >= 0) |
5377 | return 0; |
5378 | |
5379 | /* if we cannot add the MAC rule then disable the offload */ |
5380 | macvlan_release_l2fw_offload(vdev);
5381 | |
5382 | for (i = 0; i < adapter->num_rx_queues_per_pool; i++) |
5383 | adapter->rx_ring[baseq + i]->netdev = NULL; |
5384 | |
5385 | netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5386 | |
5387 | /* unbind the queues and drop the subordinate channel config */ |
5388 | netdev_unbind_sb_channel(adapter->netdev, vdev);
5389 | netdev_set_sb_channel(vdev, 0);
5390 | 
5391 | clear_bit(accel->pool, adapter->fwd_bitmask);
5392 | kfree(accel);
5393 | |
5394 | return err; |
5395 | } |
5396 | |
5397 | static int ixgbe_macvlan_up(struct net_device *vdev, |
5398 | struct netdev_nested_priv *priv) |
5399 | { |
5400 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; |
5401 | struct ixgbe_fwd_adapter *accel; |
5402 | |
5403 | if (!netif_is_macvlan(vdev))
5404 | return 0; |
5405 | |
5406 | accel = macvlan_accel_priv(vdev);
5407 | if (!accel) |
5408 | return 0; |
5409 | |
5410 | ixgbe_fwd_ring_up(adapter, accel); |
5411 | |
5412 | return 0; |
5413 | } |
5414 | |
5415 | static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) |
5416 | { |
5417 | struct netdev_nested_priv priv = { |
5418 | .data = (void *)adapter, |
5419 | }; |
5420 | |
5421 | netdev_walk_all_upper_dev_rcu(adapter->netdev,
5422 | ixgbe_macvlan_up, &priv);
5423 | } |
5424 | |
5425 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
5426 | { |
5427 | struct ixgbe_hw *hw = &adapter->hw; |
5428 | |
5429 | ixgbe_configure_pb(adapter); |
5430 | #ifdef CONFIG_IXGBE_DCB |
5431 | ixgbe_configure_dcb(adapter); |
5432 | #endif |
5433 | /* |
5434 | * We must restore virtualization before VLANs or else |
5435 | * the VLVF registers will not be populated |
5436 | */ |
5437 | ixgbe_configure_virtualization(adapter); |
5438 | |
5439 | ixgbe_set_rx_mode(adapter->netdev);
5440 | ixgbe_restore_vlan(adapter); |
5441 | ixgbe_ipsec_restore(adapter); |
5442 | |
5443 | switch (hw->mac.type) { |
5444 | case ixgbe_mac_82599EB: |
5445 | case ixgbe_mac_X540: |
5446 | hw->mac.ops.disable_rx_buff(hw); |
5447 | break; |
5448 | default: |
5449 | break; |
5450 | } |
5451 | |
5452 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
5453 | ixgbe_init_fdir_signature_82599(&adapter->hw,
5454 | adapter->fdir_pballoc);
5455 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5456 | ixgbe_init_fdir_perfect_82599(&adapter->hw,
5457 | adapter->fdir_pballoc);
5458 | ixgbe_fdir_filter_restore(adapter); |
5459 | } |
5460 | |
5461 | switch (hw->mac.type) { |
5462 | case ixgbe_mac_82599EB: |
5463 | case ixgbe_mac_X540: |
5464 | hw->mac.ops.enable_rx_buff(hw); |
5465 | break; |
5466 | default: |
5467 | break; |
5468 | } |
5469 | |
5470 | #ifdef CONFIG_IXGBE_DCA |
5471 | /* configure DCA */ |
5472 | if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) |
5473 | ixgbe_setup_dca(adapter); |
5474 | #endif /* CONFIG_IXGBE_DCA */ |
5475 | |
5476 | #ifdef IXGBE_FCOE |
5477 | /* configure FCoE L2 filters, redirection table, and Rx control */ |
5478 | ixgbe_configure_fcoe(adapter); |
5479 | |
5480 | #endif /* IXGBE_FCOE */ |
5481 | ixgbe_configure_tx(adapter); |
5482 | ixgbe_configure_rx(adapter); |
5483 | ixgbe_configure_dfwd(adapter); |
5484 | } |
5485 | |
5486 | /** |
5487 | * ixgbe_sfp_link_config - set up SFP+ link |
5488 | * @adapter: pointer to private adapter struct |
5489 | **/ |
5490 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) |
5491 | { |
5492 | /* |
5493 | * We are assuming the worst case scenario here, and that |
5494 | * is that an SFP was inserted/removed after the reset |
5495 | * but before SFP detection was enabled. As such the best |
5496 | * solution is to just start searching as soon as we start |
5497 | */ |
5498 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
5499 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; |
5500 | |
5501 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
5502 | adapter->sfp_poll_time = 0; |
5503 | } |
5504 | |
5505 | /** |
5506 | * ixgbe_non_sfp_link_config - set up non-SFP+ link |
5507 | * @hw: pointer to private hardware struct |
5508 | * |
5509 | * Returns 0 on success, negative on failure |
5510 | **/ |
5511 | static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) |
5512 | { |
5513 | u32 speed; |
5514 | bool autoneg, link_up = false; |
5515 | int ret = IXGBE_ERR_LINK_SETUP; |
5516 | |
5517 | if (hw->mac.ops.check_link) |
5518 | ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); |
5519 | |
5520 | if (ret) |
5521 | return ret; |
5522 | |
5523 | speed = hw->phy.autoneg_advertised; |
5524 | if (!speed && hw->mac.ops.get_link_capabilities) { |
5525 | ret = hw->mac.ops.get_link_capabilities(hw, &speed, |
5526 | &autoneg); |
5527 | /* remove NBASE-T speeds from default autonegotiation |
5528 | * to accommodate broken network switches in the field |
5529 | * which cannot cope with advertised NBASE-T speeds |
5530 | */ |
5531 | speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | |
5532 | IXGBE_LINK_SPEED_2_5GB_FULL); |
5533 | } |
5534 | |
5535 | if (ret) |
5536 | return ret; |
5537 | |
5538 | if (hw->mac.ops.setup_link) |
5539 | ret = hw->mac.ops.setup_link(hw, speed, link_up); |
5540 | |
5541 | return ret; |
5542 | } |
5543 | |
5544 | /** |
5545 | * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset |
5546 | * @adapter: board private structure |
5547 | * |
5548 | * On a reset we need to clear out the VF stats or accounting gets |
5549 | * messed up because they're not clear on read. |
5550 | **/ |
5551 | static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) |
5552 | { |
5553 | struct ixgbe_hw *hw = &adapter->hw; |
5554 | int i; |
5555 | |
5556 | for (i = 0; i < adapter->num_vfs; i++) { |
5557 | adapter->vfinfo[i].last_vfstats.gprc = |
5558 | IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); |
5559 | adapter->vfinfo[i].saved_rst_vfstats.gprc += |
5560 | adapter->vfinfo[i].vfstats.gprc; |
5561 | adapter->vfinfo[i].vfstats.gprc = 0; |
5562 | adapter->vfinfo[i].last_vfstats.gptc = |
5563 | IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); |
5564 | adapter->vfinfo[i].saved_rst_vfstats.gptc += |
5565 | adapter->vfinfo[i].vfstats.gptc; |
5566 | adapter->vfinfo[i].vfstats.gptc = 0; |
5567 | adapter->vfinfo[i].last_vfstats.gorc = |
5568 | IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); |
5569 | adapter->vfinfo[i].saved_rst_vfstats.gorc += |
5570 | adapter->vfinfo[i].vfstats.gorc; |
5571 | adapter->vfinfo[i].vfstats.gorc = 0; |
5572 | adapter->vfinfo[i].last_vfstats.gotc = |
5573 | IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); |
5574 | adapter->vfinfo[i].saved_rst_vfstats.gotc += |
5575 | adapter->vfinfo[i].vfstats.gotc; |
5576 | adapter->vfinfo[i].vfstats.gotc = 0; |
5577 | adapter->vfinfo[i].last_vfstats.mprc = |
5578 | IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); |
5579 | adapter->vfinfo[i].saved_rst_vfstats.mprc += |
5580 | adapter->vfinfo[i].vfstats.mprc; |
5581 | adapter->vfinfo[i].vfstats.mprc = 0; |
5582 | } |
5583 | } |
5584 | |
5585 | static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) |
5586 | { |
5587 | struct ixgbe_hw *hw = &adapter->hw; |
5588 | u32 gpie = 0; |
5589 | |
5590 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
5591 | gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | |
5592 | IXGBE_GPIE_OCD; |
5593 | gpie |= IXGBE_GPIE_EIAME; |
5594 | /* |
5595 | * use EIAM to auto-mask when MSI-X interrupt is asserted |
5596 | * this saves a register write for every interrupt |
5597 | */ |
5598 | switch (hw->mac.type) { |
5599 | case ixgbe_mac_82598EB: |
5600 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
5601 | break; |
5602 | case ixgbe_mac_82599EB: |
5603 | case ixgbe_mac_X540: |
5604 | case ixgbe_mac_X550: |
5605 | case ixgbe_mac_X550EM_x: |
5606 | case ixgbe_mac_x550em_a: |
5607 | default: |
5608 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); |
5609 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); |
5610 | break; |
5611 | } |
5612 | } else { |
5613 | /* legacy interrupts, use EIAM to auto-mask when reading EICR, |
5614 | * specifically only auto mask tx and rx interrupts */ |
5615 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
5616 | } |
5617 | |
5618 | /* XXX: to interrupt immediately for EICS writes, enable this */ |
5619 | /* gpie |= IXGBE_GPIE_EIMEN; */ |
5620 | |
5621 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
5622 | gpie &= ~IXGBE_GPIE_VTMODE_MASK; |
5623 | |
5624 | switch (adapter->ring_feature[RING_F_VMDQ].mask) { |
5625 | case IXGBE_82599_VMDQ_8Q_MASK: |
5626 | gpie |= IXGBE_GPIE_VTMODE_16; |
5627 | break; |
5628 | case IXGBE_82599_VMDQ_4Q_MASK: |
5629 | gpie |= IXGBE_GPIE_VTMODE_32; |
5630 | break; |
5631 | default: |
5632 | gpie |= IXGBE_GPIE_VTMODE_64; |
5633 | break; |
5634 | } |
5635 | } |
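     | /* VT mode trades queues for pools: a VMDQ mask selecting 8 queues per
     | * pool leaves 16 pools, 4 queues leave 32 pools, and the 2-queue
     | * default leaves 64 pools.
     | */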
5636 | |
5637 | /* Enable Thermal over heat sensor interrupt */ |
5638 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { |
5639 | switch (adapter->hw.mac.type) { |
5640 | case ixgbe_mac_82599EB: |
5641 | gpie |= IXGBE_SDP0_GPIEN_8259X; |
5642 | break; |
5643 | default: |
5644 | break; |
5645 | } |
5646 | } |
5647 | |
5648 | /* Enable fan failure interrupt */ |
5649 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
5650 | gpie |= IXGBE_SDP1_GPIEN(hw); |
5651 | |
5652 | switch (hw->mac.type) { |
5653 | case ixgbe_mac_82599EB: |
5654 | gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; |
5655 | break; |
5656 | case ixgbe_mac_X550EM_x: |
5657 | case ixgbe_mac_x550em_a: |
5658 | gpie |= IXGBE_SDP0_GPIEN_X540; |
5659 | break; |
5660 | default: |
5661 | break; |
5662 | } |
5663 | |
5664 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
5665 | } |
5666 | |
5667 | static void ixgbe_up_complete(struct ixgbe_adapter *adapter) |
5668 | { |
5669 | struct ixgbe_hw *hw = &adapter->hw; |
5670 | int err; |
5671 | u32 ctrl_ext; |
5672 | |
5673 | ixgbe_get_hw_control(adapter); |
5674 | ixgbe_setup_gpie(adapter); |
5675 | |
5676 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
5677 | ixgbe_configure_msix(adapter); |
5678 | else |
5679 | ixgbe_configure_msi_and_legacy(adapter); |
5680 | |
5681 | /* enable the optics for 82599 SFP+ fiber */ |
5682 | if (hw->mac.ops.enable_tx_laser) |
5683 | hw->mac.ops.enable_tx_laser(hw); |
5684 | |
5685 | if (hw->phy.ops.set_phy_power) |
5686 | hw->phy.ops.set_phy_power(hw, true); |
5687 | |
5688 | smp_mb__before_atomic(); |
5689 | clear_bit(__IXGBE_DOWN, &adapter->state);
5690 | ixgbe_napi_enable_all(adapter); |
5691 | |
5692 | if (ixgbe_is_sfp(hw)) { |
5693 | ixgbe_sfp_link_config(adapter); |
5694 | } else { |
5695 | err = ixgbe_non_sfp_link_config(hw); |
5696 | if (err) |
5697 | e_err(probe, "link_config FAILED %d\n", err);
5698 | } |
5699 | |
5700 | /* clear any pending interrupts, may auto mask */ |
5701 | IXGBE_READ_REG(hw, IXGBE_EICR); |
5702 | ixgbe_irq_enable(adapter, true, true);
5703 | |
5704 | /* |
5705 | * If this adapter has a fan, check to see if we had a failure |
5706 | * before we enabled the interrupt. |
5707 | */ |
5708 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { |
5709 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); |
5710 | if (esdp & IXGBE_ESDP_SDP1) |
5711 | e_crit(drv, "Fan has stopped, replace the adapter\n");
5712 | } |
5713 | |
5714 | /* bring the link up in the watchdog, this could race with our first |
5715 | * link up interrupt but shouldn't be a problem */ |
5716 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
5717 | adapter->link_check_timeout = jiffies; |
5718 | mod_timer(&adapter->service_timer, jiffies);
5719 | |
5720 | ixgbe_clear_vf_stats_counters(adapter); |
5721 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ |
5722 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
5723 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; |
5724 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); |
5725 | |
5726 | /* update setting rx tx for all active vfs */ |
5727 | ixgbe_set_all_vfs(adapter); |
5728 | } |
5729 | |
5730 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) |
5731 | { |
5732 | /* put off any impending NetWatchDogTimeout */ |
5733 | netif_trans_update(adapter->netdev);
5734 | |
5735 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5736 | usleep_range(1000, 2000);
5737 | if (adapter->hw.phy.type == ixgbe_phy_fw) |
5738 | ixgbe_watchdog_link_is_down(adapter); |
5739 | ixgbe_down(adapter); |
5740 | /* |
5741 | * If SR-IOV enabled then wait a bit before bringing the adapter |
5742 | * back up to give the VFs time to respond to the reset. The |
5743 | * two second wait is based upon the watchdog timer cycle in |
5744 | * the VF driver. |
5745 | */ |
5746 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
5747 | msleep(2000);
5748 | ixgbe_up(adapter); |
5749 | clear_bit(__IXGBE_RESETTING, &adapter->state);
5750 | } |
5751 | |
5752 | void ixgbe_up(struct ixgbe_adapter *adapter) |
5753 | { |
5754 | /* hardware has been reset, we need to reload some things */ |
5755 | ixgbe_configure(adapter); |
5756 | |
5757 | ixgbe_up_complete(adapter); |
5758 | } |
5759 | |
5760 | static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) |
5761 | { |
5762 | u16 devctl2; |
5763 | |
5764 | pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5765 | |
5766 | switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) { |
5767 | case IXGBE_PCIDEVCTRL2_17_34s: |
5768 | case IXGBE_PCIDEVCTRL2_4_8s: |
5769 | /* For now we cap the upper limit on delay to 2 seconds |
5770 | * as we end up going up to 34 seconds of delay in worst |
5771 | * case timeout value. |
5772 | */ |
5773 | case IXGBE_PCIDEVCTRL2_1_2s: |
5774 | return 2000000ul; /* 2.0 s */ |
5775 | case IXGBE_PCIDEVCTRL2_260_520ms: |
5776 | return 520000ul; /* 520 ms */ |
5777 | case IXGBE_PCIDEVCTRL2_65_130ms: |
5778 | return 130000ul; /* 130 ms */ |
5779 | case IXGBE_PCIDEVCTRL2_16_32ms: |
5780 | return 32000ul; /* 32 ms */ |
5781 | case IXGBE_PCIDEVCTRL2_1_2ms: |
5782 | return 2000ul; /* 2 ms */ |
5783 | case IXGBE_PCIDEVCTRL2_50_100us: |
5784 | return 100ul; /* 100 us */ |
5785 | case IXGBE_PCIDEVCTRL2_16_32ms_def: |
5786 | return 32000ul; /* 32 ms */ |
5787 | default: |
5788 | break; |
5789 | } |
5790 | |
5791 | /* We shouldn't need to hit this path, but just in case, default as
5792 | * though the completion timeout is not supported and use 32 ms.
5793 | */
5794 | return 32000ul; |
5795 | } |
5796 | |
5797 | void ixgbe_disable_rx(struct ixgbe_adapter *adapter) |
5798 | { |
5799 | unsigned long wait_delay, delay_interval; |
5800 | struct ixgbe_hw *hw = &adapter->hw; |
5801 | int i, wait_loop; |
5802 | u32 rxdctl; |
5803 | |
5804 | /* disable receives */ |
5805 | hw->mac.ops.disable_rx(hw); |
5806 | |
5807 | if (ixgbe_removed(hw->hw_addr))
5808 | return; |
5809 | |
5810 | /* disable all enabled Rx queues */ |
5811 | for (i = 0; i < adapter->num_rx_queues; i++) { |
5812 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
5813 | u8 reg_idx = ring->reg_idx; |
5814 | |
5815 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
5816 | rxdctl &= ~IXGBE_RXDCTL_ENABLE; |
5817 | rxdctl |= IXGBE_RXDCTL_SWFLSH; |
5818 | |
5819 | /* write value back with RXDCTL.ENABLE bit cleared */ |
5820 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
5821 | } |
5822 | |
5823 | /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ |
5824 | if (hw->mac.type == ixgbe_mac_82598EB && |
5825 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
5826 | return; |
5827 | |
5828 | /* Determine our minimum delay interval. We will increase this value |
5829 | * with each subsequent test. This way if the device returns quickly |
5830 | * we should spend as little time as possible waiting, however as |
5831 | * the time increases we will wait for larger periods of time. |
5832 | * |
5833 | * The trick here is that we increase the interval using the |
5834 | * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result |
5835 | * of that wait is that it totals up to 100x whatever interval we |
5836 | * choose. Since our minimum wait is 100us we can just divide the |
5837 | * total timeout by 100 to get our minimum delay interval. |
5838 | */ |
5839 | delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
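     | /* Illustrative numbers: a 2 s completion timeout gives a 20 ms
     | * interval, and the sleeps of 1x, 3x, ... 19x the interval below sum
     | * to 100x, i.e. the full 2 s (assuming IXGBE_MAX_RX_DESC_POLL is 10).
     | */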
5840 | |
5841 | wait_loop = IXGBE_MAX_RX_DESC_POLL; |
5842 | wait_delay = delay_interval; |
5843 | |
5844 | while (wait_loop--) { |
5845 | usleep_range(wait_delay, wait_delay + 10);
5846 | wait_delay += delay_interval * 2; |
5847 | rxdctl = 0; |
5848 | |
5849 | /* OR together the reading of all the active RXDCTL registers, |
5850 | * and then test the result. We need the disable to complete |
5851 | * before we start freeing the memory and invalidating the |
5852 | * DMA mappings. |
5853 | */ |
5854 | for (i = 0; i < adapter->num_rx_queues; i++) { |
5855 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
5856 | u8 reg_idx = ring->reg_idx; |
5857 | |
5858 | rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
5859 | } |
5860 | |
5861 | if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) |
5862 | return; |
5863 | } |
5864 | |
5865 | e_err(drv, |
5866 | "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5867 | } |
5868 | |
5869 | void ixgbe_disable_tx(struct ixgbe_adapter *adapter) |
5870 | { |
5871 | unsigned long wait_delay, delay_interval; |
5872 | struct ixgbe_hw *hw = &adapter->hw; |
5873 | int i, wait_loop; |
5874 | u32 txdctl; |
5875 | |
5876 | if (ixgbe_removed(hw->hw_addr))
5877 | return; |
5878 | |
5879 | /* disable all enabled Tx queues */ |
5880 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5881 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
5882 | u8 reg_idx = ring->reg_idx; |
5883 | |
5884 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
5885 | } |
5886 | |
5887 | /* disable all enabled XDP Tx queues */ |
5888 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
5889 | struct ixgbe_ring *ring = adapter->xdp_ring[i]; |
5890 | u8 reg_idx = ring->reg_idx; |
5891 | |
5892 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
5893 | } |
5894 | |
5895 | /* If the link is not up there shouldn't be much in the way of |
5896 | * pending transactions. Those that are left will be flushed out |
5897 | * when the reset logic goes through the flush sequence to clean out |
5898 | * the pending Tx transactions. |
5899 | */ |
5900 | if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
5901 | goto dma_engine_disable; |
5902 | |
5903 | /* Determine our minimum delay interval. We will increase this value |
5904 | * with each subsequent test. This way if the device returns quickly |
5905 | * we should spend as little time as possible waiting, however as |
5906 | * the time increases we will wait for larger periods of time. |
5907 | * |
5908 | * The trick here is that we increase the interval using the |
5909 | * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result |
5910 | * of that wait is that it totals up to 100x whatever interval we |
5911 | * choose. Since our minimum wait is 100us we can just divide the |
5912 | * total timeout by 100 to get our minimum delay interval. |
5913 | */ |
5914 | delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
5915 | |
5916 | wait_loop = IXGBE_MAX_RX_DESC_POLL; |
5917 | wait_delay = delay_interval; |
5918 | |
5919 | while (wait_loop--) { |
5920 | usleep_range(wait_delay, wait_delay + 10);
5921 | wait_delay += delay_interval * 2; |
5922 | txdctl = 0; |
5923 | |
5924 | /* OR together the reading of all the active TXDCTL registers, |
5925 | * and then test the result. We need the disable to complete |
5926 | * before we start freeing the memory and invalidating the |
5927 | * DMA mappings. |
5928 | */ |
5929 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5930 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
5931 | u8 reg_idx = ring->reg_idx; |
5932 | |
5933 | txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
5934 | } |
5935 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
5936 | struct ixgbe_ring *ring = adapter->xdp_ring[i]; |
5937 | u8 reg_idx = ring->reg_idx; |
5938 | |
5939 | txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
5940 | } |
5941 | |
5942 | if (!(txdctl & IXGBE_TXDCTL_ENABLE)) |
5943 | goto dma_engine_disable; |
5944 | } |
5945 | |
5946 | e_err(drv, |
5947 | "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5948 | |
5949 | dma_engine_disable: |
5950 | /* Disable the Tx DMA engine on 82599 and later MAC */ |
5951 | switch (hw->mac.type) { |
5952 | case ixgbe_mac_82599EB: |
5953 | case ixgbe_mac_X540: |
5954 | case ixgbe_mac_X550: |
5955 | case ixgbe_mac_X550EM_x: |
5956 | case ixgbe_mac_x550em_a: |
5957 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, |
5958 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & |
5959 | ~IXGBE_DMATXCTL_TE)); |
5960 | fallthrough; |
5961 | default: |
5962 | break; |
5963 | } |
5964 | } |
5965 | |
5966 | void ixgbe_reset(struct ixgbe_adapter *adapter) |
5967 | { |
5968 | struct ixgbe_hw *hw = &adapter->hw; |
5969 | struct net_device *netdev = adapter->netdev; |
5970 | int err; |
5971 | |
5972 | if (ixgbe_removed(hw->hw_addr))
5973 | return; |
5974 | /* lock SFP init bit to prevent race conditions with the watchdog */ |
5975 | while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5976 | usleep_range(1000, 2000);
5977 | |
5978 | /* clear all SFP and link config related flags while holding SFP_INIT */ |
5979 | adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | |
5980 | IXGBE_FLAG2_SFP_NEEDS_RESET); |
5981 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; |
5982 | |
5983 | err = hw->mac.ops.init_hw(hw); |
5984 | switch (err) { |
5985 | case 0: |
5986 | case IXGBE_ERR_SFP_NOT_PRESENT: |
5987 | case IXGBE_ERR_SFP_NOT_SUPPORTED: |
5988 | break; |
5989 | case IXGBE_ERR_PRIMARY_REQUESTS_PENDING: |
5990 | e_dev_err("primary disable timed out\n");
5991 | break; |
5992 | case IXGBE_ERR_EEPROM_VERSION: |
5993 | /* We are running on a pre-production device, log a warning */ |
5994 | e_dev_warn("This device is a pre-production adapter/LOM. " |
5995 | "Please be aware there may be issues associated with " |
5996 | "your hardware. If you are experiencing problems " |
5997 | "please contact your Intel or hardware " |
5998 | "representative who provided you with this " |
5999 | "hardware.\n");
6000 | break; |
6001 | default: |
6002 | e_dev_err("Hardware Error: %d\n", err);
6003 | } |
6004 | |
6005 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6006 | |
6007 | /* flush entries out of MAC table */ |
6008 | ixgbe_flush_sw_mac_table(adapter); |
6009 | __dev_uc_unsync(netdev, NULL);
6010 | |
6011 | /* do not flush user set addresses */ |
6012 | ixgbe_mac_set_default_filter(adapter); |
6013 | |
6014 | /* update SAN MAC vmdq pool selection */ |
6015 | if (hw->mac.san_mac_rar_index) |
6016 | hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); |
6017 | |
6018 | if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) |
6019 | ixgbe_ptp_reset(adapter); |
6020 | |
6021 | if (hw->phy.ops.set_phy_power) { |
6022 | if (!netif_running(adapter->netdev) && !adapter->wol)
6023 | hw->phy.ops.set_phy_power(hw, false); |
6024 | else |
6025 | hw->phy.ops.set_phy_power(hw, true); |
6026 | } |
6027 | } |
6028 | |
6029 | /** |
6030 | * ixgbe_clean_tx_ring - Free Tx Buffers |
6031 | * @tx_ring: ring to be cleaned |
6032 | **/ |
6033 | static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) |
6034 | { |
6035 | u16 i = tx_ring->next_to_clean; |
6036 | struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; |
6037 | |
6038 | if (tx_ring->xsk_pool) { |
6039 | ixgbe_xsk_clean_tx_ring(tx_ring); |
6040 | goto out; |
6041 | } |
6042 | |
6043 | while (i != tx_ring->next_to_use) { |
6044 | union ixgbe_adv_tx_desc *eop_desc, *tx_desc; |
6045 | |
6046 | /* Free all the Tx ring sk_buffs */ |
6047 | if (ring_is_xdp(tx_ring)) |
6048 | xdp_return_frame(tx_buffer->xdpf);
6049 | else
6050 | dev_kfree_skb_any(tx_buffer->skb);
6051 | |
6052 | /* unmap skb header data */ |
6053 | dma_unmap_single(tx_ring->dev, |
6054 | dma_unmap_addr(tx_buffer, dma), |
6055 | dma_unmap_len(tx_buffer, len), |
6056 | DMA_TO_DEVICE); |
6057 | |
6058 | /* check for eop_desc to determine the end of the packet */ |
6059 | eop_desc = tx_buffer->next_to_watch; |
6060 | tx_desc = IXGBE_TX_DESC(tx_ring, i); |
6061 | |
6062 | /* unmap remaining buffers */ |
6063 | while (tx_desc != eop_desc) { |
6064 | tx_buffer++; |
6065 | tx_desc++; |
6066 | i++; |
6067 | if (unlikely(i == tx_ring->count)) { |
6068 | i = 0; |
6069 | tx_buffer = tx_ring->tx_buffer_info; |
6070 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
6071 | } |
6072 | |
6073 | /* unmap any remaining paged data */ |
6074 | if (dma_unmap_len(tx_buffer, len)) |
6075 | dma_unmap_page(tx_ring->dev, |
6076 | dma_unmap_addr(tx_buffer, dma), |
6077 | dma_unmap_len(tx_buffer, len), |
6078 | DMA_TO_DEVICE); |
6079 | } |
6080 | |
6081 | /* move us one more past the eop_desc for start of next pkt */ |
6082 | tx_buffer++; |
6083 | i++; |
6084 | if (unlikely(i == tx_ring->count)) { |
6085 | i = 0; |
6086 | tx_buffer = tx_ring->tx_buffer_info; |
6087 | } |
6088 | } |
6089 | |
6090 | /* reset BQL for queue */ |
6091 | if (!ring_is_xdp(tx_ring)) |
6092 | netdev_tx_reset_queue(txring_txq(tx_ring));
6093 | |
6094 | out: |
6095 | /* reset next_to_use and next_to_clean */ |
6096 | tx_ring->next_to_use = 0; |
6097 | tx_ring->next_to_clean = 0; |
6098 | } |
6099 | |
6100 | /** |
6101 | * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues |
6102 | * @adapter: board private structure |
6103 | **/ |
6104 | static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) |
6105 | { |
6106 | int i; |
6107 | |
6108 | for (i = 0; i < adapter->num_rx_queues; i++) |
6109 | ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6110 | } |
6111 | |
6112 | /** |
6113 | * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues |
6114 | * @adapter: board private structure |
6115 | **/ |
6116 | static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) |
6117 | { |
6118 | int i; |
6119 | |
6120 | for (i = 0; i < adapter->num_tx_queues; i++) |
6121 | ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6122 | for (i = 0; i < adapter->num_xdp_queues; i++)
6123 | ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6124 | } |
6125 | |
6126 | static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) |
6127 | { |
6128 | struct hlist_node *node2; |
6129 | struct ixgbe_fdir_filter *filter; |
6130 | |
6131 | spin_lock(&adapter->fdir_perfect_lock);
6132 | |
6133 | hlist_for_each_entry_safe(filter, node2, |
6134 | &adapter->fdir_filter_list, fdir_node) { |
6135 | hlist_del(&filter->fdir_node);
6136 | kfree(filter);
6137 | } |
6138 | adapter->fdir_filter_count = 0; |
6139 | |
6140 | spin_unlock(&adapter->fdir_perfect_lock);
6141 | } |
6142 | |
6143 | void ixgbe_down(struct ixgbe_adapter *adapter) |
6144 | { |
6145 | struct net_device *netdev = adapter->netdev; |
6146 | struct ixgbe_hw *hw = &adapter->hw; |
6147 | int i; |
6148 | |
6149 | /* signal that we are down to the interrupt handler */ |
6150 | if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6151 | return; /* do nothing if already down */ |
6152 | |
6153 | /* Shut off incoming Tx traffic */ |
6154 | netif_tx_stop_all_queues(netdev);
6155 | |
6156 | /* call carrier off first to avoid false dev_watchdog timeouts */ |
6157 | netif_carrier_off(netdev);
6158 | netif_tx_disable(netdev);
6159 | |
6160 | /* Disable Rx */ |
6161 | ixgbe_disable_rx(adapter); |
6162 | |
6163 | /* synchronize_rcu() needed for pending XDP buffers to drain */ |
6164 | if (adapter->xdp_ring[0]) |
6165 | synchronize_rcu(); |
6166 | |
6167 | ixgbe_irq_disable(adapter); |
6168 | |
6169 | ixgbe_napi_disable_all(adapter); |
6170 | |
6171 | clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6172 | adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; |
6173 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; |
6174 | |
6175 | del_timer_sync(&adapter->service_timer);
6176 | |
6177 | if (adapter->num_vfs) { |
6178 | /* Clear EITR Select mapping */ |
6179 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); |
6180 | |
6181 | /* Mark all the VFs as inactive */ |
6182 | for (i = 0; i < adapter->num_vfs; i++)
6183 | adapter->vfinfo[i].clear_to_send = false; |
6184 | |
6185 | /* update setting rx tx for all active vfs */ |
6186 | ixgbe_set_all_vfs(adapter); |
6187 | } |
6188 | |
6189 | /* disable transmits in the hardware now that interrupts are off */ |
6190 | ixgbe_disable_tx(adapter); |
6191 | |
6192 | if (!pci_channel_offline(adapter->pdev))
6193 | ixgbe_reset(adapter); |
6194 | |
6195 | /* power down the optics for 82599 SFP+ fiber */ |
6196 | if (hw->mac.ops.disable_tx_laser) |
6197 | hw->mac.ops.disable_tx_laser(hw); |
6198 | |
6199 | ixgbe_clean_all_tx_rings(adapter); |
6200 | ixgbe_clean_all_rx_rings(adapter); |
6201 | } |
6202 | |
6203 | /** |
6204 | * ixgbe_set_eee_capable - helper function to determine EEE support on X550 |
6205 | * @adapter: board private structure |
6206 | */ |
6207 | static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) |
6208 | { |
6209 | struct ixgbe_hw *hw = &adapter->hw; |
6210 | |
6211 | switch (hw->device_id) { |
6212 | case IXGBE_DEV_ID_X550EM_A_1G_T: |
6213 | case IXGBE_DEV_ID_X550EM_A_1G_T_L: |
6214 | if (!hw->phy.eee_speeds_supported) |
6215 | break; |
6216 | adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; |
6217 | if (!hw->phy.eee_speeds_advertised) |
6218 | break; |
6219 | adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; |
6220 | break; |
6221 | default: |
6222 | adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; |
6223 | adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; |
6224 | break; |
6225 | } |
6226 | } |
6227 | |
6228 | /** |
6229 | * ixgbe_tx_timeout - Respond to a Tx Hang |
6230 | * @netdev: network interface device structure |
6231 | * @txqueue: queue number that timed out |
6232 | **/ |
6233 | static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) |
6234 | { |
6235 | struct ixgbe_adapter *adapter = netdev_priv(netdev);
6236 | |
6237 | /* Do the reset outside of interrupt context */ |
6238 | ixgbe_tx_timeout_reset(adapter); |
6239 | } |
6240 | |
6241 | #ifdef CONFIG_IXGBE_DCB |
6242 | static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) |
6243 | { |
6244 | struct ixgbe_hw *hw = &adapter->hw; |
6245 | struct tc_configuration *tc; |
6246 | int j; |
6247 | |
6248 | switch (hw->mac.type) { |
6249 | case ixgbe_mac_82598EB: |
6250 | case ixgbe_mac_82599EB: |
6251 | adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; |
6252 | adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; |
6253 | break; |
6254 | case ixgbe_mac_X540: |
6255 | case ixgbe_mac_X550: |
6256 | adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; |
6257 | adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; |
6258 | break; |
6259 | case ixgbe_mac_X550EM_x: |
6260 | case ixgbe_mac_x550em_a: |
6261 | default: |
6262 | adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; |
6263 | adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; |
6264 | break; |
6265 | } |
6266 | |
6267 | /* Configure DCB traffic classes */ |
6268 | for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { |
6269 | tc = &adapter->dcb_cfg.tc_config[j]; |
6270 | tc->path[DCB_TX_CONFIG].bwg_id = 0; |
6271 | tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); |
6272 | tc->path[DCB_RX_CONFIG].bwg_id = 0; |
6273 | tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); |
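     | /* 12 + (j & 1) alternates 12/13 across the eight TCs, so the BWG
     | * percentages sum to exactly 100 (4 * 12 + 4 * 13).
     | */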
6274 | tc->dcb_pfc = pfc_disabled; |
6275 | } |
6276 | |
6277 | /* Initialize default user to priority mapping, UPx->TC0 */ |
6278 | tc = &adapter->dcb_cfg.tc_config[0]; |
6279 | tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; |
6280 | tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; |
6281 | |
6282 | adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; |
6283 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; |
6284 | adapter->dcb_cfg.pfc_mode_enable = false; |
6285 | adapter->dcb_set_bitmap = 0x00; |
6286 | if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) |
6287 | adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; |
6288 | memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, |
6289 | sizeof(adapter->temp_dcb_cfg)); |
6290 | } |
6291 | #endif |
6292 | |
6293 | /** |
6294 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
6295 | * @adapter: board private structure to initialize |
6296 | * @ii: pointer to ixgbe_info for device |
6297 | * |
6298 | * ixgbe_sw_init initializes the Adapter private data structure. |
6299 | * Fields are initialized based on PCI device information and |
6300 | * OS network device settings (MTU size). |
6301 | **/ |
6302 | static int ixgbe_sw_init(struct ixgbe_adapter *adapter, |
6303 | const struct ixgbe_info *ii) |
6304 | { |
6305 | struct ixgbe_hw *hw = &adapter->hw; |
6306 | struct pci_dev *pdev = adapter->pdev; |
6307 | unsigned int rss, fdir;
6308 | u32 fwsm; |
6309 | int i; |
6310 | |
6311 | /* PCI config space info */ |
6312 | |
6313 | hw->vendor_id = pdev->vendor; |
6314 | hw->device_id = pdev->device; |
6315 | hw->revision_id = pdev->revision; |
6316 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
6317 | hw->subsystem_device_id = pdev->subsystem_device; |
6318 | |
6319 | /* get_invariants needs the device IDs */ |
6320 | ii->get_invariants(hw); |
6321 | |
6322 | /* Set common capability flags and settings */ |
6323 | rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); |
6324 | adapter->ring_feature[RING_F_RSS].limit = rss; |
6325 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
6326 | adapter->max_q_vectors = MAX_Q_VECTORS_82599; |
6327 | adapter->atr_sample_rate = 20; |
6328 | fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); |
6329 | adapter->ring_feature[RING_F_FDIR].limit = fdir; |
6330 | adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; |
6331 | adapter->ring_feature[RING_F_VMDQ].limit = 1; |
6332 | #ifdef CONFIG_IXGBE_DCA |
6333 | adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; |
6334 | #endif |
6335 | #ifdef CONFIG_IXGBE_DCB |
6336 | adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; |
6337 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
6338 | #endif |
6339 | #ifdef IXGBE_FCOE |
6340 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
6341 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; |
6342 | #ifdef CONFIG_IXGBE_DCB |
6343 | /* Default traffic class to use for FCoE */ |
6344 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; |
6345 | #endif /* CONFIG_IXGBE_DCB */ |
6346 | #endif /* IXGBE_FCOE */ |
6347 | |
6348 | /* initialize static ixgbe jump table entries */ |
	adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
					  GFP_KERNEL);
6351 | if (!adapter->jump_tables[0]) |
6352 | return -ENOMEM; |
6353 | adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; |
6354 | |
6355 | for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) |
6356 | adapter->jump_tables[i] = NULL; |
6357 | |
	adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
				     sizeof(struct ixgbe_mac_addr),
				     GFP_KERNEL);
6361 | if (!adapter->mac_table) |
6362 | return -ENOMEM; |
6363 | |
6364 | if (ixgbe_init_rss_key(adapter)) |
6365 | return -ENOMEM; |
6366 | |
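	/* one bit per queue pair, tracking which rings run AF_XDP zero-copy */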
6367 | adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); |
6368 | if (!adapter->af_xdp_zc_qps) |
6369 | return -ENOMEM; |
6370 | |
6371 | /* Set MAC specific capability flags and exceptions */ |
6372 | switch (hw->mac.type) { |
6373 | case ixgbe_mac_82598EB: |
6374 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; |
6375 | |
6376 | if (hw->device_id == IXGBE_DEV_ID_82598AT) |
6377 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; |
6378 | |
6379 | adapter->max_q_vectors = MAX_Q_VECTORS_82598; |
6380 | adapter->ring_feature[RING_F_FDIR].limit = 0; |
6381 | adapter->atr_sample_rate = 0; |
6382 | adapter->fdir_pballoc = 0; |
6383 | #ifdef IXGBE_FCOE |
6384 | adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; |
6385 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; |
6386 | #ifdef CONFIG_IXGBE_DCB |
6387 | adapter->fcoe.up = 0; |
6388 | #endif /* IXGBE_DCB */ |
6389 | #endif /* IXGBE_FCOE */ |
6390 | break; |
6391 | case ixgbe_mac_82599EB: |
6392 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) |
6393 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
6394 | break; |
6395 | case ixgbe_mac_X540: |
6396 | fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); |
6397 | if (fwsm & IXGBE_FWSM_TS_ENABLED) |
6398 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
6399 | break; |
6400 | case ixgbe_mac_x550em_a: |
6401 | switch (hw->device_id) { |
6402 | case IXGBE_DEV_ID_X550EM_A_1G_T: |
6403 | case IXGBE_DEV_ID_X550EM_A_1G_T_L: |
6404 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
6405 | break; |
6406 | default: |
6407 | break; |
6408 | } |
6409 | fallthrough; |
6410 | case ixgbe_mac_X550EM_x: |
6411 | #ifdef CONFIG_IXGBE_DCB |
6412 | adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; |
6413 | #endif |
6414 | #ifdef IXGBE_FCOE |
6415 | adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; |
6416 | #ifdef CONFIG_IXGBE_DCB |
6417 | adapter->fcoe.up = 0; |
6418 | #endif /* IXGBE_DCB */ |
6419 | #endif /* IXGBE_FCOE */ |
6420 | fallthrough; |
6421 | case ixgbe_mac_X550: |
6422 | if (hw->mac.type == ixgbe_mac_X550) |
6423 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
6424 | #ifdef CONFIG_IXGBE_DCA |
6425 | adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; |
6426 | #endif |
6427 | break; |
6428 | default: |
6429 | break; |
6430 | } |
6431 | |
6432 | #ifdef IXGBE_FCOE |
6433 | /* FCoE support exists, always init the FCoE lock */ |
6434 | spin_lock_init(&adapter->fcoe.lock); |
6435 | |
6436 | #endif |
6437 | /* n-tuple support exists, always init our spinlock */ |
6438 | spin_lock_init(&adapter->fdir_perfect_lock); |
6439 | |
6440 | /* init spinlock to avoid concurrency of VF resources */ |
6441 | spin_lock_init(&adapter->vfs_lock); |
6442 | |
6443 | #ifdef CONFIG_IXGBE_DCB |
6444 | ixgbe_init_dcb(adapter); |
6445 | #endif |
6446 | ixgbe_init_ipsec_offload(adapter); |
6447 | |
6448 | /* default flow control settings */ |
6449 | hw->fc.requested_mode = ixgbe_fc_full; |
6450 | hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ |
6451 | ixgbe_pbthresh_setup(adapter); |
6452 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; |
6453 | hw->fc.send_xon = true; |
6454 | hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); |
6455 | |
6456 | #ifdef CONFIG_PCI_IOV |
6457 | if (max_vfs > 0) |
6458 | e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n" ); |
6459 | |
6460 | /* assign number of SR-IOV VFs */ |
6461 | if (hw->mac.type != ixgbe_mac_82598EB) { |
6462 | if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { |
6463 | max_vfs = 0; |
6464 | e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n" ); |
6465 | } |
6466 | } |
6467 | #endif /* CONFIG_PCI_IOV */ |
6468 | |
6469 | /* enable itr by default in dynamic mode */ |
6470 | adapter->rx_itr_setting = 1; |
6471 | adapter->tx_itr_setting = 1; |
6472 | |
6473 | /* set default ring sizes */ |
6474 | adapter->tx_ring_count = IXGBE_DEFAULT_TXD; |
6475 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; |
6476 | |
6477 | /* set default work limits */ |
6478 | adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; |
6479 | |
6480 | /* initialize eeprom parameters */ |
6481 | if (ixgbe_init_eeprom_params_generic(hw)) { |
6482 | e_dev_err("EEPROM initialization failed\n" ); |
6483 | return -EIO; |
6484 | } |
6485 | |
6486 | /* PF holds first pool slot */ |
	set_bit(0, adapter->fwd_bitmask);
	set_bit(__IXGBE_DOWN, &adapter->state);
6489 | |
6490 | /* enable locking for XDP_TX if we have more CPUs than queues */ |
6491 | if (nr_cpu_ids > IXGBE_MAX_XDP_QS) |
6492 | static_branch_enable(&ixgbe_xdp_locking_key); |
6493 | |
6494 | return 0; |
6495 | } |
6496 | |
6497 | /** |
6498 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) |
6499 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
6500 | * |
6501 | * Return 0 on success, negative on failure |
6502 | **/ |
6503 | int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) |
6504 | { |
6505 | struct device *dev = tx_ring->dev; |
6506 | int orig_node = dev_to_node(dev); |
6507 | int ring_node = NUMA_NO_NODE; |
6508 | int size; |
6509 | |
6510 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
6511 | |
6512 | if (tx_ring->q_vector) |
6513 | ring_node = tx_ring->q_vector->numa_node; |
6514 | |
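	/* Prefer the ring's local NUMA node for the buffer_info array, but
	 * fall back to any node rather than failing the allocation.
	 */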
	tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6516 | if (!tx_ring->tx_buffer_info) |
6517 | tx_ring->tx_buffer_info = vmalloc(size); |
6518 | if (!tx_ring->tx_buffer_info) |
6519 | goto err; |
6520 | |
6521 | /* round up to nearest 4K */ |
6522 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
6523 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
6524 | |
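	/* dma_alloc_coherent() has no NUMA node argument, so temporarily
	 * point the device at the ring's node to get node-local descriptor
	 * memory, restore the original node, and retry without a node
	 * preference if the node-local attempt fails.
	 */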
	set_dev_node(dev, ring_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
6534 | if (!tx_ring->desc) |
6535 | goto err; |
6536 | |
6537 | tx_ring->next_to_use = 0; |
6538 | tx_ring->next_to_clean = 0; |
6539 | return 0; |
6540 | |
6541 | err: |
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6545 | return -ENOMEM; |
6546 | } |
6547 | |
6548 | /** |
6549 | * ixgbe_setup_all_tx_resources - allocate all queues Tx resources |
6550 | * @adapter: board private structure |
6551 | * |
6552 | * If this function returns with an error, then it's possible one or |
6553 | * more of the rings is populated (while the rest are not). It is the |
 * caller's duty to clean those orphaned rings.
6555 | * |
6556 | * Return 0 on success, negative on failure |
6557 | **/ |
6558 | static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) |
6559 | { |
6560 | int i, j = 0, err = 0; |
6561 | |
6562 | for (i = 0; i < adapter->num_tx_queues; i++) { |
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6564 | if (!err) |
6565 | continue; |
6566 | |
6567 | e_err(probe, "Allocation for Tx Queue %u failed\n" , i); |
6568 | goto err_setup_tx; |
6569 | } |
6570 | for (j = 0; j < adapter->num_xdp_queues; j++) { |
		err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6572 | if (!err) |
6573 | continue; |
6574 | |
		e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6576 | goto err_setup_tx; |
6577 | } |
6578 | |
6579 | return 0; |
6580 | err_setup_tx: |
6581 | /* rewind the index freeing the rings as we go */ |
6582 | while (j--) |
6583 | ixgbe_free_tx_resources(adapter->xdp_ring[j]); |
6584 | while (i--) |
6585 | ixgbe_free_tx_resources(adapter->tx_ring[i]); |
6586 | return err; |
6587 | } |
6588 | |
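/* A ring may be registered before its q_vector has been assigned; report a
 * napi_id of 0 in that case.
 */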
6589 | static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring) |
6590 | { |
6591 | struct ixgbe_q_vector *q_vector = rx_ring->q_vector; |
6592 | |
6593 | return q_vector ? q_vector->napi.napi_id : 0; |
6594 | } |
6595 | |
6596 | /** |
6597 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) |
6598 | * @adapter: pointer to ixgbe_adapter |
6599 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
6600 | * |
6601 | * Returns 0 on success, negative on failure |
6602 | **/ |
6603 | int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, |
6604 | struct ixgbe_ring *rx_ring) |
6605 | { |
6606 | struct device *dev = rx_ring->dev; |
6607 | int orig_node = dev_to_node(dev); |
6608 | int ring_node = NUMA_NO_NODE; |
6609 | int size; |
6610 | |
6611 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
6612 | |
6613 | if (rx_ring->q_vector) |
6614 | ring_node = rx_ring->q_vector->numa_node; |
6615 | |
	rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6617 | if (!rx_ring->rx_buffer_info) |
6618 | rx_ring->rx_buffer_info = vmalloc(size); |
6619 | if (!rx_ring->rx_buffer_info) |
6620 | goto err; |
6621 | |
6622 | /* Round up to nearest 4K */ |
6623 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
6624 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
6625 | |
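	/* As on the Tx path, steer the coherent DMA allocation toward the
	 * ring's NUMA node first, then retry without a node preference.
	 */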
	set_dev_node(dev, ring_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
6635 | if (!rx_ring->desc) |
6636 | goto err; |
6637 | |
6638 | rx_ring->next_to_clean = 0; |
6639 | rx_ring->next_to_use = 0; |
6640 | |
6641 | /* XDP RX-queue info */ |
	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
			     rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
6644 | goto err; |
6645 | |
6646 | WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog); |
6647 | |
6648 | return 0; |
6649 | err: |
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6653 | return -ENOMEM; |
6654 | } |
6655 | |
6656 | /** |
6657 | * ixgbe_setup_all_rx_resources - allocate all queues Rx resources |
6658 | * @adapter: board private structure |
6659 | * |
6660 | * If this function returns with an error, then it's possible one or |
6661 | * more of the rings is populated (while the rest are not). It is the |
 * caller's duty to clean those orphaned rings.
6663 | * |
6664 | * Return 0 on success, negative on failure |
6665 | **/ |
6666 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) |
6667 | { |
6668 | int i, err = 0; |
6669 | |
6670 | for (i = 0; i < adapter->num_rx_queues; i++) { |
		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6672 | if (!err) |
6673 | continue; |
6674 | |
6675 | e_err(probe, "Allocation for Rx Queue %u failed\n" , i); |
6676 | goto err_setup_rx; |
6677 | } |
6678 | |
6679 | #ifdef IXGBE_FCOE |
6680 | err = ixgbe_setup_fcoe_ddp_resources(adapter); |
6681 | if (!err) |
6682 | #endif |
6683 | return 0; |
6684 | err_setup_rx: |
6685 | /* rewind the index freeing the rings as we go */ |
6686 | while (i--) |
6687 | ixgbe_free_rx_resources(adapter->rx_ring[i]); |
6688 | return err; |
6689 | } |
6690 | |
6691 | /** |
6692 | * ixgbe_free_tx_resources - Free Tx Resources per Queue |
6693 | * @tx_ring: Tx descriptor ring for a specific queue |
6694 | * |
6695 | * Free all transmit software resources |
6696 | **/ |
6697 | void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) |
6698 | { |
6699 | ixgbe_clean_tx_ring(tx_ring); |
6700 | |
	vfree(tx_ring->tx_buffer_info);
6702 | tx_ring->tx_buffer_info = NULL; |
6703 | |
6704 | /* if not set, then don't free */ |
6705 | if (!tx_ring->desc) |
6706 | return; |
6707 | |
	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
6710 | |
6711 | tx_ring->desc = NULL; |
6712 | } |
6713 | |
6714 | /** |
6715 | * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues |
6716 | * @adapter: board private structure |
6717 | * |
6718 | * Free all transmit software resources |
6719 | **/ |
6720 | static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) |
6721 | { |
6722 | int i; |
6723 | |
6724 | for (i = 0; i < adapter->num_tx_queues; i++) |
6725 | if (adapter->tx_ring[i]->desc) |
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
6727 | for (i = 0; i < adapter->num_xdp_queues; i++) |
6728 | if (adapter->xdp_ring[i]->desc) |
			ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6730 | } |
6731 | |
6732 | /** |
6733 | * ixgbe_free_rx_resources - Free Rx Resources |
6734 | * @rx_ring: ring to clean the resources from |
6735 | * |
6736 | * Free all receive software resources |
6737 | **/ |
6738 | void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) |
6739 | { |
6740 | ixgbe_clean_rx_ring(rx_ring); |
6741 | |
6742 | rx_ring->xdp_prog = NULL; |
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
6745 | rx_ring->rx_buffer_info = NULL; |
6746 | |
6747 | /* if not set, then don't free */ |
6748 | if (!rx_ring->desc) |
6749 | return; |
6750 | |
	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);
6753 | |
6754 | rx_ring->desc = NULL; |
6755 | } |
6756 | |
6757 | /** |
6758 | * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues |
6759 | * @adapter: board private structure |
6760 | * |
6761 | * Free all receive software resources |
6762 | **/ |
6763 | static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) |
6764 | { |
6765 | int i; |
6766 | |
6767 | #ifdef IXGBE_FCOE |
6768 | ixgbe_free_fcoe_ddp_resources(adapter); |
6769 | |
6770 | #endif |
6771 | for (i = 0; i < adapter->num_rx_queues; i++) |
6772 | if (adapter->rx_ring[i]->desc) |
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
6774 | } |
6775 | |
6776 | /** |
6777 | * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP |
6778 | * @adapter: device handle, pointer to adapter |
6779 | */ |
6780 | static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) |
6781 | { |
6782 | if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) |
6783 | return IXGBE_RXBUFFER_2K; |
6784 | else |
6785 | return IXGBE_RXBUFFER_3K; |
6786 | } |
6787 | |
6788 | /** |
6789 | * ixgbe_change_mtu - Change the Maximum Transfer Unit |
6790 | * @netdev: network interface device structure |
6791 | * @new_mtu: new value for maximum frame size |
6792 | * |
6793 | * Returns 0 on success, negative on failure |
6794 | **/ |
6795 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) |
6796 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6798 | |
6799 | if (ixgbe_enabled_xdp_adapter(adapter)) { |
6800 | int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; |
6801 | |
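		/* the whole frame, MTU plus L2 header overhead, must fit in
		 * one XDP buffer
		 */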
6802 | if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) { |
6803 | e_warn(probe, "Requested MTU size is not supported with XDP\n" ); |
6804 | return -EINVAL; |
6805 | } |
6806 | } |
6807 | |
6808 | /* |
6809 | * For 82599EB we cannot allow legacy VFs to enable their receive |
6810 | * paths when MTU greater than 1500 is configured. So display a |
6811 | * warning that legacy VFs will be disabled. |
6812 | */ |
6813 | if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && |
6814 | (adapter->hw.mac.type == ixgbe_mac_82599EB) && |
6815 | (new_mtu > ETH_DATA_LEN)) |
6816 | e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n" ); |
6817 | |
	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
6820 | |
6821 | /* must set new MTU before calling down or up */ |
6822 | netdev->mtu = new_mtu; |
6823 | |
	if (netif_running(netdev))
6825 | ixgbe_reinit_locked(adapter); |
6826 | |
6827 | return 0; |
6828 | } |
6829 | |
6830 | /** |
6831 | * ixgbe_open - Called when a network interface is made active |
6832 | * @netdev: network interface device structure |
6833 | * |
6834 | * Returns 0 on success, negative value on failure |
6835 | * |
6836 | * The open entry point is called when a network interface is made |
6837 | * active by the system (IFF_UP). At this point all resources needed |
6838 | * for transmit and receive operations are allocated, the interrupt |
6839 | * handler is registered with the OS, the watchdog timer is started, |
6840 | * and the stack is notified that the interface is ready. |
6841 | **/ |
6842 | int ixgbe_open(struct net_device *netdev) |
6843 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6845 | struct ixgbe_hw *hw = &adapter->hw; |
6846 | int err, queues; |
6847 | |
6848 | /* disallow open during test */ |
6849 | if (test_bit(__IXGBE_TESTING, &adapter->state)) |
6850 | return -EBUSY; |
6851 | |
	netif_carrier_off(netdev);
6853 | |
6854 | /* allocate transmit descriptors */ |
6855 | err = ixgbe_setup_all_tx_resources(adapter); |
6856 | if (err) |
6857 | goto err_setup_tx; |
6858 | |
6859 | /* allocate receive descriptors */ |
6860 | err = ixgbe_setup_all_rx_resources(adapter); |
6861 | if (err) |
6862 | goto err_setup_rx; |
6863 | |
6864 | ixgbe_configure(adapter); |
6865 | |
6866 | err = ixgbe_request_irq(adapter); |
6867 | if (err) |
6868 | goto err_req_irq; |
6869 | |
6870 | /* Notify the stack of the actual queue counts. */ |
6871 | queues = adapter->num_tx_queues; |
	err = netif_set_real_num_tx_queues(netdev, queues);
6873 | if (err) |
6874 | goto err_set_queues; |
6875 | |
6876 | queues = adapter->num_rx_queues; |
	err = netif_set_real_num_rx_queues(netdev, queues);
6878 | if (err) |
6879 | goto err_set_queues; |
6880 | |
6881 | ixgbe_ptp_init(adapter); |
6882 | |
6883 | ixgbe_up_complete(adapter); |
6884 | |
	udp_tunnel_nic_reset_ntf(netdev);
6886 | |
6887 | return 0; |
6888 | |
6889 | err_set_queues: |
6890 | ixgbe_free_irq(adapter); |
6891 | err_req_irq: |
6892 | ixgbe_free_all_rx_resources(adapter); |
6893 | if (hw->phy.ops.set_phy_power && !adapter->wol) |
6894 | hw->phy.ops.set_phy_power(&adapter->hw, false); |
6895 | err_setup_rx: |
6896 | ixgbe_free_all_tx_resources(adapter); |
6897 | err_setup_tx: |
6898 | ixgbe_reset(adapter); |
6899 | |
6900 | return err; |
6901 | } |
6902 | |
6903 | static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) |
6904 | { |
6905 | ixgbe_ptp_suspend(adapter); |
6906 | |
6907 | if (adapter->hw.phy.ops.enter_lplu) { |
6908 | adapter->hw.phy.reset_disable = true; |
6909 | ixgbe_down(adapter); |
6910 | adapter->hw.phy.ops.enter_lplu(&adapter->hw); |
6911 | adapter->hw.phy.reset_disable = false; |
6912 | } else { |
6913 | ixgbe_down(adapter); |
6914 | } |
6915 | |
6916 | ixgbe_free_irq(adapter); |
6917 | |
6918 | ixgbe_free_all_tx_resources(adapter); |
6919 | ixgbe_free_all_rx_resources(adapter); |
6920 | } |
6921 | |
6922 | /** |
6923 | * ixgbe_close - Disables a network interface |
6924 | * @netdev: network interface device structure |
6925 | * |
6926 | * Returns 0, this is not allowed to fail |
6927 | * |
6928 | * The close entry point is called when an interface is de-activated |
6929 | * by the OS. The hardware is still under the drivers control, but |
6930 | * needs to be disabled. A global MAC reset is issued to stop the |
6931 | * hardware, and all transmit and receive resources are freed. |
6932 | **/ |
6933 | int ixgbe_close(struct net_device *netdev) |
6934 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
6936 | |
6937 | ixgbe_ptp_stop(adapter); |
6938 | |
	if (netif_device_present(netdev))
6940 | ixgbe_close_suspend(adapter); |
6941 | |
6942 | ixgbe_fdir_filter_exit(adapter); |
6943 | |
6944 | ixgbe_release_hw_control(adapter); |
6945 | |
6946 | return 0; |
6947 | } |
6948 | |
6949 | static int __maybe_unused ixgbe_resume(struct device *dev_d) |
6950 | { |
6951 | struct pci_dev *pdev = to_pci_dev(dev_d); |
6952 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
6953 | struct net_device *netdev = adapter->netdev; |
6954 | u32 err; |
6955 | |
6956 | adapter->hw.hw_addr = adapter->io_addr; |
6957 | |
	err = pci_enable_device_mem(pdev);
6959 | if (err) { |
6960 | e_dev_err("Cannot enable PCI device from suspend\n" ); |
6961 | return err; |
6962 | } |
6963 | smp_mb__before_atomic(); |
	clear_bit(__IXGBE_DISABLED, &adapter->state);
	pci_set_master(pdev);
6966 | |
	device_wakeup_disable(dev_d);
6968 | |
6969 | ixgbe_reset(adapter); |
6970 | |
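	/* clear any wake-up status bits left set from the wake event */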
6971 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
6972 | |
6973 | rtnl_lock(); |
6974 | err = ixgbe_init_interrupt_scheme(adapter); |
	if (!err && netif_running(netdev))
		err = ixgbe_open(netdev);

	if (!err)
		netif_device_attach(netdev);
6981 | rtnl_unlock(); |
6982 | |
6983 | return err; |
6984 | } |
6985 | |
6986 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) |
6987 | { |
6988 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
6989 | struct net_device *netdev = adapter->netdev; |
6990 | struct ixgbe_hw *hw = &adapter->hw; |
6991 | u32 ctrl; |
6992 | u32 wufc = adapter->wol; |
6993 | |
6994 | rtnl_lock(); |
	netif_device_detach(netdev);
6996 | |
	if (netif_running(netdev))
6998 | ixgbe_close_suspend(adapter); |
6999 | |
7000 | ixgbe_clear_interrupt_scheme(adapter); |
7001 | rtnl_unlock(); |
7002 | |
7003 | if (hw->mac.ops.stop_link_on_d3) |
7004 | hw->mac.ops.stop_link_on_d3(hw); |
7005 | |
7006 | if (wufc) { |
7007 | u32 fctrl; |
7008 | |
7009 | ixgbe_set_rx_mode(netdev); |
7010 | |
7011 | /* enable the optics for 82599 SFP+ fiber as we can WoL */ |
7012 | if (hw->mac.ops.enable_tx_laser) |
7013 | hw->mac.ops.enable_tx_laser(hw); |
7014 | |
7015 | /* enable the reception of multicast packets */ |
7016 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
7017 | fctrl |= IXGBE_FCTRL_MPE; |
7018 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
7019 | |
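		/* disable further PCIe master requests before entering D3 */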
7020 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
7021 | ctrl |= IXGBE_CTRL_GIO_DIS; |
7022 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); |
7023 | |
7024 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); |
7025 | } else { |
7026 | IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); |
7027 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); |
7028 | } |
7029 | |
7030 | switch (hw->mac.type) { |
7031 | case ixgbe_mac_82598EB: |
		pci_wake_from_d3(pdev, false);
7033 | break; |
7034 | case ixgbe_mac_82599EB: |
7035 | case ixgbe_mac_X540: |
7036 | case ixgbe_mac_X550: |
7037 | case ixgbe_mac_X550EM_x: |
7038 | case ixgbe_mac_x550em_a: |
		pci_wake_from_d3(pdev, !!wufc);
7040 | break; |
7041 | default: |
7042 | break; |
7043 | } |
7044 | |
7045 | *enable_wake = !!wufc; |
7046 | if (hw->phy.ops.set_phy_power && !*enable_wake) |
7047 | hw->phy.ops.set_phy_power(hw, false); |
7048 | |
7049 | ixgbe_release_hw_control(adapter); |
7050 | |
	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
7053 | |
7054 | return 0; |
7055 | } |
7056 | |
7057 | static int __maybe_unused ixgbe_suspend(struct device *dev_d) |
7058 | { |
7059 | struct pci_dev *pdev = to_pci_dev(dev_d); |
7060 | int retval; |
7061 | bool wake; |
7062 | |
	retval = __ixgbe_shutdown(pdev, &wake);
7064 | |
	device_set_wakeup_enable(dev_d, wake);
7066 | |
7067 | return retval; |
7068 | } |
7069 | |
7070 | static void ixgbe_shutdown(struct pci_dev *pdev) |
7071 | { |
7072 | bool wake; |
7073 | |
	__ixgbe_shutdown(pdev, &wake);
7075 | |
7076 | if (system_state == SYSTEM_POWER_OFF) { |
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
7079 | } |
7080 | } |
7081 | |
7082 | /** |
7083 | * ixgbe_update_stats - Update the board statistics counters. |
7084 | * @adapter: board private structure |
7085 | **/ |
7086 | void ixgbe_update_stats(struct ixgbe_adapter *adapter) |
7087 | { |
7088 | struct net_device *netdev = adapter->netdev; |
7089 | struct ixgbe_hw *hw = &adapter->hw; |
7090 | struct ixgbe_hw_stats *hwstats = &adapter->stats; |
7091 | u64 total_mpc = 0; |
7092 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; |
7093 | u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; |
7094 | u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; |
7095 | u64 alloc_rx_page = 0; |
7096 | u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; |
7097 | |
7098 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
7099 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
7100 | return; |
7101 | |
7102 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
7103 | u64 rsc_count = 0; |
7104 | u64 rsc_flush = 0; |
7105 | for (i = 0; i < adapter->num_rx_queues; i++) { |
7106 | rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; |
7107 | rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; |
7108 | } |
7109 | adapter->rsc_total_count = rsc_count; |
7110 | adapter->rsc_total_flush = rsc_flush; |
7111 | } |
7112 | |
7113 | for (i = 0; i < adapter->num_rx_queues; i++) { |
7114 | struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); |
7115 | |
7116 | if (!rx_ring) |
7117 | continue; |
7118 | non_eop_descs += rx_ring->rx_stats.non_eop_descs; |
7119 | alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; |
7120 | alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; |
7121 | alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; |
7122 | hw_csum_rx_error += rx_ring->rx_stats.csum_err; |
7123 | bytes += rx_ring->stats.bytes; |
7124 | packets += rx_ring->stats.packets; |
7125 | } |
7126 | adapter->non_eop_descs = non_eop_descs; |
7127 | adapter->alloc_rx_page = alloc_rx_page; |
7128 | adapter->alloc_rx_page_failed = alloc_rx_page_failed; |
7129 | adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; |
7130 | adapter->hw_csum_rx_error = hw_csum_rx_error; |
7131 | netdev->stats.rx_bytes = bytes; |
7132 | netdev->stats.rx_packets = packets; |
7133 | |
7134 | bytes = 0; |
7135 | packets = 0; |
7136 | /* gather some stats to the adapter struct that are per queue */ |
7137 | for (i = 0; i < adapter->num_tx_queues; i++) { |
7138 | struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); |
7139 | |
7140 | if (!tx_ring) |
7141 | continue; |
7142 | restart_queue += tx_ring->tx_stats.restart_queue; |
7143 | tx_busy += tx_ring->tx_stats.tx_busy; |
7144 | bytes += tx_ring->stats.bytes; |
7145 | packets += tx_ring->stats.packets; |
7146 | } |
7147 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
7148 | struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); |
7149 | |
7150 | if (!xdp_ring) |
7151 | continue; |
7152 | restart_queue += xdp_ring->tx_stats.restart_queue; |
7153 | tx_busy += xdp_ring->tx_stats.tx_busy; |
7154 | bytes += xdp_ring->stats.bytes; |
7155 | packets += xdp_ring->stats.packets; |
7156 | } |
7157 | adapter->restart_queue = restart_queue; |
7158 | adapter->tx_busy = tx_busy; |
7159 | netdev->stats.tx_bytes = bytes; |
7160 | netdev->stats.tx_packets = packets; |
7161 | |
7162 | hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
7163 | |
7164 | /* 8 register reads */ |
7165 | for (i = 0; i < 8; i++) { |
7166 | /* for packet buffers not used, the register should read 0 */ |
7167 | mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); |
7168 | missed_rx += mpc; |
7169 | hwstats->mpc[i] += mpc; |
7170 | total_mpc += hwstats->mpc[i]; |
7171 | hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); |
7172 | hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); |
7173 | switch (hw->mac.type) { |
7174 | case ixgbe_mac_82598EB: |
7175 | hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); |
7176 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); |
7177 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); |
7178 | hwstats->pxonrxc[i] += |
7179 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); |
7180 | break; |
7181 | case ixgbe_mac_82599EB: |
7182 | case ixgbe_mac_X540: |
7183 | case ixgbe_mac_X550: |
7184 | case ixgbe_mac_X550EM_x: |
7185 | case ixgbe_mac_x550em_a: |
7186 | hwstats->pxonrxc[i] += |
7187 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); |
7188 | break; |
7189 | default: |
7190 | break; |
7191 | } |
7192 | } |
7193 | |
	/* 16 register reads */
7195 | for (i = 0; i < 16; i++) { |
7196 | hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); |
7197 | hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); |
7198 | if ((hw->mac.type == ixgbe_mac_82599EB) || |
7199 | (hw->mac.type == ixgbe_mac_X540) || |
7200 | (hw->mac.type == ixgbe_mac_X550) || |
7201 | (hw->mac.type == ixgbe_mac_X550EM_x) || |
7202 | (hw->mac.type == ixgbe_mac_x550em_a)) { |
7203 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); |
7204 | IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ |
7205 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); |
7206 | IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ |
7207 | } |
7208 | } |
7209 | |
7210 | hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); |
7211 | /* work around hardware counting issue */ |
7212 | hwstats->gprc -= missed_rx; |
7213 | |
7214 | ixgbe_update_xoff_received(adapter); |
7215 | |
7216 | /* 82598 hardware only has a 32 bit counter in the high register */ |
7217 | switch (hw->mac.type) { |
7218 | case ixgbe_mac_82598EB: |
7219 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); |
7220 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); |
7221 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); |
7222 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); |
7223 | break; |
7224 | case ixgbe_mac_X540: |
7225 | case ixgbe_mac_X550: |
7226 | case ixgbe_mac_X550EM_x: |
7227 | case ixgbe_mac_x550em_a: |
7228 | /* OS2BMC stats are X540 and later */ |
7229 | hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); |
7230 | hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); |
7231 | hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); |
7232 | hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); |
7233 | fallthrough; |
7234 | case ixgbe_mac_82599EB: |
7235 | for (i = 0; i < 16; i++) |
7236 | adapter->hw_rx_no_dma_resources += |
7237 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
7238 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); |
7239 | IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ |
7240 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); |
7241 | IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ |
7242 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); |
7243 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
7244 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
7245 | hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); |
7246 | hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); |
7247 | #ifdef IXGBE_FCOE |
7248 | hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); |
7249 | hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); |
7250 | hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); |
7251 | hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); |
7252 | hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); |
7253 | hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); |
		/* Add up per cpu counters for total ddp alloc fail */
7255 | if (adapter->fcoe.ddp_pool) { |
7256 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; |
7257 | struct ixgbe_fcoe_ddp_pool *ddp_pool; |
7258 | unsigned int cpu; |
7259 | u64 noddp = 0, noddp_ext_buff = 0; |
7260 | for_each_possible_cpu(cpu) { |
7261 | ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); |
7262 | noddp += ddp_pool->noddp; |
7263 | noddp_ext_buff += ddp_pool->noddp_ext_buff; |
7264 | } |
7265 | hwstats->fcoe_noddp = noddp; |
7266 | hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; |
7267 | } |
7268 | #endif /* IXGBE_FCOE */ |
7269 | break; |
7270 | default: |
7271 | break; |
7272 | } |
7273 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); |
7274 | hwstats->bprc += bprc; |
7275 | hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); |
7276 | if (hw->mac.type == ixgbe_mac_82598EB) |
7277 | hwstats->mprc -= bprc; |
7278 | hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); |
7279 | hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); |
7280 | hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); |
7281 | hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); |
7282 | hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); |
7283 | hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); |
7284 | hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); |
7285 | hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); |
7286 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); |
7287 | hwstats->lxontxc += lxon; |
7288 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); |
7289 | hwstats->lxofftxc += lxoff; |
7290 | hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); |
7291 | hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); |
7292 | /* |
7293 | * 82598 errata - tx of flow control packets is included in tx counters |
7294 | */ |
7295 | xon_off_tot = lxon + lxoff; |
7296 | hwstats->gptc -= xon_off_tot; |
7297 | hwstats->mptc -= xon_off_tot; |
7298 | hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); |
7299 | hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); |
7300 | hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); |
7301 | hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); |
7302 | hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); |
7303 | hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); |
7304 | hwstats->ptc64 -= xon_off_tot; |
7305 | hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); |
7306 | hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); |
7307 | hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); |
7308 | hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); |
7309 | hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); |
7310 | hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); |
7311 | |
7312 | /* Fill out the OS statistics structure */ |
7313 | netdev->stats.multicast = hwstats->mprc; |
7314 | |
7315 | /* Rx Errors */ |
7316 | netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; |
7317 | netdev->stats.rx_dropped = 0; |
7318 | netdev->stats.rx_length_errors = hwstats->rlec; |
7319 | netdev->stats.rx_crc_errors = hwstats->crcerrs; |
7320 | netdev->stats.rx_missed_errors = total_mpc; |
7321 | |
7322 | /* VF Stats Collection - skip while resetting because these |
7323 | * are not clear on read and otherwise you'll sometimes get |
7324 | * crazy values. |
7325 | */ |
7326 | if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { |
7327 | for (i = 0; i < adapter->num_vfs; i++) { |
7328 | UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), |
7329 | adapter->vfinfo[i].last_vfstats.gprc, |
7330 | adapter->vfinfo[i].vfstats.gprc); |
7331 | UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), |
7332 | adapter->vfinfo[i].last_vfstats.gptc, |
7333 | adapter->vfinfo[i].vfstats.gptc); |
7334 | UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), |
7335 | IXGBE_PVFGORC_MSB(i), |
7336 | adapter->vfinfo[i].last_vfstats.gorc, |
7337 | adapter->vfinfo[i].vfstats.gorc); |
7338 | UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), |
7339 | IXGBE_PVFGOTC_MSB(i), |
7340 | adapter->vfinfo[i].last_vfstats.gotc, |
7341 | adapter->vfinfo[i].vfstats.gotc); |
7342 | UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), |
7343 | adapter->vfinfo[i].last_vfstats.mprc, |
7344 | adapter->vfinfo[i].vfstats.mprc); |
7345 | } |
7346 | } |
7347 | } |
7348 | |
7349 | /** |
7350 | * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table |
7351 | * @adapter: pointer to the device adapter structure |
7352 | **/ |
7353 | static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) |
7354 | { |
7355 | struct ixgbe_hw *hw = &adapter->hw; |
7356 | int i; |
7357 | |
7358 | if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) |
7359 | return; |
7360 | |
7361 | adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; |
7362 | |
7363 | /* if interface is down do nothing */ |
7364 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
7365 | return; |
7366 | |
7367 | /* do nothing if we are not using signature filters */ |
7368 | if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) |
7369 | return; |
7370 | |
7371 | adapter->fdir_overflow++; |
7372 | |
7373 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { |
7374 | for (i = 0; i < adapter->num_tx_queues; i++) |
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		for (i = 0; i < adapter->num_xdp_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&adapter->xdp_ring[i]->state);
7380 | /* re-enable flow director interrupts */ |
7381 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); |
7382 | } else { |
7383 | e_err(probe, "failed to finish FDIR re-initialization, " |
7384 | "ignored adding FDIR ATR filters\n" ); |
7385 | } |
7386 | } |
7387 | |
7388 | /** |
7389 | * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts |
7390 | * @adapter: pointer to the device adapter structure |
7391 | * |
7392 | * This function serves two purposes. First it strobes the interrupt lines |
7393 | * in order to make certain interrupts are occurring. Secondly it sets the |
7394 | * bits needed to check for TX hangs. As a result we should immediately |
7395 | * determine if a hang has occurred. |
7396 | */ |
7397 | static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) |
7398 | { |
7399 | struct ixgbe_hw *hw = &adapter->hw; |
7400 | u64 eics = 0; |
7401 | int i; |
7402 | |
7403 | /* If we're down, removing or resetting, just bail */ |
7404 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
7405 | test_bit(__IXGBE_REMOVING, &adapter->state) || |
7406 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
7407 | return; |
7408 | |
7409 | /* Force detection of hung controller */ |
	if (netif_carrier_ok(adapter->netdev)) {
7411 | for (i = 0; i < adapter->num_tx_queues; i++) |
7412 | set_check_for_tx_hang(adapter->tx_ring[i]); |
7413 | for (i = 0; i < adapter->num_xdp_queues; i++) |
7414 | set_check_for_tx_hang(adapter->xdp_ring[i]); |
7415 | } |
7416 | |
7417 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
7418 | /* |
7419 | * for legacy and MSI interrupts don't set any bits |
7420 | * that are enabled for EIAM, because this operation |
7421 | * would set *both* EIMS and EICS for any bit in EIAM |
7422 | */ |
7423 | IXGBE_WRITE_REG(hw, IXGBE_EICS, |
7424 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); |
7425 | } else { |
7426 | /* get one bit for every active tx/rx interrupt vector */ |
7427 | for (i = 0; i < adapter->num_q_vectors; i++) { |
7428 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; |
7429 | if (qv->rx.ring || qv->tx.ring) |
7430 | eics |= BIT_ULL(i); |
7431 | } |
7432 | } |
7433 | |
7434 | /* Cause software interrupt to ensure rings are cleaned */ |
	ixgbe_irq_rearm_queues(adapter, eics);
7436 | } |
7437 | |
7438 | /** |
7439 | * ixgbe_watchdog_update_link - update the link status |
7440 | * @adapter: pointer to the device adapter structure |
7441 | **/ |
7442 | static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) |
7443 | { |
7444 | struct ixgbe_hw *hw = &adapter->hw; |
7445 | u32 link_speed = adapter->link_speed; |
7446 | bool link_up = adapter->link_up; |
7447 | bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; |
7448 | |
7449 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) |
7450 | return; |
7451 | |
7452 | if (hw->mac.ops.check_link) { |
7453 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
7454 | } else { |
7455 | /* always assume link is up, if no check link function */ |
7456 | link_speed = IXGBE_LINK_SPEED_10GB_FULL; |
7457 | link_up = true; |
7458 | } |
7459 | |
7460 | if (adapter->ixgbe_ieee_pfc) |
7461 | pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); |
7462 | |
7463 | if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { |
7464 | hw->mac.ops.fc_enable(hw); |
7465 | ixgbe_set_rx_drop_en(adapter); |
7466 | } |
7467 | |
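	/* Once link is up, or the retry window has expired, stop polling and
	 * let the Link Status Change interrupt take over again.
	 */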
7468 | if (link_up || |
7469 | time_after(jiffies, (adapter->link_check_timeout + |
7470 | IXGBE_TRY_LINK_TIMEOUT))) { |
7471 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; |
7472 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); |
7473 | IXGBE_WRITE_FLUSH(hw); |
7474 | } |
7475 | |
7476 | adapter->link_up = link_up; |
7477 | adapter->link_speed = link_speed; |
7478 | } |
7479 | |
7480 | static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) |
7481 | { |
7482 | #ifdef CONFIG_IXGBE_DCB |
7483 | struct net_device *netdev = adapter->netdev; |
7484 | struct dcb_app app = { |
7485 | .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, |
7486 | .protocol = 0, |
7487 | }; |
7488 | u8 up = 0; |
7489 | |
7490 | if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) |
7491 | up = dcb_ieee_getapp_mask(netdev, &app); |
7492 | |
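	/* up is a bitmap of user priorities; use the lowest priority set
	 * above UP 0, otherwise default to 0.
	 */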
7493 | adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; |
7494 | #endif |
7495 | } |
7496 | |
7497 | /** |
7498 | * ixgbe_watchdog_link_is_up - update netif_carrier status and |
7499 | * print link up message |
7500 | * @adapter: pointer to the device adapter structure |
7501 | **/ |
7502 | static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) |
7503 | { |
7504 | struct net_device *netdev = adapter->netdev; |
7505 | struct ixgbe_hw *hw = &adapter->hw; |
7506 | u32 link_speed = adapter->link_speed; |
7507 | const char *speed_str; |
7508 | bool flow_rx, flow_tx; |
7509 | |
7510 | /* only continue if link was previously down */ |
	if (netif_carrier_ok(netdev))
7512 | return; |
7513 | |
7514 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
7515 | |
7516 | switch (hw->mac.type) { |
7517 | case ixgbe_mac_82598EB: { |
7518 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
7519 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); |
7520 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); |
7521 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); |
7522 | } |
7523 | break; |
7524 | case ixgbe_mac_X540: |
7525 | case ixgbe_mac_X550: |
7526 | case ixgbe_mac_X550EM_x: |
7527 | case ixgbe_mac_x550em_a: |
7528 | case ixgbe_mac_82599EB: { |
7529 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); |
7530 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); |
7531 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); |
7532 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); |
7533 | } |
7534 | break; |
7535 | default: |
7536 | flow_tx = false; |
7537 | flow_rx = false; |
7538 | break; |
7539 | } |
7540 | |
7541 | adapter->last_rx_ptp_check = jiffies; |
7542 | |
7543 | if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) |
7544 | ixgbe_ptp_start_cyclecounter(adapter); |
7545 | |
	switch (link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		speed_str = "10 Gbps";
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		speed_str = "5 Gbps";
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		speed_str = "2.5 Gbps";
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		speed_str = "1 Gbps";
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		speed_str = "100 Mbps";
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		speed_str = "10 Mbps";
		break;
	default:
		speed_str = "unknown speed";
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));
7573 | |
	netif_carrier_on(netdev);
7575 | ixgbe_check_vf_rate_limit(adapter); |
7576 | |
7577 | /* enable transmits */ |
	netif_tx_wake_all_queues(adapter->netdev);
7579 | |
7580 | /* update the default user priority for VFs */ |
7581 | ixgbe_update_default_up(adapter); |
7582 | |
7583 | /* ping all the active vfs to let them know link has changed */ |
7584 | ixgbe_ping_all_vfs(adapter); |
7585 | } |
7586 | |
7587 | /** |
7588 | * ixgbe_watchdog_link_is_down - update netif_carrier status and |
7589 | * print link down message |
7590 | * @adapter: pointer to the adapter structure |
7591 | **/ |
7592 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) |
7593 | { |
7594 | struct net_device *netdev = adapter->netdev; |
7595 | struct ixgbe_hw *hw = &adapter->hw; |
7596 | |
7597 | adapter->link_up = false; |
7598 | adapter->link_speed = 0; |
7599 | |
7600 | /* only continue if link was up previously */ |
	if (!netif_carrier_ok(netdev))
7602 | return; |
7603 | |
7604 | /* poll for SFP+ cable when link is down */ |
7605 | if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) |
7606 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; |
7607 | |
7608 | if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) |
7609 | ixgbe_ptp_start_cyclecounter(adapter); |
7610 | |
7611 | e_info(drv, "NIC Link is Down\n" ); |
7612 | netif_carrier_off(dev: netdev); |
7613 | |
7614 | /* ping all the active vfs to let them know link has changed */ |
7615 | ixgbe_ping_all_vfs(adapter); |
7616 | } |
7617 | |
7618 | static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) |
7619 | { |
7620 | int i; |
7621 | |
7622 | for (i = 0; i < adapter->num_tx_queues; i++) { |
7623 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
7624 | |
7625 | if (tx_ring->next_to_use != tx_ring->next_to_clean) |
7626 | return true; |
7627 | } |
7628 | |
7629 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
7630 | struct ixgbe_ring *ring = adapter->xdp_ring[i]; |
7631 | |
7632 | if (ring->next_to_use != ring->next_to_clean) |
7633 | return true; |
7634 | } |
7635 | |
7636 | return false; |
7637 | } |
7638 | |
7639 | static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) |
7640 | { |
7641 | struct ixgbe_hw *hw = &adapter->hw; |
7642 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
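	/* number of Tx queues in each VMDq pool, derived from the pool mask */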
7643 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
7644 | |
7645 | int i, j; |
7646 | |
7647 | if (!adapter->num_vfs) |
7648 | return false; |
7649 | |
7650 | /* resetting the PF is only needed for MAC before X550 */ |
7651 | if (hw->mac.type >= ixgbe_mac_X550) |
7652 | return false; |
7653 | |
7654 | for (i = 0; i < adapter->num_vfs; i++) { |
7655 | for (j = 0; j < q_per_pool; j++) { |
7656 | u32 h, t; |
7657 | |
7658 | h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); |
7659 | t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); |
7660 | |
7661 | if (h != t) |
7662 | return true; |
7663 | } |
7664 | } |
7665 | |
7666 | return false; |
7667 | } |
7668 | |
7669 | /** |
7670 | * ixgbe_watchdog_flush_tx - flush queues on link down |
7671 | * @adapter: pointer to the device adapter structure |
7672 | **/ |
7673 | static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) |
7674 | { |
	if (!netif_carrier_ok(adapter->netdev)) {
7676 | if (ixgbe_ring_tx_pending(adapter) || |
7677 | ixgbe_vf_tx_pending(adapter)) { |
7678 | /* We've lost link, so the controller stops DMA, |
7679 | * but we've got queued Tx work that's never going |
7680 | * to get done, so reset controller to flush Tx. |
7681 | * (Do the reset outside of interrupt context). |
7682 | */ |
7683 | e_warn(drv, "initiating reset to clear Tx work after link loss\n" ); |
7684 | set_bit(nr: __IXGBE_RESET_REQUESTED, addr: &adapter->state); |
7685 | } |
7686 | } |
7687 | } |
7688 | |
7689 | #ifdef CONFIG_PCI_IOV |
7690 | static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf) |
7691 | { |
7692 | struct ixgbe_hw *hw = &adapter->hw; |
7693 | |
7694 | if (adapter->hw.mac.type == ixgbe_mac_82599EB && |
7695 | adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { |
7696 | adapter->vfinfo[vf].primary_abort_count++; |
7697 | if (adapter->vfinfo[vf].primary_abort_count == |
7698 | IXGBE_PRIMARY_ABORT_LIMIT) { |
			ixgbe_set_vf_link_state(adapter, vf,
						IFLA_VF_LINK_STATE_DISABLE);
7701 | adapter->vfinfo[vf].primary_abort_count = 0; |
7702 | |
7703 | e_info(drv, |
7704 | "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on" , |
7705 | hw->bus.func, vf, |
7706 | adapter->vfinfo[vf].vf_mac_addresses); |
7707 | } |
7708 | } |
7709 | } |
7710 | |
7711 | static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) |
7712 | { |
7713 | struct ixgbe_hw *hw = &adapter->hw; |
7714 | struct pci_dev *pdev = adapter->pdev; |
7715 | unsigned int vf; |
7716 | u32 gpc; |
7717 | |
	if (!(netif_carrier_ok(adapter->netdev)))
7719 | return; |
7720 | |
7721 | gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); |
7722 | if (gpc) /* If incrementing then no need for the check below */ |
7723 | return; |
7724 | /* Check to see if a bad DMA write target from an errant or |
7725 | * malicious VF has caused a PCIe error. If so then we can |
7726 | * issue a VFLR to the offending VF(s) and then resume without |
7727 | * requesting a full slot reset. |
7728 | */ |
7729 | |
7730 | if (!pdev) |
7731 | return; |
7732 | |
7733 | /* check status reg for all VFs owned by this PF */ |
7734 | for (vf = 0; vf < adapter->num_vfs; ++vf) { |
7735 | struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; |
7736 | u16 status_reg; |
7737 | |
7738 | if (!vfdev) |
7739 | continue; |
		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7741 | if (status_reg != IXGBE_FAILED_READ_CFG_WORD && |
7742 | status_reg & PCI_STATUS_REC_MASTER_ABORT) { |
7743 | ixgbe_bad_vf_abort(adapter, vf); |
			pcie_flr(vfdev);
7745 | } |
7746 | } |
7747 | } |
7748 | |
7749 | static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) |
7750 | { |
7751 | u32 ssvpc; |
7752 | |
7753 | /* Do not perform spoof check for 82598 or if not in IOV mode */ |
7754 | if (adapter->hw.mac.type == ixgbe_mac_82598EB || |
7755 | adapter->num_vfs == 0) |
7756 | return; |
7757 | |
7758 | ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); |
7759 | |
7760 | /* |
7761 | * ssvpc register is cleared on read, if zero then no |
7762 | * spoofed packets in the last interval. |
7763 | */ |
7764 | if (!ssvpc) |
7765 | return; |
7766 | |
7767 | e_warn(drv, "%u Spoofed packets detected\n" , ssvpc); |
7768 | } |
7769 | #else |
7770 | static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) |
7771 | { |
7772 | } |
7773 | |
7774 | static void |
7775 | ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) |
7776 | { |
7777 | } |
#endif /* CONFIG_PCI_IOV */

7781 | /** |
7782 | * ixgbe_watchdog_subtask - check and bring link up |
7783 | * @adapter: pointer to the device adapter structure |
7784 | **/ |
7785 | static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) |
7786 | { |
7787 | /* if interface is down, removing or resetting, do nothing */ |
7788 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
7789 | test_bit(__IXGBE_REMOVING, &adapter->state) || |
7790 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
7791 | return; |
7792 | |
7793 | ixgbe_watchdog_update_link(adapter); |
7794 | |
7795 | if (adapter->link_up) |
7796 | ixgbe_watchdog_link_is_up(adapter); |
7797 | else |
7798 | ixgbe_watchdog_link_is_down(adapter); |
7799 | |
7800 | ixgbe_check_for_bad_vf(adapter); |
7801 | ixgbe_spoof_check(adapter); |
7802 | ixgbe_update_stats(adapter); |
7803 | |
7804 | ixgbe_watchdog_flush_tx(adapter); |
7805 | } |
7806 | |
7807 | /** |
7808 | * ixgbe_sfp_detection_subtask - poll for SFP+ cable |
7809 | * @adapter: the ixgbe adapter structure |
7810 | **/ |
7811 | static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) |
7812 | { |
7813 | struct ixgbe_hw *hw = &adapter->hw; |
7814 | s32 err; |
7815 | |
7816 | /* not searching for SFP so there is nothing to do here */ |
7817 | if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && |
7818 | !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) |
7819 | return; |
7820 | |
7821 | if (adapter->sfp_poll_time && |
7822 | time_after(adapter->sfp_poll_time, jiffies)) |
7823 | return; /* If not yet time to poll for SFP */ |
7824 | |
7825 | /* someone else is in init, wait until next service event */ |
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7827 | return; |
7828 | |
7829 | adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; |
7830 | |
7831 | err = hw->phy.ops.identify_sfp(hw); |
7832 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) |
7833 | goto sfp_out; |
7834 | |
7835 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { |
7836 | /* If no cable is present, then we need to reset |
7837 | * the next time we find a good cable. */ |
7838 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
7839 | } |
7840 | |
7841 | /* exit on error */ |
7842 | if (err) |
7843 | goto sfp_out; |
7844 | |
7845 | /* exit if reset not needed */ |
7846 | if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) |
7847 | goto sfp_out; |
7848 | |
7849 | adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; |
7850 | |
7851 | /* |
7852 | * A module may be identified correctly, but the EEPROM may not have |
7853 | * support for that module. setup_sfp() will fail in that case, so |
7854 | * we should not allow that module to load. |
7855 | */ |
7856 | if (hw->mac.type == ixgbe_mac_82598EB) |
7857 | err = hw->phy.ops.reset(hw); |
7858 | else |
7859 | err = hw->mac.ops.setup_sfp(hw); |
7860 | |
7861 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) |
7862 | goto sfp_out; |
7863 | |
7864 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; |
7865 | e_info(probe, "detected SFP+: %d\n" , hw->phy.sfp_type); |
7866 | |
7867 | sfp_out: |
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7869 | |
7870 | if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && |
7871 | (adapter->netdev->reg_state == NETREG_REGISTERED)) { |
7872 | e_dev_err("failed to initialize because an unsupported " |
7873 | "SFP+ module type was detected.\n" ); |
7874 | e_dev_err("Reload the driver after installing a " |
7875 | "supported module.\n" ); |
7876 | unregister_netdev(dev: adapter->netdev); |
7877 | } |
7878 | } |
7879 | |
7880 | /** |
7881 | * ixgbe_sfp_link_config_subtask - set up link SFP after module install |
7882 | * @adapter: the ixgbe adapter structure |
7883 | **/ |
7884 | static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) |
7885 | { |
7886 | struct ixgbe_hw *hw = &adapter->hw; |
7887 | u32 cap_speed; |
7888 | u32 speed; |
7889 | bool autoneg = false; |
7890 | |
7891 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) |
7892 | return; |
7893 | |
7894 | /* someone else is in init, wait until next service event */ |
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7896 | return; |
7897 | |
7898 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; |
7899 | |
7900 | hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); |
7901 | |
7902 | /* advertise highest capable link speed */ |
7903 | if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) |
7904 | speed = IXGBE_LINK_SPEED_10GB_FULL; |
7905 | else |
7906 | speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | |
7907 | IXGBE_LINK_SPEED_1GB_FULL); |
7908 | |
7909 | if (hw->mac.ops.setup_link) |
7910 | hw->mac.ops.setup_link(hw, speed, true); |
7911 | |
7912 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
7913 | adapter->link_check_timeout = jiffies; |
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7915 | } |
7916 | |
7917 | /** |
7918 | * ixgbe_service_timer - Timer Call-back |
7919 | * @t: pointer to timer_list structure |
7920 | **/ |
7921 | static void ixgbe_service_timer(struct timer_list *t) |
7922 | { |
7923 | struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer); |
7924 | unsigned long next_event_offset; |
7925 | |
7926 | /* poll faster when waiting for link */ |
7927 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) |
7928 | next_event_offset = HZ / 10; |
7929 | else |
7930 | next_event_offset = HZ * 2; |
7931 | |
7932 | /* Reset the timer */ |
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7934 | |
7935 | ixgbe_service_event_schedule(adapter); |
7936 | } |
7937 | |
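/**
 * ixgbe_phy_interrupt_subtask - process a PHY interrupt
 * @adapter: the adapter private structure
 *
 * Services a LASI interrupt flagged by the interrupt handler and logs
 * a critical message if the PHY reports an over-temperature event.
 **/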
7938 | static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) |
7939 | { |
7940 | struct ixgbe_hw *hw = &adapter->hw; |
7941 | u32 status; |
7942 | |
7943 | if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) |
7944 | return; |
7945 | |
7946 | adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; |
7947 | |
7948 | if (!hw->phy.ops.handle_lasi) |
7949 | return; |
7950 | |
7951 | status = hw->phy.ops.handle_lasi(&adapter->hw); |
7952 | if (status != IXGBE_ERR_OVERTEMP) |
7953 | return; |
7954 | |
	e_crit(drv, "%s\n", ixgbe_overheat_msg);
7956 | } |
7957 | |
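/**
 * ixgbe_reset_subtask - reset the adapter if a reset was requested
 * @adapter: the adapter private structure
 **/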
7958 | static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) |
7959 | { |
	if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7961 | return; |
7962 | |
7963 | rtnl_lock(); |
7964 | /* If we're already down, removing or resetting, just bail */ |
7965 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
7966 | test_bit(__IXGBE_REMOVING, &adapter->state) || |
7967 | test_bit(__IXGBE_RESETTING, &adapter->state)) { |
7968 | rtnl_unlock(); |
7969 | return; |
7970 | } |
7971 | |
7972 | ixgbe_dump(adapter); |
	netdev_err(adapter->netdev, "Reset adapter\n");
7974 | adapter->tx_timeout_count++; |
7975 | |
7976 | ixgbe_reinit_locked(adapter); |
7977 | rtnl_unlock(); |
7978 | } |
7979 | |
7980 | /** |
7981 | * ixgbe_check_fw_error - Check firmware for errors |
7982 | * @adapter: the adapter private structure |
7983 | * |
7984 | * Check firmware errors in register FWSM |
7985 | */ |
7986 | static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) |
7987 | { |
7988 | struct ixgbe_hw *hw = &adapter->hw; |
7989 | u32 fwsm; |
7990 | |
7991 | /* read fwsm.ext_err_ind register and log errors */ |
7992 | fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); |
7993 | |
7994 | if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || |
7995 | !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) |
7996 | e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n" , |
7997 | fwsm); |
7998 | |
7999 | if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { |
8000 | e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n" ); |
8001 | return true; |
8002 | } |
8003 | |
8004 | return false; |
8005 | } |
8006 | |
8007 | /** |
8008 | * ixgbe_service_task - manages and runs subtasks |
8009 | * @work: pointer to work_struct containing our data |
8010 | **/ |
8011 | static void ixgbe_service_task(struct work_struct *work) |
8012 | { |
8013 | struct ixgbe_adapter *adapter = container_of(work, |
8014 | struct ixgbe_adapter, |
8015 | service_task); |
	if (ixgbe_removed(adapter->hw.hw_addr)) {
8017 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
8018 | rtnl_lock(); |
8019 | ixgbe_down(adapter); |
8020 | rtnl_unlock(); |
8021 | } |
8022 | ixgbe_service_event_complete(adapter); |
8023 | return; |
8024 | } |
8025 | if (ixgbe_check_fw_error(adapter)) { |
8026 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
			unregister_netdev(adapter->netdev);
8028 | ixgbe_service_event_complete(adapter); |
8029 | return; |
8030 | } |
8031 | ixgbe_reset_subtask(adapter); |
8032 | ixgbe_phy_interrupt_subtask(adapter); |
8033 | ixgbe_sfp_detection_subtask(adapter); |
8034 | ixgbe_sfp_link_config_subtask(adapter); |
8035 | ixgbe_check_overtemp_subtask(adapter); |
8036 | ixgbe_watchdog_subtask(adapter); |
8037 | ixgbe_fdir_reinit_subtask(adapter); |
8038 | ixgbe_check_hang_subtask(adapter); |
8039 | |
8040 | if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { |
8041 | ixgbe_ptp_overflow_check(adapter); |
8042 | if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) |
8043 | ixgbe_ptp_rx_hang(adapter); |
8044 | ixgbe_ptp_tx_hang(adapter); |
8045 | } |
8046 | |
8047 | ixgbe_service_event_complete(adapter); |
8048 | } |
8049 | |
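/**
 * ixgbe_tso - configure a TSO context descriptor
 * @tx_ring: ring to send the buffer on
 * @first: first tx_buffer of the packet
 * @hdr_len: returns the length of the segmentation header
 * @itd: IPsec Tx metadata for the packet
 *
 * Returns 0 when no segmentation is needed, a negative error code on
 * failure, or 1 after writing the TSO context descriptor.
 **/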
8050 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, |
8051 | struct ixgbe_tx_buffer *first, |
8052 | u8 *hdr_len, |
8053 | struct ixgbe_ipsec_tx_data *itd) |
8054 | { |
8055 | u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; |
8056 | struct sk_buff *skb = first->skb; |
8057 | union { |
8058 | struct iphdr *v4; |
8059 | struct ipv6hdr *v6; |
8060 | unsigned char *hdr; |
8061 | } ip; |
8062 | union { |
8063 | struct tcphdr *tcp; |
8064 | struct udphdr *udp; |
8065 | unsigned char *hdr; |
8066 | } l4; |
8067 | u32 paylen, l4_offset; |
8068 | u32 fceof_saidx = 0; |
8069 | int err; |
8070 | |
8071 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
8072 | return 0; |
8073 | |
8074 | if (!skb_is_gso(skb)) |
8075 | return 0; |
8076 | |
	err = skb_cow_head(skb, 0);
8078 | if (err < 0) |
8079 | return err; |
8080 | |
	if (eth_p_mpls(first->protocol))
8082 | ip.hdr = skb_inner_network_header(skb); |
8083 | else |
8084 | ip.hdr = skb_network_header(skb); |
8085 | l4.hdr = skb_checksum_start(skb); |
8086 | |
8087 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
8088 | type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ? |
8089 | IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP; |
8090 | |
8091 | /* initialize outer IP header fields */ |
8092 | if (ip.v4->version == 4) { |
8093 | unsigned char *csum_start = skb_checksum_start(skb); |
8094 | unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); |
8095 | int len = csum_start - trans_start; |
8096 | |
8097 | /* IP header will have to cancel out any data that |
8098 | * is not a part of the outer IP header, so set to |
8099 | * a reverse csum if needed, else init check to 0. |
8100 | */ |
8101 | ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? |
				   csum_fold(csum_partial(trans_start,
							  len, 0)) : 0;
8104 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; |
8105 | |
8106 | ip.v4->tot_len = 0; |
8107 | first->tx_flags |= IXGBE_TX_FLAGS_TSO | |
8108 | IXGBE_TX_FLAGS_CSUM | |
8109 | IXGBE_TX_FLAGS_IPV4; |
8110 | } else { |
8111 | ip.v6->payload_len = 0; |
8112 | first->tx_flags |= IXGBE_TX_FLAGS_TSO | |
8113 | IXGBE_TX_FLAGS_CSUM; |
8114 | } |
8115 | |
8116 | /* determine offset of inner transport header */ |
8117 | l4_offset = l4.hdr - skb->data; |
8118 | |
8119 | /* remove payload length from inner checksum */ |
8120 | paylen = skb->len - l4_offset; |
8121 | |
8122 | if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) { |
8123 | /* compute length of segmentation header */ |
8124 | *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
8125 | csum_replace_by_diff(sum: &l4.tcp->check, |
8126 | diff: (__force __wsum)htonl(paylen)); |
8127 | } else { |
8128 | /* compute length of segmentation header */ |
8129 | *hdr_len = sizeof(*l4.udp) + l4_offset; |
8130 | csum_replace_by_diff(sum: &l4.udp->check, |
8131 | diff: (__force __wsum)htonl(paylen)); |
8132 | } |
8133 | |
8134 | /* update gso size and bytecount with header size */ |
8135 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
8136 | first->bytecount += (first->gso_segs - 1) * *hdr_len; |
8137 | |
8138 | /* mss_l4len_id: use 0 as index for TSO */ |
8139 | mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; |
8140 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; |
8141 | |
8142 | fceof_saidx |= itd->sa_idx; |
8143 | type_tucmd |= itd->flags | itd->trailer_len; |
8144 | |
8145 | /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ |
8146 | vlan_macip_lens = l4.hdr - ip.hdr; |
8147 | vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; |
8148 | vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; |
8149 | |
8150 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, |
8151 | mss_l4len_idx); |
8152 | |
8153 | return 1; |
8154 | } |
8155 | |
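/**
 * ixgbe_tx_csum - configure Tx checksum offload
 * @tx_ring: ring to send the buffer on
 * @first: first tx_buffer of the packet
 * @itd: IPsec Tx metadata for the packet
 *
 * Writes a context descriptor enabling TCP, UDP or SCTP checksum
 * insertion, falling back to a software checksum when the checksum
 * offset is not one the hardware understands.
 **/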
8156 | static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, |
8157 | struct ixgbe_tx_buffer *first, |
8158 | struct ixgbe_ipsec_tx_data *itd) |
8159 | { |
8160 | struct sk_buff *skb = first->skb; |
8161 | u32 vlan_macip_lens = 0; |
8162 | u32 fceof_saidx = 0; |
8163 | u32 type_tucmd = 0; |
8164 | |
8165 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
8166 | csum_failed: |
8167 | if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | |
8168 | IXGBE_TX_FLAGS_CC))) |
8169 | return; |
8170 | goto no_csum; |
8171 | } |
8172 | |
8173 | switch (skb->csum_offset) { |
8174 | case offsetof(struct tcphdr, check): |
8175 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; |
8176 | fallthrough; |
8177 | case offsetof(struct udphdr, check): |
8178 | break; |
8179 | case offsetof(struct sctphdr, checksum): |
8180 | /* validate that this is actually an SCTP request */ |
8181 | if (skb_csum_is_sctp(skb)) { |
8182 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; |
8183 | break; |
8184 | } |
8185 | fallthrough; |
8186 | default: |
8187 | skb_checksum_help(skb); |
8188 | goto csum_failed; |
8189 | } |
8190 | |
8191 | /* update TX checksum flag */ |
8192 | first->tx_flags |= IXGBE_TX_FLAGS_CSUM; |
8193 | vlan_macip_lens = skb_checksum_start_offset(skb) - |
8194 | skb_network_offset(skb); |
8195 | no_csum: |
8196 | /* vlan_macip_lens: MACLEN, VLAN tag */ |
8197 | vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; |
8198 | vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; |
8199 | |
8200 | fceof_saidx |= itd->sa_idx; |
8201 | type_tucmd |= itd->flags | itd->trailer_len; |
8202 | |
8203 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0); |
8204 | } |
8205 | |
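/* Translate the _flag bit in _input into the _result bit position.
 * With power-of-two constants the multiply or divide by their ratio
 * reduces to a shift, avoiding a conditional branch.
 */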
8206 | #define IXGBE_SET_FLAG(_input, _flag, _result) \ |
8207 | ((_flag <= _result) ? \ |
8208 | ((u32)(_input & _flag) * (_result / _flag)) : \ |
8209 | ((u32)(_input & _flag) / (_flag / _result))) |
8210 | |
8211 | static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) |
8212 | { |
8213 | /* set type for advanced descriptor with frame checksum insertion */ |
8214 | u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | |
8215 | IXGBE_ADVTXD_DCMD_DEXT | |
8216 | IXGBE_ADVTXD_DCMD_IFCS; |
8217 | |
8218 | /* set HW vlan bit if vlan is present */ |
8219 | cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, |
8220 | IXGBE_ADVTXD_DCMD_VLE); |
8221 | |
8222 | /* set segmentation enable bits for TSO/FSO */ |
8223 | cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, |
8224 | IXGBE_ADVTXD_DCMD_TSE); |
8225 | |
8226 | /* set timestamp bit if present */ |
8227 | cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, |
8228 | IXGBE_ADVTXD_MAC_TSTAMP); |
8229 | |
8230 | /* insert frame checksum */ |
8231 | cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); |
8232 | |
8233 | return cmd_type; |
8234 | } |
8235 | |
8236 | static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, |
8237 | u32 tx_flags, unsigned int paylen) |
8238 | { |
8239 | u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; |
8240 | |
8241 | /* enable L4 checksum for TSO and TX checksum offload */ |
8242 | olinfo_status |= IXGBE_SET_FLAG(tx_flags, |
8243 | IXGBE_TX_FLAGS_CSUM, |
8244 | IXGBE_ADVTXD_POPTS_TXSM); |
8245 | |
8246 | /* enable IPv4 checksum for TSO */ |
8247 | olinfo_status |= IXGBE_SET_FLAG(tx_flags, |
8248 | IXGBE_TX_FLAGS_IPV4, |
8249 | IXGBE_ADVTXD_POPTS_IXSM); |
8250 | |
8251 | /* enable IPsec */ |
8252 | olinfo_status |= IXGBE_SET_FLAG(tx_flags, |
8253 | IXGBE_TX_FLAGS_IPSEC, |
8254 | IXGBE_ADVTXD_POPTS_IPSEC); |
8255 | |
8256 | /* |
8257 | * Check Context must be set if Tx switch is enabled, which it |
	 * always is when virtual functions are running
8259 | */ |
8260 | olinfo_status |= IXGBE_SET_FLAG(tx_flags, |
8261 | IXGBE_TX_FLAGS_CC, |
8262 | IXGBE_ADVTXD_CC); |
8263 | |
8264 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
8265 | } |
8266 | |
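/* Slow path of ixgbe_maybe_stop_tx(): stop the subqueue, re-check for
 * room in case a racing completion just freed descriptors, and count a
 * restart if it did. Returns -EBUSY while the queue stays stopped.
 */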
8267 | static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) |
8268 | { |
8269 | if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index, |
8270 | ixgbe_desc_unused(tx_ring), size)) |
8271 | return -EBUSY; |
8272 | |
8273 | ++tx_ring->tx_stats.restart_queue; |
8274 | return 0; |
8275 | } |
8276 | |
8277 | static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) |
8278 | { |
8279 | if (likely(ixgbe_desc_unused(tx_ring) >= size)) |
8280 | return 0; |
8281 | |
8282 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
8283 | } |
8284 | |
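/**
 * ixgbe_tx_map - map skb data and post Tx descriptors
 * @tx_ring: ring to send the buffer on
 * @first: first tx_buffer of the packet
 * @hdr_len: size of the segmentation header, if any
 *
 * DMA-maps the skb head and frags, fills the data descriptors, writes
 * the last descriptor with RS and EOP set and then bumps the tail.
 * Returns 0 on success or -1 after unwinding the DMA mappings on a
 * mapping error.
 **/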
8285 | static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, |
8286 | struct ixgbe_tx_buffer *first, |
8287 | const u8 hdr_len) |
8288 | { |
8289 | struct sk_buff *skb = first->skb; |
8290 | struct ixgbe_tx_buffer *tx_buffer; |
8291 | union ixgbe_adv_tx_desc *tx_desc; |
8292 | skb_frag_t *frag; |
8293 | dma_addr_t dma; |
8294 | unsigned int data_len, size; |
8295 | u32 tx_flags = first->tx_flags; |
8296 | u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); |
8297 | u16 i = tx_ring->next_to_use; |
8298 | |
8299 | tx_desc = IXGBE_TX_DESC(tx_ring, i); |
8300 | |
	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8302 | |
8303 | size = skb_headlen(skb); |
8304 | data_len = skb->data_len; |
8305 | |
8306 | #ifdef IXGBE_FCOE |
8307 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
8308 | if (data_len < sizeof(struct fcoe_crc_eof)) { |
8309 | size -= sizeof(struct fcoe_crc_eof) - data_len; |
8310 | data_len = 0; |
8311 | } else { |
8312 | data_len -= sizeof(struct fcoe_crc_eof); |
8313 | } |
8314 | } |
8315 | |
8316 | #endif |
8317 | dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); |
8318 | |
8319 | tx_buffer = first; |
8320 | |
8321 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { |
		if (dma_mapping_error(tx_ring->dev, dma))
8323 | goto dma_error; |
8324 | |
8325 | /* record length, and DMA address */ |
8326 | dma_unmap_len_set(tx_buffer, len, size); |
8327 | dma_unmap_addr_set(tx_buffer, dma, dma); |
8328 | |
8329 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
8330 | |
8331 | while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { |
8332 | tx_desc->read.cmd_type_len = |
8333 | cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); |
8334 | |
8335 | i++; |
8336 | tx_desc++; |
8337 | if (i == tx_ring->count) { |
8338 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
8339 | i = 0; |
8340 | } |
8341 | tx_desc->read.olinfo_status = 0; |
8342 | |
8343 | dma += IXGBE_MAX_DATA_PER_TXD; |
8344 | size -= IXGBE_MAX_DATA_PER_TXD; |
8345 | |
8346 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
8347 | } |
8348 | |
8349 | if (likely(!data_len)) |
8350 | break; |
8351 | |
8352 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); |
8353 | |
8354 | i++; |
8355 | tx_desc++; |
8356 | if (i == tx_ring->count) { |
8357 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
8358 | i = 0; |
8359 | } |
8360 | tx_desc->read.olinfo_status = 0; |
8361 | |
8362 | #ifdef IXGBE_FCOE |
8363 | size = min_t(unsigned int, data_len, skb_frag_size(frag)); |
8364 | #else |
8365 | size = skb_frag_size(frag); |
8366 | #endif |
8367 | data_len -= size; |
8368 | |
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
8371 | |
8372 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
8373 | } |
8374 | |
8375 | /* write last descriptor with RS and EOP bits */ |
8376 | cmd_type |= size | IXGBE_TXD_CMD; |
8377 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
8378 | |
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8380 | |
8381 | /* set the timestamp */ |
8382 | first->time_stamp = jiffies; |
8383 | |
8384 | skb_tx_timestamp(skb); |
8385 | |
8386 | /* |
8387 | * Force memory writes to complete before letting h/w know there |
8388 | * are new descriptors to fetch. (Only applicable for weak-ordered |
8389 | * memory model archs, such as IA-64). |
8390 | * |
8391 | * We also need this memory barrier to make certain all of the |
8392 | * status bits have been updated before next_to_watch is written. |
8393 | */ |
8394 | wmb(); |
8395 | |
8396 | /* set next_to_watch value indicating a packet is present */ |
8397 | first->next_to_watch = tx_desc; |
8398 | |
8399 | i++; |
8400 | if (i == tx_ring->count) |
8401 | i = 0; |
8402 | |
8403 | tx_ring->next_to_use = i; |
8404 | |
8405 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
8406 | |
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
8409 | } |
8410 | |
8411 | return 0; |
8412 | dma_error: |
	dev_err(tx_ring->dev, "TX DMA map failed\n");
8414 | |
8415 | /* clear dma mappings for failed tx_buffer_info map */ |
8416 | for (;;) { |
8417 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
8418 | if (dma_unmap_len(tx_buffer, len)) |
8419 | dma_unmap_page(tx_ring->dev, |
8420 | dma_unmap_addr(tx_buffer, dma), |
8421 | dma_unmap_len(tx_buffer, len), |
8422 | DMA_TO_DEVICE); |
8423 | dma_unmap_len_set(tx_buffer, len, 0); |
8424 | if (tx_buffer == first) |
8425 | break; |
8426 | if (i == 0) |
8427 | i += tx_ring->count; |
8428 | i--; |
8429 | } |
8430 | |
	dev_kfree_skb_any(first->skb);
8432 | first->skb = NULL; |
8433 | |
8434 | tx_ring->next_to_use = i; |
8435 | |
8436 | return -1; |
8437 | } |
8438 | |
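/**
 * ixgbe_atr - sample a transmit flow for Flow Director ATR
 * @ring: ring the packet is being transmitted on
 * @first: first tx_buffer of the packet
 *
 * Samples TCP flows (every SYN, then once per atr_sample_rate packets)
 * and adds a signature filter so that receive traffic for the flow is
 * steered back to this queue.
 **/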
8439 | static void ixgbe_atr(struct ixgbe_ring *ring, |
8440 | struct ixgbe_tx_buffer *first) |
8441 | { |
8442 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
8443 | union ixgbe_atr_hash_dword input = { .dword = 0 }; |
8444 | union ixgbe_atr_hash_dword common = { .dword = 0 }; |
8445 | union { |
8446 | unsigned char *network; |
8447 | struct iphdr *ipv4; |
8448 | struct ipv6hdr *ipv6; |
8449 | } hdr; |
8450 | struct tcphdr *th; |
8451 | unsigned int hlen; |
8452 | struct sk_buff *skb; |
8453 | __be16 vlan_id; |
8454 | int l4_proto; |
8455 | |
	/* if ring doesn't have an interrupt vector, cannot perform ATR */
8457 | if (!q_vector) |
8458 | return; |
8459 | |
8460 | /* do nothing if sampling is disabled */ |
8461 | if (!ring->atr_sample_rate) |
8462 | return; |
8463 | |
8464 | ring->atr_count++; |
8465 | |
8466 | /* currently only IPv4/IPv6 with TCP is supported */ |
8467 | if ((first->protocol != htons(ETH_P_IP)) && |
8468 | (first->protocol != htons(ETH_P_IPV6))) |
8469 | return; |
8470 | |
8471 | /* snag network header to get L4 type and address */ |
8472 | skb = first->skb; |
8473 | hdr.network = skb_network_header(skb); |
8474 | if (unlikely(hdr.network <= skb->data)) |
8475 | return; |
8476 | if (skb->encapsulation && |
8477 | first->protocol == htons(ETH_P_IP) && |
8478 | hdr.ipv4->protocol == IPPROTO_UDP) { |
8479 | struct ixgbe_adapter *adapter = q_vector->adapter; |
8480 | |
8481 | if (unlikely(skb_tail_pointer(skb) < hdr.network + |
8482 | vxlan_headroom(0))) |
8483 | return; |
8484 | |
8485 | /* verify the port is recognized as VXLAN */ |
8486 | if (adapter->vxlan_port && |
8487 | udp_hdr(skb)->dest == adapter->vxlan_port) |
8488 | hdr.network = skb_inner_network_header(skb); |
8489 | |
8490 | if (adapter->geneve_port && |
8491 | udp_hdr(skb)->dest == adapter->geneve_port) |
8492 | hdr.network = skb_inner_network_header(skb); |
8493 | } |
8494 | |
8495 | /* Make sure we have at least [minimum IPv4 header + TCP] |
8496 | * or [IPv6 header] bytes |
8497 | */ |
8498 | if (unlikely(skb_tail_pointer(skb) < hdr.network + 40)) |
8499 | return; |
8500 | |
8501 | /* Currently only IPv4/IPv6 with TCP is supported */ |
8502 | switch (hdr.ipv4->version) { |
8503 | case IPVERSION: |
8504 | /* access ihl as u8 to avoid unaligned access on ia64 */ |
8505 | hlen = (hdr.network[0] & 0x0F) << 2; |
8506 | l4_proto = hdr.ipv4->protocol; |
8507 | break; |
8508 | case 6: |
8509 | hlen = hdr.network - skb->data; |
		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8511 | hlen -= hdr.network - skb->data; |
8512 | break; |
8513 | default: |
8514 | return; |
8515 | } |
8516 | |
8517 | if (l4_proto != IPPROTO_TCP) |
8518 | return; |
8519 | |
8520 | if (unlikely(skb_tail_pointer(skb) < hdr.network + |
8521 | hlen + sizeof(struct tcphdr))) |
8522 | return; |
8523 | |
8524 | th = (struct tcphdr *)(hdr.network + hlen); |
8525 | |
8526 | /* skip this packet since the socket is closing */ |
8527 | if (th->fin) |
8528 | return; |
8529 | |
8530 | /* sample on all syn packets or once every atr sample count */ |
8531 | if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) |
8532 | return; |
8533 | |
8534 | /* reset sample count */ |
8535 | ring->atr_count = 0; |
8536 | |
8537 | vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); |
8538 | |
8539 | /* |
8540 | * src and dst are inverted, think how the receiver sees them |
8541 | * |
8542 | * The input is broken into two sections, a non-compressed section |
8543 | * containing vm_pool, vlan_id, and flow_type. The rest of the data |
8544 | * is XORed together and stored in the compressed dword. |
8545 | */ |
8546 | input.formatted.vlan_id = vlan_id; |
8547 | |
8548 | /* |
8549 | * since src port and flex bytes occupy the same word XOR them together |
8550 | * and write the value to source port portion of compressed dword |
8551 | */ |
8552 | if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) |
8553 | common.port.src ^= th->dest ^ htons(ETH_P_8021Q); |
8554 | else |
8555 | common.port.src ^= th->dest ^ first->protocol; |
8556 | common.port.dst ^= th->source; |
8557 | |
8558 | switch (hdr.ipv4->version) { |
8559 | case IPVERSION: |
8560 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; |
8561 | common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; |
8562 | break; |
8563 | case 6: |
8564 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; |
8565 | common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ |
8566 | hdr.ipv6->saddr.s6_addr32[1] ^ |
8567 | hdr.ipv6->saddr.s6_addr32[2] ^ |
8568 | hdr.ipv6->saddr.s6_addr32[3] ^ |
8569 | hdr.ipv6->daddr.s6_addr32[0] ^ |
8570 | hdr.ipv6->daddr.s6_addr32[1] ^ |
8571 | hdr.ipv6->daddr.s6_addr32[2] ^ |
8572 | hdr.ipv6->daddr.s6_addr32[3]; |
8573 | break; |
8574 | default: |
8575 | break; |
8576 | } |
8577 | |
8578 | if (hdr.network != skb_network_header(skb)) |
8579 | input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; |
8580 | |
8581 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ |
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
8584 | } |
8585 | |
8586 | #ifdef IXGBE_FCOE |
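/**
 * ixgbe_select_queue - select a Tx queue, steering FCoE/FIP traffic
 * @dev: network interface device structure
 * @skb: buffer to transmit
 * @sb_dev: subordinate device, if any
 *
 * Directs transmits from an offloaded macvlan to its bound queue set,
 * steers FCoE/FIP frames onto the FCoE ring range, and otherwise falls
 * back to the stack's default queue selection.
 **/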
8587 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, |
8588 | struct net_device *sb_dev) |
8589 | { |
8590 | struct ixgbe_adapter *adapter; |
8591 | struct ixgbe_ring_feature *f; |
8592 | int txq; |
8593 | |
8594 | if (sb_dev) { |
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8596 | struct net_device *vdev = sb_dev; |
8597 | |
8598 | txq = vdev->tc_to_txq[tc].offset; |
		txq += reciprocal_scale(skb_get_hash(skb),
					vdev->tc_to_txq[tc].count);
8601 | |
8602 | return txq; |
8603 | } |
8604 | |
8605 | /* |
8606 | * only execute the code below if protocol is FCoE |
8607 | * or FIP and we have FCoE enabled on the adapter |
8608 | */ |
8609 | switch (vlan_get_protocol(skb)) { |
8610 | case htons(ETH_P_FCOE): |
8611 | case htons(ETH_P_FIP): |
8612 | adapter = netdev_priv(dev); |
8613 | |
8614 | if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) |
8615 | break; |
8616 | fallthrough; |
8617 | default: |
8618 | return netdev_pick_tx(dev, skb, sb_dev); |
8619 | } |
8620 | |
8621 | f = &adapter->ring_feature[RING_F_FCOE]; |
8622 | |
8623 | txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : |
8624 | smp_processor_id(); |
8625 | |
8626 | while (txq >= f->indices) |
8627 | txq -= f->indices; |
8628 | |
8629 | return txq + f->offset; |
8630 | } |
8631 | |
8632 | #endif |
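/**
 * ixgbe_xmit_xdp_ring - transmit an XDP frame on an XDP Tx ring
 * @ring: XDP Tx ring to transmit on
 * @xdpf: XDP frame to transmit
 *
 * Returns IXGBE_XDP_TX on success, or IXGBE_XDP_CONSUMED when the ring
 * is full or a DMA mapping fails.
 **/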
8633 | int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, |
8634 | struct xdp_frame *xdpf) |
8635 | { |
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
8637 | u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; |
8638 | u16 i = 0, index = ring->next_to_use; |
8639 | struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index]; |
8640 | struct ixgbe_tx_buffer *tx_buff = tx_head; |
8641 | union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index); |
8642 | u32 cmd_type, len = xdpf->len; |
8643 | void *data = xdpf->data; |
8644 | |
8645 | if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags)) |
8646 | return IXGBE_XDP_CONSUMED; |
8647 | |
8648 | tx_head->bytecount = xdp_get_frame_len(xdpf); |
8649 | tx_head->gso_segs = 1; |
8650 | tx_head->xdpf = xdpf; |
8651 | |
8652 | tx_desc->read.olinfo_status = |
8653 | cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT); |
8654 | |
8655 | for (;;) { |
8656 | dma_addr_t dma; |
8657 | |
8658 | dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); |
		if (dma_mapping_error(ring->dev, dma))
8660 | goto unmap; |
8661 | |
8662 | dma_unmap_len_set(tx_buff, len, len); |
8663 | dma_unmap_addr_set(tx_buff, dma, dma); |
8664 | |
8665 | cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | |
8666 | IXGBE_ADVTXD_DCMD_IFCS | len; |
8667 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
8668 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
8669 | tx_buff->protocol = 0; |
8670 | |
8671 | if (++index == ring->count) |
8672 | index = 0; |
8673 | |
8674 | if (i == nr_frags) |
8675 | break; |
8676 | |
8677 | tx_buff = &ring->tx_buffer_info[index]; |
8678 | tx_desc = IXGBE_TX_DESC(ring, index); |
8679 | tx_desc->read.olinfo_status = 0; |
8680 | |
		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
8683 | i++; |
8684 | } |
8685 | /* put descriptor type bits */ |
8686 | tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); |
8687 | |
8688 | /* Avoid any potential race with xdp_xmit and cleanup */ |
8689 | smp_wmb(); |
8690 | |
8691 | tx_head->next_to_watch = tx_desc; |
8692 | ring->next_to_use = index; |
8693 | |
8694 | return IXGBE_XDP_TX; |
8695 | |
8696 | unmap: |
8697 | for (;;) { |
8698 | tx_buff = &ring->tx_buffer_info[index]; |
8699 | if (dma_unmap_len(tx_buff, len)) |
8700 | dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma), |
8701 | dma_unmap_len(tx_buff, len), |
8702 | DMA_TO_DEVICE); |
8703 | dma_unmap_len_set(tx_buff, len, 0); |
8704 | if (tx_buff == tx_head) |
8705 | break; |
8706 | |
8707 | if (!index) |
8708 | index += ring->count; |
8709 | index--; |
8710 | } |
8711 | |
8712 | return IXGBE_XDP_CONSUMED; |
8713 | } |
8714 | |
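/**
 * ixgbe_xmit_frame_ring - prepare and transmit one skb on a Tx ring
 * @skb: buffer to transmit
 * @adapter: the adapter private structure
 * @tx_ring: ring to send the buffer on
 *
 * Reserves descriptors, handles VLAN tagging, Tx timestamping and the
 * TSO/checksum/IPsec offloads, then hands the buffer to ixgbe_tx_map().
 **/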
8715 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
8716 | struct ixgbe_adapter *adapter, |
8717 | struct ixgbe_ring *tx_ring) |
8718 | { |
8719 | struct ixgbe_tx_buffer *first; |
8720 | int tso; |
8721 | u32 tx_flags = 0; |
8722 | unsigned short f; |
8723 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); |
8724 | struct ixgbe_ipsec_tx_data ipsec_tx = { 0 }; |
8725 | __be16 protocol = skb->protocol; |
8726 | u8 hdr_len = 0; |
8727 | |
8728 | /* |
8729 | * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, |
8730 | * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, |
8731 | * + 2 desc gap to keep tail from touching head, |
8732 | * + 1 desc for context descriptor, |
8733 | * otherwise try next time |
8734 | */ |
8735 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
8736 | count += TXD_USE_COUNT(skb_frag_size( |
8737 | &skb_shinfo(skb)->frags[f])); |
8738 | |
	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8740 | tx_ring->tx_stats.tx_busy++; |
8741 | return NETDEV_TX_BUSY; |
8742 | } |
8743 | |
8744 | /* record the location of the first descriptor for this packet */ |
8745 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
8746 | first->skb = skb; |
8747 | first->bytecount = skb->len; |
8748 | first->gso_segs = 1; |
8749 | |
8750 | /* if we have a HW VLAN tag being added default to the HW one */ |
8751 | if (skb_vlan_tag_present(skb)) { |
8752 | tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; |
8753 | tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; |
8754 | /* else if it is a SW VLAN check the next protocol and store the tag */ |
8755 | } else if (protocol == htons(ETH_P_8021Q)) { |
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8758 | if (!vhdr) |
8759 | goto out_drop; |
8760 | |
8761 | tx_flags |= ntohs(vhdr->h_vlan_TCI) << |
8762 | IXGBE_TX_FLAGS_VLAN_SHIFT; |
8763 | tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; |
8764 | } |
8765 | protocol = vlan_get_protocol(skb); |
8766 | |
8767 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
8768 | adapter->ptp_clock) { |
8769 | if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && |
		    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
8772 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
8773 | tx_flags |= IXGBE_TX_FLAGS_TSTAMP; |
8774 | |
8775 | /* schedule check for Tx timestamp */ |
8776 | adapter->ptp_tx_skb = skb_get(skb); |
8777 | adapter->ptp_tx_start = jiffies; |
			schedule_work(&adapter->ptp_tx_work);
8779 | } else { |
8780 | adapter->tx_hwtstamp_skipped++; |
8781 | } |
8782 | } |
8783 | |
8784 | #ifdef CONFIG_PCI_IOV |
8785 | /* |
8786 | * Use the l2switch_enable flag - would be false if the DMA |
8787 | * Tx switch had been disabled. |
8788 | */ |
8789 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
8790 | tx_flags |= IXGBE_TX_FLAGS_CC; |
8791 | |
8792 | #endif |
8793 | /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ |
8794 | if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && |
8795 | ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || |
8796 | (skb->priority != TC_PRIO_CONTROL))) { |
8797 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; |
8798 | tx_flags |= (skb->priority & 0x7) << |
8799 | IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; |
8800 | if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { |
8801 | struct vlan_ethhdr *vhdr; |
8802 | |
			if (skb_cow_head(skb, 0))
8804 | goto out_drop; |
8805 | vhdr = skb_vlan_eth_hdr(skb); |
8806 | vhdr->h_vlan_TCI = htons(tx_flags >> |
8807 | IXGBE_TX_FLAGS_VLAN_SHIFT); |
8808 | } else { |
8809 | tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; |
8810 | } |
8811 | } |
8812 | |
8813 | /* record initial flags and protocol */ |
8814 | first->tx_flags = tx_flags; |
8815 | first->protocol = protocol; |
8816 | |
8817 | #ifdef IXGBE_FCOE |
8818 | /* setup tx offload for FCoE */ |
8819 | if ((protocol == htons(ETH_P_FCOE)) && |
8820 | (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { |
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
8822 | if (tso < 0) |
8823 | goto out_drop; |
8824 | |
8825 | goto xmit_fcoe; |
8826 | } |
8827 | |
8828 | #endif /* IXGBE_FCOE */ |
8829 | |
8830 | #ifdef CONFIG_IXGBE_IPSEC |
8831 | if (xfrm_offload(skb) && |
	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8833 | goto out_drop; |
8834 | #endif |
	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8836 | if (tso < 0) |
8837 | goto out_drop; |
8838 | else if (!tso) |
		ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8840 | |
8841 | /* add the ATR filter if ATR is on */ |
8842 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) |
		ixgbe_atr(tx_ring, first);
8844 | |
8845 | #ifdef IXGBE_FCOE |
8846 | xmit_fcoe: |
8847 | #endif /* IXGBE_FCOE */ |
8848 | if (ixgbe_tx_map(tx_ring, first, hdr_len)) |
8849 | goto cleanup_tx_timestamp; |
8850 | |
8851 | return NETDEV_TX_OK; |
8852 | |
8853 | out_drop: |
	dev_kfree_skb_any(first->skb);
8855 | first->skb = NULL; |
8856 | cleanup_tx_timestamp: |
8857 | if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { |
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8862 | } |
8863 | |
8864 | return NETDEV_TX_OK; |
8865 | } |
8866 | |
8867 | static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, |
8868 | struct net_device *netdev, |
8869 | struct ixgbe_ring *ring) |
8870 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8872 | struct ixgbe_ring *tx_ring; |
8873 | |
8874 | /* |
8875 | * The minimum packet size for olinfo paylen is 17 so pad the skb |
8876 | * in order to meet this minimum size requirement. |
8877 | */ |
	if (skb_put_padto(skb, 17))
8879 | return NETDEV_TX_OK; |
8880 | |
8881 | tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)]; |
8882 | if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) |
8883 | return NETDEV_TX_BUSY; |
8884 | |
8885 | return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); |
8886 | } |
8887 | |
8888 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, |
8889 | struct net_device *netdev) |
8890 | { |
8891 | return __ixgbe_xmit_frame(skb, netdev, NULL); |
8892 | } |
8893 | |
8894 | /** |
8895 | * ixgbe_set_mac - Change the Ethernet Address of the NIC |
8896 | * @netdev: network interface device structure |
8897 | * @p: pointer to an address structure |
8898 | * |
8899 | * Returns 0 on success, negative on failure |
8900 | **/ |
8901 | static int ixgbe_set_mac(struct net_device *netdev, void *p) |
8902 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8904 | struct ixgbe_hw *hw = &adapter->hw; |
8905 | struct sockaddr *addr = p; |
8906 | |
	if (!is_valid_ether_addr(addr->sa_data))
8908 | return -EADDRNOTAVAIL; |
8909 | |
	eth_hw_addr_set(netdev, addr->sa_data);
8911 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
8912 | |
8913 | ixgbe_mac_set_default_filter(adapter); |
8914 | |
8915 | return 0; |
8916 | } |
8917 | |
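/**
 * ixgbe_mdio_read - read a PHY register over MDIO
 * @netdev: network interface device structure
 * @prtad: MDIO port address
 * @devad: device address, or MDIO_DEVAD_NONE for a Clause 22 access
 * @addr: register address
 *
 * Reads through the external MII bus when one is registered, otherwise
 * through the internal PHY ops. Returns the register value or a
 * negative error code.
 **/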
8918 | static int |
8919 | ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) |
8920 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8922 | struct ixgbe_hw *hw = &adapter->hw; |
8923 | u16 value; |
8924 | int rc; |
8925 | |
8926 | if (adapter->mii_bus) { |
8927 | int regnum = addr; |
8928 | |
8929 | if (devad != MDIO_DEVAD_NONE) |
			return mdiobus_c45_read(adapter->mii_bus, prtad,
						devad, regnum);
8932 | |
		return mdiobus_read(adapter->mii_bus, prtad, regnum);
8934 | } |
8935 | |
8936 | if (prtad != hw->phy.mdio.prtad) |
8937 | return -EINVAL; |
8938 | rc = hw->phy.ops.read_reg(hw, addr, devad, &value); |
8939 | if (!rc) |
8940 | rc = value; |
8941 | return rc; |
8942 | } |
8943 | |
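/**
 * ixgbe_mdio_write - write a PHY register over MDIO
 * @netdev: network interface device structure
 * @prtad: MDIO port address
 * @devad: device address, or MDIO_DEVAD_NONE for a Clause 22 access
 * @addr: register address
 * @value: value to write
 *
 * Returns 0 on success, negative on failure.
 **/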
8944 | static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, |
8945 | u16 addr, u16 value) |
8946 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8948 | struct ixgbe_hw *hw = &adapter->hw; |
8949 | |
8950 | if (adapter->mii_bus) { |
8951 | int regnum = addr; |
8952 | |
8953 | if (devad != MDIO_DEVAD_NONE) |
			return mdiobus_c45_write(adapter->mii_bus, prtad, devad,
						 regnum, value);
8956 | |
		return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8958 | } |
8959 | |
8960 | if (prtad != hw->phy.mdio.prtad) |
8961 | return -EINVAL; |
8962 | return hw->phy.ops.write_reg(hw, addr, devad, value); |
8963 | } |
8964 | |
8965 | static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) |
8966 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
8968 | |
8969 | switch (cmd) { |
8970 | case SIOCSHWTSTAMP: |
		return ixgbe_ptp_set_ts_config(adapter, req);
8972 | case SIOCGHWTSTAMP: |
		return ixgbe_ptp_get_ts_config(adapter, req);
8974 | case SIOCGMIIPHY: |
8975 | if (!adapter->hw.phy.ops.read_reg) |
8976 | return -EOPNOTSUPP; |
8977 | fallthrough; |
8978 | default: |
		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8980 | } |
8981 | } |
8982 | |
8983 | /** |
8984 | * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding |
8985 | * netdev->dev_addrs |
8986 | * @dev: network interface device structure |
8987 | * |
8988 | * Returns non-zero on failure |
8989 | **/ |
8990 | static int ixgbe_add_sanmac_netdev(struct net_device *dev) |
8991 | { |
8992 | int err = 0; |
8993 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
8994 | struct ixgbe_hw *hw = &adapter->hw; |
8995 | |
	if (is_valid_ether_addr(hw->mac.san_addr)) {
8997 | rtnl_lock(); |
		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8999 | rtnl_unlock(); |
9000 | |
9001 | /* update SAN MAC vmdq pool selection */ |
9002 | hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); |
9003 | } |
9004 | return err; |
9005 | } |
9006 | |
9007 | /** |
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
9009 | * netdev->dev_addrs |
9010 | * @dev: network interface device structure |
9011 | * |
9012 | * Returns non-zero on failure |
9013 | **/ |
9014 | static int ixgbe_del_sanmac_netdev(struct net_device *dev) |
9015 | { |
9016 | int err = 0; |
9017 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
9018 | struct ixgbe_mac_info *mac = &adapter->hw.mac; |
9019 | |
	if (is_valid_ether_addr(mac->san_addr)) {
9021 | rtnl_lock(); |
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
9023 | rtnl_unlock(); |
9024 | } |
9025 | return err; |
9026 | } |
9027 | |
9028 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, |
9029 | struct ixgbe_ring *ring) |
9030 | { |
9031 | u64 bytes, packets; |
9032 | unsigned int start; |
9033 | |
9034 | if (ring) { |
9035 | do { |
			start = u64_stats_fetch_begin(&ring->syncp);
9037 | packets = ring->stats.packets; |
9038 | bytes = ring->stats.bytes; |
		} while (u64_stats_fetch_retry(&ring->syncp, start));
9040 | stats->tx_packets += packets; |
9041 | stats->tx_bytes += bytes; |
9042 | } |
9043 | } |
9044 | |
9045 | static void ixgbe_get_stats64(struct net_device *netdev, |
9046 | struct rtnl_link_stats64 *stats) |
9047 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9049 | int i; |
9050 | |
9051 | rcu_read_lock(); |
9052 | for (i = 0; i < adapter->num_rx_queues; i++) { |
9053 | struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); |
9054 | u64 bytes, packets; |
9055 | unsigned int start; |
9056 | |
9057 | if (ring) { |
9058 | do { |
				start = u64_stats_fetch_begin(&ring->syncp);
9060 | packets = ring->stats.packets; |
9061 | bytes = ring->stats.bytes; |
			} while (u64_stats_fetch_retry(&ring->syncp, start));
9063 | stats->rx_packets += packets; |
9064 | stats->rx_bytes += bytes; |
9065 | } |
9066 | } |
9067 | |
9068 | for (i = 0; i < adapter->num_tx_queues; i++) { |
9069 | struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); |
9070 | |
9071 | ixgbe_get_ring_stats64(stats, ring); |
9072 | } |
9073 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
9074 | struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); |
9075 | |
9076 | ixgbe_get_ring_stats64(stats, ring); |
9077 | } |
9078 | rcu_read_unlock(); |
9079 | |
9080 | /* following stats updated by ixgbe_watchdog_task() */ |
9081 | stats->multicast = netdev->stats.multicast; |
9082 | stats->rx_errors = netdev->stats.rx_errors; |
9083 | stats->rx_length_errors = netdev->stats.rx_length_errors; |
9084 | stats->rx_crc_errors = netdev->stats.rx_crc_errors; |
9085 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; |
9086 | } |
9087 | |
9088 | static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf, |
9089 | struct ifla_vf_stats *vf_stats) |
9090 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9092 | |
9093 | if (vf < 0 || vf >= adapter->num_vfs) |
9094 | return -EINVAL; |
9095 | |
9096 | vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc; |
9097 | vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc; |
9098 | vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc; |
9099 | vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc; |
9100 | vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc; |
9101 | |
9102 | return 0; |
9103 | } |
9104 | |
9105 | #ifdef CONFIG_IXGBE_DCB |
9106 | /** |
9107 | * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. |
9108 | * @adapter: pointer to ixgbe_adapter |
9109 | * @tc: number of traffic classes currently enabled |
9110 | * |
 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
9112 | * 802.1Q priority maps to a packet buffer that exists. |
9113 | */ |
9114 | static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) |
9115 | { |
9116 | struct ixgbe_hw *hw = &adapter->hw; |
9117 | u32 reg, rsave; |
9118 | int i; |
9119 | |
	/* The 82598 has a static priority-to-TC mapping that cannot
	 * be changed, so no validation is needed.
9122 | */ |
9123 | if (hw->mac.type == ixgbe_mac_82598EB) |
9124 | return; |
9125 | |
9126 | reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); |
9127 | rsave = reg; |
9128 | |
9129 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { |
9130 | u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); |
9131 | |
9132 | /* If up2tc is out of bounds default to zero */ |
9133 | if (up2tc > tc) |
9134 | reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); |
9135 | } |
9136 | |
9137 | if (reg != rsave) |
9138 | IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); |
9141 | } |
9142 | |
9143 | /** |
9144 | * ixgbe_set_prio_tc_map - Configure netdev prio tc map |
9145 | * @adapter: Pointer to adapter struct |
9146 | * |
9147 | * Populate the netdev user priority to tc map |
9148 | */ |
9149 | static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) |
9150 | { |
9151 | struct net_device *dev = adapter->netdev; |
9152 | struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; |
9153 | struct ieee_ets *ets = adapter->ixgbe_ieee_ets; |
9154 | u8 prio; |
9155 | |
9156 | for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { |
9157 | u8 tc = 0; |
9158 | |
9159 | if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) |
9160 | tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); |
9161 | else if (ets) |
9162 | tc = ets->prio_tc[prio]; |
9163 | |
9164 | netdev_set_prio_tc_map(dev, prio, tc); |
9165 | } |
9166 | } |
9167 | |
9168 | #endif /* CONFIG_IXGBE_DCB */ |
9169 | static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, |
9170 | struct netdev_nested_priv *priv) |
9171 | { |
9172 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data; |
9173 | struct ixgbe_fwd_adapter *accel; |
9174 | int pool; |
9175 | |
9176 | /* we only care about macvlans... */ |
	if (!netif_is_macvlan(vdev))
9178 | return 0; |
9179 | |
9180 | /* that have hardware offload enabled... */ |
	accel = macvlan_accel_priv(vdev);
9182 | if (!accel) |
9183 | return 0; |
9184 | |
9185 | /* If we can relocate to a different bit do so */ |
	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9187 | if (pool < adapter->num_rx_pools) { |
		set_bit(pool, adapter->fwd_bitmask);
9189 | accel->pool = pool; |
9190 | return 0; |
9191 | } |
9192 | |
9193 | /* if we cannot find a free pool then disable the offload */ |
	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
	macvlan_release_l2fw_offload(vdev);
9196 | |
9197 | /* unbind the queues and drop the subordinate channel config */ |
	netdev_unbind_sb_channel(adapter->netdev, vdev);
	netdev_set_sb_channel(vdev, 0);
9200 | |
	kfree(accel);
9202 | |
9203 | return 0; |
9204 | } |
9205 | |
9206 | static void ixgbe_defrag_macvlan_pools(struct net_device *dev) |
9207 | { |
9208 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
9209 | struct netdev_nested_priv priv = { |
9210 | .data = (void *)adapter, |
9211 | }; |
9212 | |
9213 | /* flush any stale bits out of the fwd bitmask */ |
	bitmap_clear(adapter->fwd_bitmask, 1, 63);
9215 | |
9216 | /* walk through upper devices reassigning pools */ |
	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
				      &priv);
9219 | } |
9220 | |
9221 | /** |
9222 | * ixgbe_setup_tc - configure net_device for multiple traffic classes |
9223 | * |
9224 | * @dev: net device to configure |
9225 | * @tc: number of traffic classes to enable |
9226 | */ |
9227 | int ixgbe_setup_tc(struct net_device *dev, u8 tc) |
9228 | { |
9229 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
9230 | struct ixgbe_hw *hw = &adapter->hw; |
9231 | |
9232 | /* Hardware supports up to 8 traffic classes */ |
9233 | if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) |
9234 | return -EINVAL; |
9235 | |
9236 | if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) |
9237 | return -EINVAL; |
9238 | |
9239 | /* Hardware has to reinitialize queues and interrupts to |
9240 | * match packet buffer alignment. Unfortunately, the |
9241 | * hardware is not flexible enough to do this dynamically. |
9242 | */ |
9243 | if (netif_running(dev)) |
		ixgbe_close(dev);
9245 | else |
9246 | ixgbe_reset(adapter); |
9247 | |
9248 | ixgbe_clear_interrupt_scheme(adapter); |
9249 | |
9250 | #ifdef CONFIG_IXGBE_DCB |
9251 | if (tc) { |
9252 | if (adapter->xdp_prog) { |
			e_warn(probe, "DCB is not supported with XDP\n");
9254 | |
9255 | ixgbe_init_interrupt_scheme(adapter); |
9256 | if (netif_running(dev)) |
				ixgbe_open(dev);
9258 | return -EINVAL; |
9259 | } |
9260 | |
		netdev_set_num_tc(dev, tc);
9262 | ixgbe_set_prio_tc_map(adapter); |
9263 | |
9264 | adapter->hw_tcs = tc; |
9265 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; |
9266 | |
9267 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
9268 | adapter->last_lfc_mode = adapter->hw.fc.requested_mode; |
9269 | adapter->hw.fc.requested_mode = ixgbe_fc_none; |
9270 | } |
9271 | } else { |
9272 | netdev_reset_tc(dev); |
9273 | |
9274 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
9275 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; |
9276 | |
9277 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
9278 | adapter->hw_tcs = tc; |
9279 | |
9280 | adapter->temp_dcb_cfg.pfc_mode_enable = false; |
9281 | adapter->dcb_cfg.pfc_mode_enable = false; |
9282 | } |
9283 | |
9284 | ixgbe_validate_rtr(adapter, tc); |
9285 | |
9286 | #endif /* CONFIG_IXGBE_DCB */ |
9287 | ixgbe_init_interrupt_scheme(adapter); |
9288 | |
9289 | ixgbe_defrag_macvlan_pools(dev); |
9290 | |
9291 | if (netif_running(dev)) |
		return ixgbe_open(dev);
9293 | |
9294 | return 0; |
9295 | } |
9296 | |
9297 | static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, |
9298 | struct tc_cls_u32_offload *cls) |
9299 | { |
9300 | u32 hdl = cls->knode.handle; |
9301 | u32 uhtid = TC_U32_USERHTID(cls->knode.handle); |
9302 | u32 loc = cls->knode.handle & 0xfffff; |
9303 | int err = 0, i, j; |
9304 | struct ixgbe_jump_table *jump = NULL; |
9305 | |
9306 | if (loc > IXGBE_MAX_HW_ENTRIES) |
9307 | return -EINVAL; |
9308 | |
9309 | if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) |
9310 | return -EINVAL; |
9311 | |
9312 | /* Clear this filter in the link data it is associated with */ |
9313 | if (uhtid != 0x800) { |
9314 | jump = adapter->jump_tables[uhtid]; |
9315 | if (!jump) |
9316 | return -EINVAL; |
9317 | if (!test_bit(loc - 1, jump->child_loc_map)) |
9318 | return -EINVAL; |
		clear_bit(loc - 1, jump->child_loc_map);
9320 | } |
9321 | |
9322 | /* Check if the filter being deleted is a link */ |
9323 | for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { |
9324 | jump = adapter->jump_tables[i]; |
9325 | if (jump && jump->link_hdl == hdl) { |
9326 | /* Delete filters in the hardware in the child hash |
9327 | * table associated with this link |
9328 | */ |
9329 | for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { |
9330 | if (!test_bit(j, jump->child_loc_map)) |
9331 | continue; |
				spin_lock(&adapter->fdir_perfect_lock);
				err = ixgbe_update_ethtool_fdir_entry(adapter,
								      NULL,
								      j + 1);
				spin_unlock(&adapter->fdir_perfect_lock);
				clear_bit(j, jump->child_loc_map);
9338 | } |
9339 | /* Remove resources for this link */ |
			kfree(jump->input);
			kfree(jump->mask);
			kfree(jump);
9343 | adapter->jump_tables[i] = NULL; |
9344 | return err; |
9345 | } |
9346 | } |
9347 | |
	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
	spin_unlock(&adapter->fdir_perfect_lock);
9351 | return err; |
9352 | } |
9353 | |
9354 | static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, |
9355 | struct tc_cls_u32_offload *cls) |
9356 | { |
9357 | u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); |
9358 | |
9359 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) |
9360 | return -EINVAL; |
9361 | |
	/* These ixgbe devices do not support hash tables at the moment,
	 * so abort when given one.
9364 | */ |
9365 | if (cls->hnode.divisor > 0) |
9366 | return -EINVAL; |
9367 | |
	set_bit(uhtid - 1, &adapter->tables);
9369 | return 0; |
9370 | } |
9371 | |
9372 | static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, |
9373 | struct tc_cls_u32_offload *cls) |
9374 | { |
9375 | u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); |
9376 | |
9377 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) |
9378 | return -EINVAL; |
9379 | |
	clear_bit(uhtid - 1, &adapter->tables);
9381 | return 0; |
9382 | } |
9383 | |
9384 | #ifdef CONFIG_NET_CLS_ACT |
9385 | struct upper_walk_data { |
9386 | struct ixgbe_adapter *adapter; |
9387 | u64 action; |
9388 | int ifindex; |
9389 | u8 queue; |
9390 | }; |
9391 | |
9392 | static int get_macvlan_queue(struct net_device *upper, |
9393 | struct netdev_nested_priv *priv) |
9394 | { |
	if (netif_is_macvlan(upper)) {
		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9397 | struct ixgbe_adapter *adapter; |
9398 | struct upper_walk_data *data; |
9399 | int ifindex; |
9400 | |
9401 | data = (struct upper_walk_data *)priv->data; |
9402 | ifindex = data->ifindex; |
9403 | adapter = data->adapter; |
9404 | if (vadapter && upper->ifindex == ifindex) { |
9405 | data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; |
9406 | data->action = data->queue; |
9407 | return 1; |
9408 | } |
9409 | } |
9410 | |
9411 | return 0; |
9412 | } |
9413 | |
9414 | static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, |
9415 | u8 *queue, u64 *action) |
9416 | { |
9417 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
9418 | unsigned int num_vfs = adapter->num_vfs, vf; |
9419 | struct netdev_nested_priv priv; |
9420 | struct upper_walk_data data; |
9421 | struct net_device *upper; |
9422 | |
9423 | /* redirect to a SRIOV VF */ |
9424 | for (vf = 0; vf < num_vfs; ++vf) { |
		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9426 | if (upper->ifindex == ifindex) { |
9427 | *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); |
9428 | *action = vf + 1; |
9429 | *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; |
9430 | return 0; |
9431 | } |
9432 | } |
9433 | |
	/* redirect to an offloaded macvlan netdev */
9435 | data.adapter = adapter; |
9436 | data.ifindex = ifindex; |
9437 | data.action = 0; |
9438 | data.queue = 0; |
9439 | priv.data = (void *)&data; |
	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
					  get_macvlan_queue, &priv)) {
9442 | *action = data.action; |
9443 | *queue = data.queue; |
9444 | |
9445 | return 0; |
9446 | } |
9447 | |
9448 | return -EINVAL; |
9449 | } |
9450 | |
9451 | static int parse_tc_actions(struct ixgbe_adapter *adapter, |
9452 | struct tcf_exts *exts, u64 *action, u8 *queue) |
9453 | { |
9454 | const struct tc_action *a; |
9455 | int i; |
9456 | |
9457 | if (!tcf_exts_has_actions(exts)) |
9458 | return -EINVAL; |
9459 | |
9460 | tcf_exts_for_each_action(i, a, exts) { |
9461 | /* Drop action */ |
9462 | if (is_tcf_gact_shot(a)) { |
9463 | *action = IXGBE_FDIR_DROP_QUEUE; |
9464 | *queue = IXGBE_FDIR_DROP_QUEUE; |
9465 | return 0; |
9466 | } |
9467 | |
		/* Redirect to a VF or an offloaded macvlan */
9469 | if (is_tcf_mirred_egress_redirect(a)) { |
9470 | struct net_device *dev = tcf_mirred_dev(a); |
9471 | |
9472 | if (!dev) |
9473 | return -EINVAL; |
			return handle_redirect_action(adapter, dev->ifindex,
						      queue, action);
9476 | } |
9477 | |
9478 | return -EINVAL; |
9479 | } |
9480 | |
9481 | return -EINVAL; |
9482 | } |
9483 | #else |
9484 | static int parse_tc_actions(struct ixgbe_adapter *adapter, |
9485 | struct tcf_exts *exts, u64 *action, u8 *queue) |
9486 | { |
9487 | return -EINVAL; |
9488 | } |
9489 | #endif /* CONFIG_NET_CLS_ACT */ |
9490 | |
9491 | static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, |
9492 | union ixgbe_atr_input *mask, |
9493 | struct tc_cls_u32_offload *cls, |
9494 | struct ixgbe_mat_field *field_ptr, |
9495 | struct ixgbe_nexthdr *nexthdr) |
9496 | { |
9497 | int i, j, off; |
9498 | __be32 val, m; |
9499 | bool found_entry = false, found_jump_field = false; |
9500 | |
9501 | for (i = 0; i < cls->knode.sel->nkeys; i++) { |
9502 | off = cls->knode.sel->keys[i].off; |
9503 | val = cls->knode.sel->keys[i].val; |
9504 | m = cls->knode.sel->keys[i].mask; |
9505 | |
9506 | for (j = 0; field_ptr[j].val; j++) { |
9507 | if (field_ptr[j].off == off) { |
9508 | field_ptr[j].val(input, mask, (__force u32)val, |
9509 | (__force u32)m); |
9510 | input->filter.formatted.flow_type |= |
9511 | field_ptr[j].type; |
9512 | found_entry = true; |
9513 | break; |
9514 | } |
9515 | } |
9516 | if (nexthdr) { |
9517 | if (nexthdr->off == cls->knode.sel->keys[i].off && |
9518 | nexthdr->val == |
9519 | (__force u32)cls->knode.sel->keys[i].val && |
9520 | nexthdr->mask == |
9521 | (__force u32)cls->knode.sel->keys[i].mask) |
9522 | found_jump_field = true; |
9523 | else |
9524 | continue; |
9525 | } |
9526 | } |
9527 | |
9528 | if (nexthdr && !found_jump_field) |
9529 | return -EINVAL; |
9530 | |
9531 | if (!found_entry) |
9532 | return 0; |
9533 | |
9534 | mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | |
9535 | IXGBE_ATR_L4TYPE_MASK; |
9536 | |
9537 | if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) |
9538 | mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; |
9539 | |
9540 | return 0; |
9541 | } |
9542 | |
9543 | static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, |
9544 | struct tc_cls_u32_offload *cls) |
9545 | { |
9546 | __be16 protocol = cls->common.protocol; |
9547 | u32 loc = cls->knode.handle & 0xfffff; |
9548 | struct ixgbe_hw *hw = &adapter->hw; |
9549 | struct ixgbe_mat_field *field_ptr; |
9550 | struct ixgbe_fdir_filter *input = NULL; |
9551 | union ixgbe_atr_input *mask = NULL; |
9552 | struct ixgbe_jump_table *jump = NULL; |
9553 | int i, err = -EINVAL; |
9554 | u8 queue; |
9555 | u32 uhtid, link_uhtid; |
9556 | |
9557 | uhtid = TC_U32_USERHTID(cls->knode.handle); |
9558 | link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); |
9559 | |
9560 | /* At the moment cls_u32 jumps to network layer and skips past |
9561 | * L2 headers. The canonical method to match L2 frames is to use |
 * negative values. However, this is error prone at best and really
9563 | * just broken because there is no way to "know" what sort of hdr |
9564 | * is in front of the network layer. Fix cls_u32 to support L2 |
9565 | * headers when needed. |
9566 | */ |
9567 | if (protocol != htons(ETH_P_IP)) |
9568 | return err; |
9569 | |
9570 | if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { |
9571 | e_err(drv, "Location out of range\n" ); |
9572 | return err; |
9573 | } |
9574 | |
9575 | /* cls u32 is a graph starting at root node 0x800. The driver tracks |
9576 | * links and also the fields used to advance the parser across each |
9577 | * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map |
9578 | * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h |
9579 | * To add support for new nodes update ixgbe_model.h parse structures |
9580 | * this function _should_ be generic try not to hardcode values here. |
9581 | */ |
9582 | if (uhtid == 0x800) { |
9583 | field_ptr = (adapter->jump_tables[0])->mat; |
9584 | } else { |
9585 | if (uhtid >= IXGBE_MAX_LINK_HANDLE) |
9586 | return err; |
9587 | if (!adapter->jump_tables[uhtid]) |
9588 | return err; |
9589 | field_ptr = (adapter->jump_tables[uhtid])->mat; |
9590 | } |
9591 | |
9592 | if (!field_ptr) |
9593 | return err; |
9594 | |
	/* At this point we know the field_ptr is valid and need to either
	 * build a cls_u32 link or attach a filter. Adding a link to a
	 * handle that does not exist is invalid, and the same goes for
	 * adding rules to handles that don't exist.
	 */
9600 | |
9601 | if (link_uhtid) { |
9602 | struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; |
9603 | |
9604 | if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) |
9605 | return err; |
9606 | |
9607 | if (!test_bit(link_uhtid - 1, &adapter->tables)) |
9608 | return err; |
9609 | |
9610 | /* Multiple filters as links to the same hash table are not |
9611 | * supported. To add a new filter with the same next header |
9612 | * but different match/jump conditions, create a new hash table |
9613 | * and link to it. |
9614 | */ |
9615 | if (adapter->jump_tables[link_uhtid] && |
9616 | (adapter->jump_tables[link_uhtid])->link_hdl) { |
9617 | e_err(drv, "Link filter exists for link: %x\n" , |
9618 | link_uhtid); |
9619 | return err; |
9620 | } |
9621 | |
9622 | for (i = 0; nexthdr[i].jump; i++) { |
9623 | if (nexthdr[i].o != cls->knode.sel->offoff || |
9624 | nexthdr[i].s != cls->knode.sel->offshift || |
9625 | nexthdr[i].m != |
9626 | (__force u32)cls->knode.sel->offmask) |
9627 | return err; |
9628 | |
			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
			if (!jump)
				return -ENOMEM;
			input = kzalloc(sizeof(*input), GFP_KERNEL);
			if (!input) {
				err = -ENOMEM;
				goto free_jump;
			}
			mask = kzalloc(sizeof(*mask), GFP_KERNEL);
			if (!mask) {
				err = -ENOMEM;
				goto free_input;
			}
			jump->input = input;
			jump->mask = mask;
			jump->link_hdl = cls->knode.handle;
9645 | |
			err = ixgbe_clsu32_build_input(input, mask, cls,
						       field_ptr, &nexthdr[i]);
9648 | if (!err) { |
9649 | jump->mat = nexthdr[i].jump; |
9650 | adapter->jump_tables[link_uhtid] = jump; |
9651 | break; |
9652 | } else { |
				kfree(mask);
				kfree(input);
				kfree(jump);
9656 | } |
9657 | } |
9658 | return 0; |
9659 | } |
9660 | |
	input = kzalloc(sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;
	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9665 | if (!mask) { |
9666 | err = -ENOMEM; |
9667 | goto free_input; |
9668 | } |
9669 | |
9670 | if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { |
9671 | if ((adapter->jump_tables[uhtid])->input) |
9672 | memcpy(input, (adapter->jump_tables[uhtid])->input, |
9673 | sizeof(*input)); |
9674 | if ((adapter->jump_tables[uhtid])->mask) |
9675 | memcpy(mask, (adapter->jump_tables[uhtid])->mask, |
9676 | sizeof(*mask)); |
9677 | |
9678 | /* Lookup in all child hash tables if this location is already |
9679 | * filled with a filter |
9680 | */ |
9681 | for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { |
9682 | struct ixgbe_jump_table *link = adapter->jump_tables[i]; |
9683 | |
9684 | if (link && (test_bit(loc - 1, link->child_loc_map))) { |
9685 | e_err(drv, "Filter exists in location: %x\n" , |
9686 | loc); |
9687 | err = -EINVAL; |
9688 | goto err_out; |
9689 | } |
9690 | } |
9691 | } |
9692 | err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); |
9693 | if (err) |
9694 | goto err_out; |
9695 | |
	err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
			       &queue);
9698 | if (err < 0) |
9699 | goto err_out; |
9700 | |
9701 | input->sw_idx = loc; |
9702 | |
	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, mask);
		if (err)
			goto err_out_w_lock;
	} else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9711 | err = -EINVAL; |
9712 | goto err_out_w_lock; |
9713 | } |
9714 | |
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
						    input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
	spin_unlock(&adapter->fdir_perfect_lock);
9723 | |
	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
		set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);

	kfree(mask);
	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(mask);
free_input:
	kfree(input);
free_jump:
	kfree(jump);
	return err;
9738 | } |
9739 | |
9740 | static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, |
9741 | struct tc_cls_u32_offload *cls_u32) |
9742 | { |
9743 | switch (cls_u32->command) { |
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return ixgbe_configure_clsu32(adapter, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return ixgbe_delete_clsu32(adapter, cls_u32);
	case TC_CLSU32_NEW_HNODE:
	case TC_CLSU32_REPLACE_HNODE:
		return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
	case TC_CLSU32_DELETE_HNODE:
		return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9754 | default: |
9755 | return -EOPNOTSUPP; |
9756 | } |
9757 | } |
9758 | |
9759 | static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
9760 | void *cb_priv) |
9761 | { |
9762 | struct ixgbe_adapter *adapter = cb_priv; |
9763 | |
	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9765 | return -EOPNOTSUPP; |
9766 | |
9767 | switch (type) { |
9768 | case TC_SETUP_CLSU32: |
		return ixgbe_setup_tc_cls_u32(adapter, type_data);
9770 | default: |
9771 | return -EOPNOTSUPP; |
9772 | } |
9773 | } |
9774 | |
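/* Illustrative example only (interface name hypothetical): this handler is
 * reached via e.g.
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *       map 0 1 2 3 0 1 2 3 queues 4@0 4@4 4@8 4@12 hw 1
 */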
9775 | static int ixgbe_setup_tc_mqprio(struct net_device *dev, |
9776 | struct tc_mqprio_qopt *mqprio) |
9777 | { |
9778 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
	return ixgbe_setup_tc(dev, mqprio->num_tc);
9780 | } |
9781 | |
9782 | static LIST_HEAD(ixgbe_block_cb_list); |
9783 | |
9784 | static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, |
9785 | void *type_data) |
9786 | { |
9787 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
9788 | |
9789 | switch (type) { |
9790 | case TC_SETUP_BLOCK: |
		return flow_block_cb_setup_simple(type_data,
						  &ixgbe_block_cb_list,
						  ixgbe_setup_tc_block_cb,
						  adapter, adapter, true);
9795 | case TC_SETUP_QDISC_MQPRIO: |
		return ixgbe_setup_tc_mqprio(dev, type_data);
9797 | default: |
9798 | return -EOPNOTSUPP; |
9799 | } |
9800 | } |
9801 | |
9802 | #ifdef CONFIG_PCI_IOV |
9803 | void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) |
9804 | { |
9805 | struct net_device *netdev = adapter->netdev; |
9806 | |
9807 | rtnl_lock(); |
	ixgbe_setup_tc(netdev, adapter->hw_tcs);
9809 | rtnl_unlock(); |
9810 | } |
9811 | |
9812 | #endif |
9813 | void ixgbe_do_reset(struct net_device *netdev) |
9814 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9816 | |
	if (netif_running(netdev))
9818 | ixgbe_reinit_locked(adapter); |
9819 | else |
9820 | ixgbe_reset(adapter); |
9821 | } |
9822 | |
9823 | static netdev_features_t ixgbe_fix_features(struct net_device *netdev, |
9824 | netdev_features_t features) |
9825 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9827 | |
9828 | /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ |
9829 | if (!(features & NETIF_F_RXCSUM)) |
9830 | features &= ~NETIF_F_LRO; |
9831 | |
9832 | /* Turn off LRO if not RSC capable */ |
9833 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) |
9834 | features &= ~NETIF_F_LRO; |
9835 | |
9836 | if (adapter->xdp_prog && (features & NETIF_F_LRO)) { |
9837 | e_dev_err("LRO is not supported with XDP\n" ); |
9838 | features &= ~NETIF_F_LRO; |
9839 | } |
9840 | |
9841 | return features; |
9842 | } |
9843 | |
9844 | static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) |
9845 | { |
	int rss = min_t(int, ixgbe_max_rss_indices(adapter),
			num_online_cpus());
9848 | |
9849 | /* go back to full RSS if we're not running SR-IOV */ |
9850 | if (!adapter->ring_feature[RING_F_VMDQ].offset) |
9851 | adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | |
9852 | IXGBE_FLAG_SRIOV_ENABLED); |
9853 | |
9854 | adapter->ring_feature[RING_F_RSS].limit = rss; |
9855 | adapter->ring_feature[RING_F_VMDQ].limit = 1; |
9856 | |
	ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9858 | } |
9859 | |
9860 | static int ixgbe_set_features(struct net_device *netdev, |
9861 | netdev_features_t features) |
9862 | { |
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
9864 | netdev_features_t changed = netdev->features ^ features; |
9865 | bool need_reset = false; |
9866 | |
9867 | /* Make sure RSC matches LRO, reset if change */ |
9868 | if (!(features & NETIF_F_LRO)) { |
9869 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
9870 | need_reset = true; |
9871 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; |
9872 | } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && |
9873 | !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { |
9874 | if (adapter->rx_itr_setting == 1 || |
9875 | adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { |
9876 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
9877 | need_reset = true; |
9878 | } else if ((changed ^ features) & NETIF_F_LRO) { |
9879 | e_info(probe, "rx-usecs set too low, " |
9880 | "disabling RSC\n" ); |
9881 | } |
9882 | } |
9883 | |
9884 | /* |
9885 | * Check if Flow Director n-tuple support or hw_tc support was |
9886 | * enabled or disabled. If the state changed, we need to reset. |
9887 | */ |
9888 | if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { |
9889 | /* turn off ATR, enable perfect filters and reset */ |
9890 | if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) |
9891 | need_reset = true; |
9892 | |
9893 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
9894 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
9895 | } else { |
9896 | /* turn off perfect filters, enable ATR and reset */ |
9897 | if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
9898 | need_reset = true; |
9899 | |
9900 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
9901 | |
9902 | /* We cannot enable ATR if SR-IOV is enabled */ |
9903 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || |
9904 | /* We cannot enable ATR if we have 2 or more tcs */ |
9905 | (adapter->hw_tcs > 1) || |
9906 | /* We cannot enable ATR if RSS is disabled */ |
9907 | (adapter->ring_feature[RING_F_RSS].limit <= 1) || |
9908 | /* A sample rate of 0 indicates ATR disabled */ |
9909 | (!adapter->atr_sample_rate)) |
			; /* do nothing, not supported */
9911 | else /* otherwise supported and set the flag */ |
9912 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
9913 | } |
9914 | |
9915 | if (changed & NETIF_F_RXALL) |
9916 | need_reset = true; |
9917 | |
9918 | netdev->features = features; |
9919 | |
9920 | if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) |
9921 | ixgbe_reset_l2fw_offload(adapter); |
9922 | else if (need_reset) |
9923 | ixgbe_do_reset(netdev); |
9924 | else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | |
9925 | NETIF_F_HW_VLAN_CTAG_FILTER)) |
9926 | ixgbe_set_rx_mode(netdev); |
9927 | |
9928 | return 1; |
9929 | } |
9930 | |
9931 | static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
9932 | struct net_device *dev, |
9933 | const unsigned char *addr, u16 vid, |
9934 | u16 flags, |
9935 | struct netlink_ext_ack *extack) |
9936 | { |
9937 | /* guarantee we can provide a unique filter for the unicast address */ |
9938 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { |
9939 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
9940 | u16 pool = VMDQ_P(0); |
9941 | |
9942 | if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) |
9943 | return -ENOMEM; |
9944 | } |
9945 | |
9946 | return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); |
9947 | } |
9948 | |
9949 | /** |
9950 | * ixgbe_configure_bridge_mode - set various bridge modes |
9951 | * @adapter: the private structure |
9952 | * @mode: requested bridge mode |
9953 | * |
 * Configure some settings required for various bridge modes.
9955 | **/ |
9956 | static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, |
9957 | __u16 mode) |
9958 | { |
9959 | struct ixgbe_hw *hw = &adapter->hw; |
9960 | unsigned int p, num_pools; |
9961 | u32 vmdctl; |
9962 | |
9963 | switch (mode) { |
9964 | case BRIDGE_MODE_VEPA: |
9965 | /* disable Tx loopback, rely on switch hairpin mode */ |
9966 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); |
9967 | |
9968 | /* must enable Rx switching replication to allow multicast |
9969 | * packet reception on all VFs, and to enable source address |
9970 | * pruning. |
9971 | */ |
9972 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); |
9973 | vmdctl |= IXGBE_VT_CTL_REPLEN; |
9974 | IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); |
9975 | |
9976 | /* enable Rx source address pruning. Note, this requires |
9977 | * replication to be enabled or else it does nothing. |
9978 | */ |
9979 | num_pools = adapter->num_vfs + adapter->num_rx_pools; |
9980 | for (p = 0; p < num_pools; p++) { |
9981 | if (hw->mac.ops.set_source_address_pruning) |
9982 | hw->mac.ops.set_source_address_pruning(hw, |
9983 | true, |
9984 | p); |
9985 | } |
9986 | break; |
9987 | case BRIDGE_MODE_VEB: |
9988 | /* enable Tx loopback for internal VF/PF communication */ |
9989 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, |
9990 | IXGBE_PFDTXGSWC_VT_LBEN); |
9991 | |
9992 | /* disable Rx switching replication unless we have SR-IOV |
9993 | * virtual functions |
9994 | */ |
9995 | vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); |
9996 | if (!adapter->num_vfs) |
9997 | vmdctl &= ~IXGBE_VT_CTL_REPLEN; |
9998 | IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); |
9999 | |
10000 | /* disable Rx source address pruning, since we don't expect to |
10001 | * be receiving external loopback of our transmitted frames. |
10002 | */ |
10003 | num_pools = adapter->num_vfs + adapter->num_rx_pools; |
10004 | for (p = 0; p < num_pools; p++) { |
10005 | if (hw->mac.ops.set_source_address_pruning) |
10006 | hw->mac.ops.set_source_address_pruning(hw, |
10007 | false, |
10008 | p); |
10009 | } |
10010 | break; |
10011 | default: |
10012 | return -EINVAL; |
10013 | } |
10014 | |
10015 | adapter->bridge_mode = mode; |
10016 | |
10017 | e_info(drv, "enabling bridge mode: %s\n" , |
10018 | mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB" ); |
10019 | |
10020 | return 0; |
10021 | } |
10022 | |
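/* Reached via the IFLA_BRIDGE_MODE netlink attribute, set from userspace
 * with e.g. (illustrative) "bridge link set dev <pf> hwmode vepa".
 */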
10023 | static int ixgbe_ndo_bridge_setlink(struct net_device *dev, |
10024 | struct nlmsghdr *nlh, u16 flags, |
10025 | struct netlink_ext_ack *extack) |
10026 | { |
10027 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10028 | struct nlattr *attr, *br_spec; |
10029 | int rem; |
10030 | |
10031 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) |
10032 | return -EOPNOTSUPP; |
10033 | |
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10035 | if (!br_spec) |
10036 | return -EINVAL; |
10037 | |
10038 | nla_for_each_nested(attr, br_spec, rem) { |
10039 | int status; |
10040 | __u16 mode; |
10041 | |
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
10043 | continue; |
10044 | |
		mode = nla_get_u16(attr);
10046 | status = ixgbe_configure_bridge_mode(adapter, mode); |
10047 | if (status) |
10048 | return status; |
10049 | |
10050 | break; |
10051 | } |
10052 | |
10053 | return 0; |
10054 | } |
10055 | |
10056 | static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
10057 | struct net_device *dev, |
10058 | u32 filter_mask, int nlflags) |
10059 | { |
10060 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10061 | |
10062 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) |
10063 | return 0; |
10064 | |
	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       adapter->bridge_mode, 0, 0, nlflags,
				       filter_mask, NULL);
10068 | } |
10069 | |
10070 | static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) |
10071 | { |
	struct ixgbe_adapter *adapter = netdev_priv(pdev);
10073 | struct ixgbe_fwd_adapter *accel; |
10074 | int tcs = adapter->hw_tcs ? : 1; |
10075 | int pool, err; |
10076 | |
10077 | if (adapter->xdp_prog) { |
10078 | e_warn(probe, "L2FW offload is not supported with XDP\n" ); |
10079 | return ERR_PTR(error: -EINVAL); |
10080 | } |
10081 | |
10082 | /* The hardware supported by ixgbe only filters on the destination MAC |
10083 | * address. In order to avoid issues we only support offloading modes |
10084 | * where the hardware can actually provide the functionality. |
10085 | */ |
	if (!macvlan_supports_dest_filter(vdev))
		return ERR_PTR(-EMEDIUMTYPE);
10088 | |
10089 | /* We need to lock down the macvlan to be a single queue device so that |
10090 | * we can reuse the tc_to_txq field in the macvlan netdev to represent |
10091 | * the queue mapping to our netdev. |
10092 | */ |
	if (netif_is_multiqueue(vdev))
		return ERR_PTR(-ERANGE);
10095 | |
	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10097 | if (pool == adapter->num_rx_pools) { |
10098 | u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; |
10099 | u16 reserved_pools; |
10100 | |
10101 | if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && |
10102 | adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || |
10103 | adapter->num_rx_pools > IXGBE_MAX_MACVLANS) |
			return ERR_PTR(-EBUSY);
10105 | |
10106 | /* Hardware has a limited number of available pools. Each VF, |
10107 | * and the PF require a pool. Check to ensure we don't |
		 * attempt to use more than the available number of pools.
10109 | */ |
10110 | if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) |
			return ERR_PTR(-EBUSY);
10112 | |
10113 | /* Enable VMDq flag so device will be set in VM mode */ |
10114 | adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | |
10115 | IXGBE_FLAG_SRIOV_ENABLED; |
10116 | |
10117 | /* Try to reserve as many queues per pool as possible, |
10118 | * we start with the configurations that support 4 queues |
		 * per pool, followed by 2, and then by just 1 per pool.
10120 | */ |
10121 | if (used_pools < 32 && adapter->num_rx_pools < 16) |
10122 | reserved_pools = min_t(u16, |
10123 | 32 - used_pools, |
10124 | 16 - adapter->num_rx_pools); |
10125 | else if (adapter->num_rx_pools < 32) |
10126 | reserved_pools = min_t(u16, |
10127 | 64 - used_pools, |
10128 | 32 - adapter->num_rx_pools); |
10129 | else |
			reserved_pools = 64 - used_pools;

		if (!reserved_pools)
			return ERR_PTR(-EBUSY);
10135 | |
10136 | adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; |
10137 | |
10138 | /* Force reinit of ring allocation with VMDQ enabled */ |
		err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
		if (err)
			return ERR_PTR(err);
10142 | |
10143 | if (pool >= adapter->num_rx_pools) |
			return ERR_PTR(-ENOMEM);
10145 | } |
10146 | |
	accel = kzalloc(sizeof(*accel), GFP_KERNEL);
	if (!accel)
		return ERR_PTR(-ENOMEM);
10150 | |
	set_bit(pool, adapter->fwd_bitmask);
	netdev_set_sb_channel(vdev, pool);
10153 | accel->pool = pool; |
10154 | accel->netdev = vdev; |
10155 | |
	if (!netif_running(pdev))
10157 | return accel; |
10158 | |
10159 | err = ixgbe_fwd_ring_up(adapter, accel); |
10160 | if (err) |
		return ERR_PTR(err);
10162 | |
10163 | return accel; |
10164 | } |
10165 | |
10166 | static void ixgbe_fwd_del(struct net_device *pdev, void *priv) |
10167 | { |
10168 | struct ixgbe_fwd_adapter *accel = priv; |
	struct ixgbe_adapter *adapter = netdev_priv(pdev);
10170 | unsigned int rxbase = accel->rx_base_queue; |
10171 | unsigned int i; |
10172 | |
10173 | /* delete unicast filter associated with offloaded interface */ |
	ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
			     VMDQ_P(accel->pool));
10176 | |
10177 | /* Allow remaining Rx packets to get flushed out of the |
10178 | * Rx FIFO before we drop the netdev for the ring. |
10179 | */ |
	usleep_range(10000, 20000);
10181 | |
10182 | for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { |
10183 | struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i]; |
10184 | struct ixgbe_q_vector *qv = ring->q_vector; |
10185 | |
10186 | /* Make sure we aren't processing any packets and clear |
10187 | * netdev to shut down the ring. |
10188 | */ |
		if (netif_running(adapter->netdev))
			napi_synchronize(&qv->napi);
10191 | ring->netdev = NULL; |
10192 | } |
10193 | |
10194 | /* unbind the queues and drop the subordinate channel config */ |
	netdev_unbind_sb_channel(pdev, accel->netdev);
	netdev_set_sb_channel(accel->netdev, 0);

	clear_bit(accel->pool, adapter->fwd_bitmask);
	kfree(accel);
10200 | } |
10201 | |
10202 | #define IXGBE_MAX_MAC_HDR_LEN 127 |
10203 | #define IXGBE_MAX_NETWORK_HDR_LEN 511 |
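/* These limits correspond to the Tx context descriptor's header-length
 * fields (7-bit MACLEN, 9-bit IPLEN); offloads that would need a longer
 * header description are stripped below.
 */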
10204 | |
10205 | static netdev_features_t |
10206 | ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, |
10207 | netdev_features_t features) |
10208 | { |
10209 | unsigned int network_hdr_len, mac_hdr_len; |
10210 | |
10211 | /* Make certain the headers can be described by a context descriptor */ |
10212 | mac_hdr_len = skb_network_header(skb) - skb->data; |
10213 | if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) |
10214 | return features & ~(NETIF_F_HW_CSUM | |
10215 | NETIF_F_SCTP_CRC | |
10216 | NETIF_F_GSO_UDP_L4 | |
10217 | NETIF_F_HW_VLAN_CTAG_TX | |
10218 | NETIF_F_TSO | |
10219 | NETIF_F_TSO6); |
10220 | |
10221 | network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); |
10222 | if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) |
10223 | return features & ~(NETIF_F_HW_CSUM | |
10224 | NETIF_F_SCTP_CRC | |
10225 | NETIF_F_GSO_UDP_L4 | |
10226 | NETIF_F_TSO | |
10227 | NETIF_F_TSO6); |
10228 | |
10229 | /* We can only support IPV4 TSO in tunnels if we can mangle the |
10230 | * inner IP ID field, so strip TSO if MANGLEID is not supported. |
	 * IPsec offload sets skb->encapsulation but still can handle
10232 | * the TSO, so it's the exception. |
10233 | */ |
10234 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { |
10235 | #ifdef CONFIG_IXGBE_IPSEC |
10236 | if (!secpath_exists(skb)) |
10237 | #endif |
10238 | features &= ~NETIF_F_TSO; |
10239 | } |
10240 | |
10241 | return features; |
10242 | } |
10243 | |
10244 | static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) |
10245 | { |
10246 | int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
10247 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10248 | struct bpf_prog *old_prog; |
10249 | bool need_reset; |
10250 | int num_queues; |
10251 | |
10252 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
10253 | return -EINVAL; |
10254 | |
10255 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
10256 | return -EINVAL; |
10257 | |
10258 | /* verify ixgbe ring attributes are sufficient for XDP */ |
10259 | for (i = 0; i < adapter->num_rx_queues; i++) { |
10260 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
10261 | |
10262 | if (ring_is_rsc_enabled(ring)) |
10263 | return -EINVAL; |
10264 | |
10265 | if (frame_size > ixgbe_rx_bufsz(ring)) |
10266 | return -EINVAL; |
10267 | } |
10268 | |
	/* If the number of CPUs is larger than twice the maximum number of
	 * XDP Tx queues, each queue would have to be shared by more than
	 * two CPUs, which is not supported; refuse with -ENOMEM.
	 */
10272 | if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) |
10273 | return -ENOMEM; |
10274 | |
10275 | old_prog = xchg(&adapter->xdp_prog, prog); |
10276 | need_reset = (!!prog != !!old_prog); |
10277 | |
10278 | /* If transitioning XDP modes reconfigure rings */ |
10279 | if (need_reset) { |
10280 | int err; |
10281 | |
10282 | if (!prog) |
10283 | /* Wait until ndo_xsk_wakeup completes. */ |
10284 | synchronize_rcu(); |
		err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10286 | |
10287 | if (err) |
10288 | return -EINVAL; |
10289 | if (!prog) |
10290 | xdp_features_clear_redirect_target(dev); |
10291 | } else { |
10292 | for (i = 0; i < adapter->num_rx_queues; i++) { |
10293 | WRITE_ONCE(adapter->rx_ring[i]->xdp_prog, |
10294 | adapter->xdp_prog); |
10295 | } |
10296 | } |
10297 | |
10298 | if (old_prog) |
		bpf_prog_put(old_prog);
10300 | |
10301 | /* Kick start the NAPI context if there is an AF_XDP socket open |
	 * on that queue id. This is so that receiving will start.
10303 | */ |
10304 | if (need_reset && prog) { |
10305 | num_queues = min_t(int, adapter->num_rx_queues, |
10306 | adapter->num_xdp_queues); |
10307 | for (i = 0; i < num_queues; i++) |
10308 | if (adapter->xdp_ring[i]->xsk_pool) |
				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
						       XDP_WAKEUP_RX);
		xdp_features_set_redirect_target(dev, true);
10312 | } |
10313 | |
10314 | return 0; |
10315 | } |
10316 | |
10317 | static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
10318 | { |
10319 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10320 | |
10321 | switch (xdp->command) { |
10322 | case XDP_SETUP_PROG: |
		return ixgbe_xdp_setup(dev, xdp->prog);
10324 | case XDP_SETUP_XSK_POOL: |
		return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
					    xdp->xsk.queue_id);
10327 | |
10328 | default: |
10329 | return -EINVAL; |
10330 | } |
10331 | } |
10332 | |
10333 | void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) |
10334 | { |
10335 | /* Force memory writes to complete before letting h/w know there |
10336 | * are new descriptors to fetch. |
10337 | */ |
10338 | wmb(); |
	writel(ring->next_to_use, ring->tail);
10340 | } |
10341 | |
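/* ixgbe_xdp_locking_key is enabled when XDP Tx rings may be shared by
 * more than one CPU (fewer rings than CPUs); in that case tail updates
 * must be serialized with the ring's tx_lock.
 */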
10342 | void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) |
10343 | { |
	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
		spin_lock(&ring->tx_lock);
	ixgbe_xdp_ring_update_tail(ring);
	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
		spin_unlock(&ring->tx_lock);
10349 | } |
10350 | |
10351 | static int ixgbe_xdp_xmit(struct net_device *dev, int n, |
10352 | struct xdp_frame **frames, u32 flags) |
10353 | { |
10354 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10355 | struct ixgbe_ring *ring; |
10356 | int nxmit = 0; |
10357 | int i; |
10358 | |
10359 | if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) |
10360 | return -ENETDOWN; |
10361 | |
10362 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
10363 | return -EINVAL; |
10364 | |
	/* During program transitions it's possible adapter->xdp_prog is assigned
10366 | * but ring has not been configured yet. In this case simply abort xmit. |
10367 | */ |
10368 | ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL; |
10369 | if (unlikely(!ring)) |
10370 | return -ENXIO; |
10371 | |
10372 | if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) |
10373 | return -ENXIO; |
10374 | |
10375 | if (static_branch_unlikely(&ixgbe_xdp_locking_key)) |
		spin_lock(&ring->tx_lock);
10377 | |
10378 | for (i = 0; i < n; i++) { |
10379 | struct xdp_frame *xdpf = frames[i]; |
10380 | int err; |
10381 | |
10382 | err = ixgbe_xmit_xdp_ring(ring, xdpf); |
10383 | if (err != IXGBE_XDP_TX) |
10384 | break; |
10385 | nxmit++; |
10386 | } |
10387 | |
10388 | if (unlikely(flags & XDP_XMIT_FLUSH)) |
10389 | ixgbe_xdp_ring_update_tail(ring); |
10390 | |
10391 | if (static_branch_unlikely(&ixgbe_xdp_locking_key)) |
		spin_unlock(&ring->tx_lock);
10393 | |
10394 | return nxmit; |
10395 | } |
10396 | |
10397 | static const struct net_device_ops ixgbe_netdev_ops = { |
10398 | .ndo_open = ixgbe_open, |
10399 | .ndo_stop = ixgbe_close, |
10400 | .ndo_start_xmit = ixgbe_xmit_frame, |
10401 | .ndo_set_rx_mode = ixgbe_set_rx_mode, |
10402 | .ndo_validate_addr = eth_validate_addr, |
10403 | .ndo_set_mac_address = ixgbe_set_mac, |
10404 | .ndo_change_mtu = ixgbe_change_mtu, |
10405 | .ndo_tx_timeout = ixgbe_tx_timeout, |
10406 | .ndo_set_tx_maxrate = ixgbe_tx_maxrate, |
10407 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, |
10408 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, |
10409 | .ndo_eth_ioctl = ixgbe_ioctl, |
10410 | .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, |
10411 | .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, |
10412 | .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, |
10413 | .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, |
10414 | .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state, |
10415 | .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, |
10416 | .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, |
10417 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
10418 | .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats, |
10419 | .ndo_get_stats64 = ixgbe_get_stats64, |
10420 | .ndo_setup_tc = __ixgbe_setup_tc, |
10421 | #ifdef IXGBE_FCOE |
10422 | .ndo_select_queue = ixgbe_select_queue, |
10423 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, |
10424 | .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, |
10425 | .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, |
10426 | .ndo_fcoe_enable = ixgbe_fcoe_enable, |
10427 | .ndo_fcoe_disable = ixgbe_fcoe_disable, |
10428 | .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, |
10429 | .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, |
10430 | #endif /* IXGBE_FCOE */ |
10431 | .ndo_set_features = ixgbe_set_features, |
10432 | .ndo_fix_features = ixgbe_fix_features, |
10433 | .ndo_fdb_add = ixgbe_ndo_fdb_add, |
10434 | .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, |
10435 | .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, |
10436 | .ndo_dfwd_add_station = ixgbe_fwd_add, |
10437 | .ndo_dfwd_del_station = ixgbe_fwd_del, |
10438 | .ndo_features_check = ixgbe_features_check, |
10439 | .ndo_bpf = ixgbe_xdp, |
10440 | .ndo_xdp_xmit = ixgbe_xdp_xmit, |
10441 | .ndo_xsk_wakeup = ixgbe_xsk_wakeup, |
10442 | }; |
10443 | |
10444 | static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, |
10445 | struct ixgbe_ring *tx_ring) |
10446 | { |
10447 | unsigned long wait_delay, delay_interval; |
10448 | struct ixgbe_hw *hw = &adapter->hw; |
10449 | u8 reg_idx = tx_ring->reg_idx; |
10450 | int wait_loop; |
10451 | u32 txdctl; |
10452 | |
10453 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
10454 | |
10455 | /* delay mechanism from ixgbe_disable_tx */ |
10456 | delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
10457 | |
10458 | wait_loop = IXGBE_MAX_RX_DESC_POLL; |
10459 | wait_delay = delay_interval; |
10460 | |
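	/* Poll with a linearly increasing delay (wait_delay grows by
	 * 2 * delay_interval per iteration) until TXDCTL.ENABLE clears.
	 */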
10461 | while (wait_loop--) { |
		usleep_range(wait_delay, wait_delay + 10);
10463 | wait_delay += delay_interval * 2; |
10464 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
10465 | |
10466 | if (!(txdctl & IXGBE_TXDCTL_ENABLE)) |
10467 | return; |
10468 | } |
10469 | |
10470 | e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n" ); |
10471 | } |
10472 | |
10473 | static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, |
10474 | struct ixgbe_ring *tx_ring) |
10475 | { |
	set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10477 | ixgbe_disable_txr_hw(adapter, tx_ring); |
10478 | } |
10479 | |
10480 | static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, |
10481 | struct ixgbe_ring *rx_ring) |
10482 | { |
10483 | unsigned long wait_delay, delay_interval; |
10484 | struct ixgbe_hw *hw = &adapter->hw; |
10485 | u8 reg_idx = rx_ring->reg_idx; |
10486 | int wait_loop; |
10487 | u32 rxdctl; |
10488 | |
10489 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
10490 | rxdctl &= ~IXGBE_RXDCTL_ENABLE; |
10491 | rxdctl |= IXGBE_RXDCTL_SWFLSH; |
10492 | |
10493 | /* write value back with RXDCTL.ENABLE bit cleared */ |
10494 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
10495 | |
10496 | /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ |
10497 | if (hw->mac.type == ixgbe_mac_82598EB && |
10498 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) |
10499 | return; |
10500 | |
10501 | /* delay mechanism from ixgbe_disable_rx */ |
10502 | delay_interval = ixgbe_get_completion_timeout(adapter) / 100; |
10503 | |
10504 | wait_loop = IXGBE_MAX_RX_DESC_POLL; |
10505 | wait_delay = delay_interval; |
10506 | |
10507 | while (wait_loop--) { |
		usleep_range(wait_delay, wait_delay + 10);
10509 | wait_delay += delay_interval * 2; |
10510 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
10511 | |
10512 | if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) |
10513 | return; |
10514 | } |
10515 | |
10516 | e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n" ); |
10517 | } |
10518 | |
10519 | static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) |
10520 | { |
10521 | memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); |
10522 | memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); |
10523 | } |
10524 | |
10525 | static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) |
10526 | { |
10527 | memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); |
10528 | memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); |
10529 | } |
10530 | |
10531 | /** |
10532 | * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings |
10533 | * @adapter: adapter structure |
10534 | * @ring: ring index |
10535 | * |
10536 | * This function disables a certain Rx/Tx/XDP Tx ring. The function |
10537 | * assumes that the netdev is running. |
10538 | **/ |
10539 | void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) |
10540 | { |
10541 | struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; |
10542 | |
10543 | rx_ring = adapter->rx_ring[ring]; |
10544 | tx_ring = adapter->tx_ring[ring]; |
10545 | xdp_ring = adapter->xdp_ring[ring]; |
10546 | |
10547 | ixgbe_disable_txr(adapter, tx_ring); |
10548 | if (xdp_ring) |
		ixgbe_disable_txr(adapter, xdp_ring);
10550 | ixgbe_disable_rxr_hw(adapter, rx_ring); |
10551 | |
10552 | if (xdp_ring) |
10553 | synchronize_rcu(); |
10554 | |
10555 | /* Rx/Tx/XDP Tx share the same napi context. */ |
	napi_disable(&rx_ring->q_vector->napi);
10557 | |
10558 | ixgbe_clean_tx_ring(tx_ring); |
10559 | if (xdp_ring) |
		ixgbe_clean_tx_ring(xdp_ring);
10561 | ixgbe_clean_rx_ring(rx_ring); |
10562 | |
10563 | ixgbe_reset_txr_stats(tx_ring); |
10564 | if (xdp_ring) |
		ixgbe_reset_txr_stats(xdp_ring);
10566 | ixgbe_reset_rxr_stats(rx_ring); |
10567 | } |
10568 | |
10569 | /** |
10570 | * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings |
10571 | * @adapter: adapter structure |
10572 | * @ring: ring index |
10573 | * |
10574 | * This function enables a certain Rx/Tx/XDP Tx ring. The function |
10575 | * assumes that the netdev is running. |
10576 | **/ |
10577 | void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) |
10578 | { |
10579 | struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; |
10580 | |
10581 | rx_ring = adapter->rx_ring[ring]; |
10582 | tx_ring = adapter->tx_ring[ring]; |
10583 | xdp_ring = adapter->xdp_ring[ring]; |
10584 | |
10585 | /* Rx/Tx/XDP Tx share the same napi context. */ |
	napi_enable(&rx_ring->q_vector->napi);

	ixgbe_configure_tx_ring(adapter, tx_ring);
	if (xdp_ring)
		ixgbe_configure_tx_ring(adapter, xdp_ring);
	ixgbe_configure_rx_ring(adapter, rx_ring);

	clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
	if (xdp_ring)
		clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10596 | } |
10597 | |
10598 | /** |
10599 | * ixgbe_enumerate_functions - Get the number of ports this device has |
10600 | * @adapter: adapter structure |
10601 | * |
 * This function enumerates the physical functions co-located on a single slot,
10603 | * in order to determine how many ports a device has. This is most useful in |
10604 | * determining the required GT/s of PCIe bandwidth necessary for optimal |
10605 | * performance. |
10606 | **/ |
10607 | static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) |
10608 | { |
10609 | struct pci_dev *entry, *pdev = adapter->pdev; |
10610 | int physfns = 0; |
10611 | |
	/* Some cards cannot use the generic method of counting PCIe functions,
10613 | * because they are behind a parent switch, so we hardcode these with |
10614 | * the correct number of functions. |
10615 | */ |
	if (ixgbe_pcie_from_parent(&adapter->hw))
10617 | physfns = 4; |
10618 | |
10619 | list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { |
10620 | /* don't count virtual functions */ |
10621 | if (entry->is_virtfn) |
10622 | continue; |
10623 | |
10624 | /* When the devices on the bus don't all match our device ID, |
10625 | * we can't reliably determine the correct number of |
10626 | * functions. This can occur if a function has been direct |
10627 | * attached to a virtual machine using VT-d, for example. In |
10628 | * this case, simply return -1 to indicate this. |
10629 | */ |
10630 | if ((entry->vendor != pdev->vendor) || |
10631 | (entry->device != pdev->device)) |
10632 | return -1; |
10633 | |
10634 | physfns++; |
10635 | } |
10636 | |
10637 | return physfns; |
10638 | } |
10639 | |
10640 | /** |
10641 | * ixgbe_wol_supported - Check whether device supports WoL |
10642 | * @adapter: the adapter private structure |
10643 | * @device_id: the device ID |
10644 | * @subdevice_id: the subsystem device ID |
10645 | * |
10646 | * This function is used by probe and ethtool to determine |
10647 | * which devices have WoL support |
10648 | * |
10649 | **/ |
10650 | bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, |
10651 | u16 subdevice_id) |
10652 | { |
10653 | struct ixgbe_hw *hw = &adapter->hw; |
10654 | u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; |
10655 | |
10656 | /* WOL not supported on 82598 */ |
10657 | if (hw->mac.type == ixgbe_mac_82598EB) |
10658 | return false; |
10659 | |
10660 | /* check eeprom to see if WOL is enabled for X540 and newer */ |
10661 | if (hw->mac.type >= ixgbe_mac_X540) { |
10662 | if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || |
10663 | ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && |
10664 | (hw->bus.func == 0))) |
10665 | return true; |
10666 | } |
10667 | |
10668 | /* WOL is determined based on device IDs for 82599 MACs */ |
10669 | switch (device_id) { |
10670 | case IXGBE_DEV_ID_82599_SFP: |
		/* Only these subdevices support WOL */
10672 | switch (subdevice_id) { |
10673 | case IXGBE_SUBDEV_ID_82599_560FLR: |
10674 | case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: |
10675 | case IXGBE_SUBDEV_ID_82599_SFP_WOL0: |
10676 | case IXGBE_SUBDEV_ID_82599_SFP_2OCP: |
10677 | /* only support first port */ |
10678 | if (hw->bus.func != 0) |
10679 | break; |
10680 | fallthrough; |
10681 | case IXGBE_SUBDEV_ID_82599_SP_560FLR: |
10682 | case IXGBE_SUBDEV_ID_82599_SFP: |
10683 | case IXGBE_SUBDEV_ID_82599_RNDC: |
10684 | case IXGBE_SUBDEV_ID_82599_ECNA_DP: |
10685 | case IXGBE_SUBDEV_ID_82599_SFP_1OCP: |
10686 | case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: |
10687 | case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: |
10688 | return true; |
10689 | } |
10690 | break; |
10691 | case IXGBE_DEV_ID_82599EN_SFP: |
10692 | /* Only these subdevices support WOL */ |
10693 | switch (subdevice_id) { |
10694 | case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: |
10695 | return true; |
10696 | } |
10697 | break; |
10698 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: |
10699 | /* All except this subdevice support WOL */ |
10700 | if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) |
10701 | return true; |
10702 | break; |
10703 | case IXGBE_DEV_ID_82599_KX4: |
10704 | return true; |
10705 | default: |
10706 | break; |
10707 | } |
10708 | |
10709 | return false; |
10710 | } |
10711 | |
10712 | /** |
10713 | * ixgbe_set_fw_version - Set FW version |
10714 | * @adapter: the adapter private structure |
10715 | * |
10716 | * This function is used by probe and ethtool to determine the FW version to |
10717 | * format to display. The FW version is taken from the EEPROM/NVM. |
10718 | */ |
10719 | static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) |
10720 | { |
10721 | struct ixgbe_hw *hw = &adapter->hw; |
10722 | struct ixgbe_nvm_version nvm_ver; |
10723 | |
	ixgbe_get_oem_prod_version(hw, &nvm_ver);
10725 | if (nvm_ver.oem_valid) { |
		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
			 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
			 nvm_ver.oem_release);
10729 | return; |
10730 | } |
10731 | |
	ixgbe_get_etk_id(hw, &nvm_ver);
	ixgbe_get_orom_version(hw, &nvm_ver);
10734 | |
10735 | if (nvm_ver.or_valid) { |
		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
			 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
			 nvm_ver.or_build, nvm_ver.or_patch);
10739 | return; |
10740 | } |
10741 | |
10742 | /* Set ETrack ID format */ |
	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
		 "0x%08x", nvm_ver.etk_id);
10745 | } |
10746 | |
10747 | /** |
10748 | * ixgbe_probe - Device Initialization Routine |
10749 | * @pdev: PCI device information struct |
10750 | * @ent: entry in ixgbe_pci_tbl |
10751 | * |
10752 | * Returns 0 on success, negative on failure |
10753 | * |
10754 | * ixgbe_probe initializes an adapter identified by a pci_dev structure. |
10755 | * The OS initialization, configuring of the adapter private structure, |
10756 | * and a hardware reset occur. |
10757 | **/ |
10758 | static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
10759 | { |
10760 | struct net_device *netdev; |
10761 | struct ixgbe_adapter *adapter = NULL; |
10762 | struct ixgbe_hw *hw; |
10763 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; |
10764 | unsigned int indices = MAX_TX_QUEUES; |
10765 | u8 part_str[IXGBE_PBANUM_LENGTH]; |
10766 | int i, err, expected_gts; |
10767 | bool disable_dev = false; |
10768 | #ifdef IXGBE_FCOE |
10769 | u16 device_caps; |
10770 | #endif |
10771 | u32 eec; |
10772 | |
10773 | /* Catch broken hardware that put the wrong VF device ID in |
10774 | * the PCIe SR-IOV capability. |
10775 | */ |
10776 | if (pdev->is_virtfn) { |
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10778 | pci_name(pdev), pdev->vendor, pdev->device); |
10779 | return -EINVAL; |
10780 | } |
10781 | |
	err = pci_enable_device_mem(pdev);
10783 | if (err) |
10784 | return err; |
10785 | |
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10787 | if (err) { |
10788 | dev_err(&pdev->dev, |
10789 | "No usable DMA configuration, aborting\n" ); |
10790 | goto err_dma; |
10791 | } |
10792 | |
	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10794 | if (err) { |
10795 | dev_err(&pdev->dev, |
10796 | "pci_request_selected_regions failed 0x%x\n" , err); |
10797 | goto err_pci_reg; |
10798 | } |
10799 | |
	pci_set_master(pdev);
	pci_save_state(pdev);
10802 | |
10803 | if (ii->mac == ixgbe_mac_82598EB) { |
10804 | #ifdef CONFIG_IXGBE_DCB |
10805 | /* 8 TC w/ 4 queues per TC */ |
10806 | indices = 4 * MAX_TRAFFIC_CLASS; |
10807 | #else |
10808 | indices = IXGBE_MAX_RSS_INDICES; |
10809 | #endif |
10810 | } |
10811 | |
10812 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); |
10813 | if (!netdev) { |
10814 | err = -ENOMEM; |
10815 | goto err_alloc_etherdev; |
10816 | } |
10817 | |
10818 | SET_NETDEV_DEV(netdev, &pdev->dev); |
10819 | |
	adapter = netdev_priv(netdev);
10821 | |
10822 | adapter->netdev = netdev; |
10823 | adapter->pdev = pdev; |
10824 | hw = &adapter->hw; |
10825 | hw->back = adapter; |
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10827 | |
10828 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), |
10829 | pci_resource_len(pdev, 0)); |
10830 | adapter->io_addr = hw->hw_addr; |
10831 | if (!hw->hw_addr) { |
10832 | err = -EIO; |
10833 | goto err_ioremap; |
10834 | } |
10835 | |
10836 | netdev->netdev_ops = &ixgbe_netdev_ops; |
10837 | ixgbe_set_ethtool_ops(netdev); |
10838 | netdev->watchdog_timeo = 5 * HZ; |
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10840 | |
10841 | /* Setup hw api */ |
10842 | hw->mac.ops = *ii->mac_ops; |
10843 | hw->mac.type = ii->mac; |
10844 | hw->mvals = ii->mvals; |
10845 | if (ii->link_ops) |
10846 | hw->link.ops = *ii->link_ops; |
10847 | |
10848 | /* EEPROM */ |
10849 | hw->eeprom.ops = *ii->eeprom_ops; |
10850 | eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); |
	if (ixgbe_removed(hw->hw_addr)) {
10852 | err = -EIO; |
10853 | goto err_ioremap; |
10854 | } |
	/* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit-bang */
10856 | if (!(eec & BIT(8))) |
10857 | hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; |
10858 | |
10859 | /* PHY */ |
10860 | hw->phy.ops = *ii->phy_ops; |
10861 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; |
10862 | /* ixgbe_identify_phy_generic will set prtad and mmds properly */ |
10863 | hw->phy.mdio.prtad = MDIO_PRTAD_NONE; |
10864 | hw->phy.mdio.mmds = 0; |
10865 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
10866 | hw->phy.mdio.dev = netdev; |
10867 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; |
10868 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; |
10869 | |
10870 | /* setup the private structure */ |
10871 | err = ixgbe_sw_init(adapter, ii); |
10872 | if (err) |
10873 | goto err_sw_init; |
10874 | |
10875 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) |
10876 | adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; |
10877 | |
10878 | switch (adapter->hw.mac.type) { |
10879 | case ixgbe_mac_X550: |
10880 | case ixgbe_mac_X550EM_x: |
10881 | netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; |
10882 | break; |
10883 | case ixgbe_mac_x550em_a: |
10884 | netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; |
10885 | break; |
10886 | default: |
10887 | break; |
10888 | } |
10889 | |
10890 | /* Make sure the SWFW semaphore is in a valid state */ |
10891 | if (hw->mac.ops.init_swfw_sync) |
10892 | hw->mac.ops.init_swfw_sync(hw); |
10893 | |
	/* Make it possible for the adapter to be woken up via WOL */
10895 | switch (adapter->hw.mac.type) { |
10896 | case ixgbe_mac_82599EB: |
10897 | case ixgbe_mac_X540: |
10898 | case ixgbe_mac_X550: |
10899 | case ixgbe_mac_X550EM_x: |
10900 | case ixgbe_mac_x550em_a: |
10901 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
10902 | break; |
10903 | default: |
10904 | break; |
10905 | } |
10906 | |
10907 | /* |
10908 | * If there is a fan on this device and it has failed log the |
10909 | * failure. |
10910 | */ |
10911 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { |
10912 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); |
10913 | if (esdp & IXGBE_ESDP_SDP1) |
10914 | e_crit(probe, "Fan has stopped, replace the adapter\n" ); |
10915 | } |
10916 | |
10917 | if (allow_unsupported_sfp) |
10918 | hw->allow_unsupported_sfp = allow_unsupported_sfp; |
10919 | |
10920 | /* reset_hw fills in the perm_addr as well */ |
10921 | hw->phy.reset_if_overtemp = true; |
10922 | err = hw->mac.ops.reset_hw(hw); |
10923 | hw->phy.reset_if_overtemp = false; |
10924 | ixgbe_set_eee_capable(adapter); |
10925 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { |
10926 | err = 0; |
10927 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
10928 | e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n" ); |
10929 | e_dev_err("Reload the driver after installing a supported module.\n" ); |
10930 | goto err_sw_init; |
10931 | } else if (err) { |
10932 | e_dev_err("HW Init failed: %d\n" , err); |
10933 | goto err_sw_init; |
10934 | } |
10935 | |
10936 | #ifdef CONFIG_PCI_IOV |
10937 | /* SR-IOV not supported on the 82598 */ |
10938 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
10939 | goto skip_sriov; |
10940 | /* Mailbox */ |
10941 | ixgbe_init_mbx_params_pf(hw); |
10942 | hw->mbx.ops = ii->mbx_ops; |
	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10944 | ixgbe_enable_sriov(adapter, max_vfs); |
10945 | skip_sriov: |
10946 | |
10947 | #endif |
10948 | netdev->features = NETIF_F_SG | |
10949 | NETIF_F_TSO | |
10950 | NETIF_F_TSO6 | |
10951 | NETIF_F_RXHASH | |
10952 | NETIF_F_RXCSUM | |
10953 | NETIF_F_HW_CSUM; |
10954 | |
10955 | #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ |
10956 | NETIF_F_GSO_GRE_CSUM | \ |
10957 | NETIF_F_GSO_IPXIP4 | \ |
10958 | NETIF_F_GSO_IPXIP6 | \ |
10959 | NETIF_F_GSO_UDP_TUNNEL | \ |
10960 | NETIF_F_GSO_UDP_TUNNEL_CSUM) |
10961 | |
10962 | netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; |
10963 | netdev->features |= NETIF_F_GSO_PARTIAL | |
10964 | IXGBE_GSO_PARTIAL_FEATURES; |
10965 | |
10966 | if (hw->mac.type >= ixgbe_mac_82599EB) |
10967 | netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4; |
10968 | |
10969 | #ifdef CONFIG_IXGBE_IPSEC |
10970 | #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \ |
10971 | NETIF_F_HW_ESP_TX_CSUM | \ |
10972 | NETIF_F_GSO_ESP) |
10973 | |
10974 | if (adapter->ipsec) |
10975 | netdev->features |= IXGBE_ESP_FEATURES; |
10976 | #endif |
10977 | /* copy netdev features into list of user selectable features */ |
10978 | netdev->hw_features |= netdev->features | |
10979 | NETIF_F_HW_VLAN_CTAG_FILTER | |
10980 | NETIF_F_HW_VLAN_CTAG_RX | |
10981 | NETIF_F_HW_VLAN_CTAG_TX | |
10982 | NETIF_F_RXALL | |
10983 | NETIF_F_HW_L2FW_DOFFLOAD; |
10984 | |
10985 | if (hw->mac.type >= ixgbe_mac_82599EB) |
10986 | netdev->hw_features |= NETIF_F_NTUPLE | |
10987 | NETIF_F_HW_TC; |
10988 | |
10989 | netdev->features |= NETIF_F_HIGHDMA; |
10990 | |
10991 | netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; |
10992 | netdev->hw_enc_features |= netdev->vlan_features; |
10993 | netdev->mpls_features |= NETIF_F_SG | |
10994 | NETIF_F_TSO | |
10995 | NETIF_F_TSO6 | |
10996 | NETIF_F_HW_CSUM; |
10997 | netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES; |
10998 | |
10999 | /* set this bit last since it cannot be part of vlan_features */ |
11000 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | |
11001 | NETIF_F_HW_VLAN_CTAG_RX | |
11002 | NETIF_F_HW_VLAN_CTAG_TX; |
11003 | |
11004 | netdev->priv_flags |= IFF_UNICAST_FLT; |
11005 | netdev->priv_flags |= IFF_SUPP_NOFCS; |
11006 | |
11007 | netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
11008 | NETDEV_XDP_ACT_XSK_ZEROCOPY; |
11009 | |
11010 | /* MTU range: 68 - 9710 */ |
11011 | netdev->min_mtu = ETH_MIN_MTU; |
11012 | netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); |
11013 | |
11014 | #ifdef CONFIG_IXGBE_DCB |
11015 | if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) |
11016 | netdev->dcbnl_ops = &ixgbe_dcbnl_ops; |
11017 | #endif |
11018 | |
11019 | #ifdef IXGBE_FCOE |
11020 | if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { |
11021 | unsigned int fcoe_l; |
11022 | |
11023 | if (hw->mac.ops.get_device_caps) { |
11024 | hw->mac.ops.get_device_caps(hw, &device_caps); |
11025 | if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) |
11026 | adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; |
11027 | } |
11028 | |
11029 | |
11030 | fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); |
11031 | adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; |
11032 | |
11033 | netdev->features |= NETIF_F_FSO | |
11034 | NETIF_F_FCOE_CRC; |
11035 | |
11036 | netdev->vlan_features |= NETIF_F_FSO | |
11037 | NETIF_F_FCOE_CRC | |
11038 | NETIF_F_FCOE_MTU; |
11039 | } |
11040 | #endif /* IXGBE_FCOE */ |
11041 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) |
11042 | netdev->hw_features |= NETIF_F_LRO; |
11043 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
11044 | netdev->features |= NETIF_F_LRO; |
11045 | |
11046 | if (ixgbe_check_fw_error(adapter)) { |
11047 | err = -EIO; |
11048 | goto err_sw_init; |
11049 | } |
11050 | |
11051 | /* make sure the EEPROM is good */ |
11052 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
11053 | e_dev_err("The EEPROM Checksum Is Not Valid\n" ); |
11054 | err = -EIO; |
11055 | goto err_sw_init; |
11056 | } |
11057 | |
	eth_platform_get_mac_address(&adapter->pdev->dev,
				     adapter->hw.mac.perm_addr);
11060 | |
	eth_hw_addr_set(netdev, hw->mac.perm_addr);
11062 | |
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
11065 | err = -EIO; |
11066 | goto err_sw_init; |
11067 | } |
11068 | |
11069 | /* Set hw->mac.addr to permanent MAC address */ |
	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11071 | ixgbe_mac_set_default_filter(adapter); |
11072 | |
11073 | timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); |
11074 | |
	if (ixgbe_removed(hw->hw_addr)) {
11076 | err = -EIO; |
11077 | goto err_sw_init; |
11078 | } |
11079 | INIT_WORK(&adapter->service_task, ixgbe_service_task); |
	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11082 | |
11083 | err = ixgbe_init_interrupt_scheme(adapter); |
11084 | if (err) |
11085 | goto err_sw_init; |
11086 | |
	for (i = 0; i < adapter->num_rx_queues; i++)
		u64_stats_init(&adapter->rx_ring[i]->syncp);
	for (i = 0; i < adapter->num_tx_queues; i++)
		u64_stats_init(&adapter->tx_ring[i]->syncp);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		u64_stats_init(&adapter->xdp_ring[i]->syncp);
11093 | |
11094 | /* WOL not supported for all devices */ |
11095 | adapter->wol = 0; |
11096 | hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); |
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
11099 | if (hw->wol_enabled) |
11100 | adapter->wol = IXGBE_WUFC_MAG; |
11101 | |
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11103 | |
11104 | /* save off EEPROM version number */ |
11105 | ixgbe_set_fw_version(adapter); |
11106 | |
11107 | /* pick up the PCI bus settings for reporting later */ |
11108 | if (ixgbe_pcie_from_parent(hw)) |
11109 | ixgbe_get_parent_bus_info(adapter); |
11110 | else |
11111 | hw->mac.ops.get_bus_info(hw); |
11112 | |
11113 | /* calculate the expected PCIe bandwidth required for optimal |
11114 | * performance. Note that some older parts will never have enough |
11115 | * bandwidth due to being older generation PCIe parts. We clamp these |
11116 | * parts to ensure no warning is displayed if it can't be fixed. |
11117 | */ |
11118 | switch (hw->mac.type) { |
11119 | case ixgbe_mac_82598EB: |
11120 | expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); |
11121 | break; |
11122 | default: |
11123 | expected_gts = ixgbe_enumerate_functions(adapter) * 10; |
11124 | break; |
11125 | } |
11126 | |
11127 | /* don't check link if we failed to enumerate functions */ |
11128 | if (expected_gts > 0) |
11129 | ixgbe_check_minimum_link(adapter, expected_gts); |
11130 | |
	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
	if (err)
		strscpy(part_str, "Unknown", sizeof(part_str));
11134 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) |
11135 | e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n" , |
11136 | hw->mac.type, hw->phy.type, hw->phy.sfp_type, |
11137 | part_str); |
11138 | else |
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11140 | hw->mac.type, hw->phy.type, part_str); |
11141 | |
	e_dev_info("%pM\n", netdev->dev_addr);
11143 | |
11144 | /* reset the hardware with the new settings */ |
11145 | err = hw->mac.ops.start_hw(hw); |
11146 | if (err == IXGBE_ERR_EEPROM_VERSION) { |
11147 | /* We are running on a pre-production device, log a warning */ |
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
11154 | } |
	strcpy(netdev->name, "eth%d");
	pci_set_drvdata(pdev, adapter);
	err = register_netdev(netdev);
11158 | if (err) |
11159 | goto err_register; |
11162 | /* power down the optics for 82599 SFP+ fiber */ |
11163 | if (hw->mac.ops.disable_tx_laser) |
11164 | hw->mac.ops.disable_tx_laser(hw); |
11165 | |
11166 | /* carrier off reporting is important to ethtool even BEFORE open */ |
	netif_carrier_off(netdev);
11168 | |
11169 | #ifdef CONFIG_IXGBE_DCA |
	if (dca_add_requester(&pdev->dev) == 0) {
11171 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
11172 | ixgbe_setup_dca(adapter); |
11173 | } |
11174 | #endif |
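	/* ixgbe_vf_configuration() decodes the event mask: the low bits
	 * carry the VF index and bit 28 (0x10000000) flags the VF as
	 * enabled
	 */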
11175 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { |
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
11179 | } |
11180 | |
	/* firmware requires the driver version to be 0xFFFFFFFF
	 * since the OS does not support the feature
	 */
11184 | if (hw->mac.ops.set_fw_drv_ver) |
11185 | hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, |
11186 | sizeof(UTS_RELEASE) - 1, |
11187 | UTS_RELEASE); |
11188 | |
11189 | /* add san mac addr to netdev */ |
	ixgbe_add_sanmac_netdev(netdev);
11191 | |
	e_dev_info("%s\n", ixgbe_default_device_descr);
11193 | |
11194 | #ifdef CONFIG_IXGBE_HWMON |
11195 | if (ixgbe_sysfs_init(adapter)) |
		e_err(probe, "failed to allocate sysfs resources\n");
11197 | #endif /* CONFIG_IXGBE_HWMON */ |
11198 | |
11199 | ixgbe_dbg_adapter_init(adapter); |
11200 | |
11201 | /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ |
11202 | if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) |
11203 | hw->mac.ops.setup_link(hw, |
11204 | IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, |
11205 | true); |
11206 | |
11207 | err = ixgbe_mii_bus_init(hw); |
11208 | if (err) |
11209 | goto err_netdev; |
11210 | |
11211 | return 0; |
11212 | |
11213 | err_netdev: |
	unregister_netdev(netdev);
11215 | err_register: |
11216 | ixgbe_release_hw_control(adapter); |
11217 | ixgbe_clear_interrupt_scheme(adapter); |
11218 | err_sw_init: |
11219 | ixgbe_disable_sriov(adapter); |
11220 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
	iounmap(adapter->io_addr);
	kfree(adapter->jump_tables[0]);
	kfree(adapter->mac_table);
	kfree(adapter->rss_key);
	bitmap_free(adapter->af_xdp_zc_qps);
11226 | err_ioremap: |
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
11229 | err_alloc_etherdev: |
11230 | pci_release_mem_regions(pdev); |
11231 | err_pci_reg: |
11232 | err_dma: |
11233 | if (!adapter || disable_dev) |
		pci_disable_device(pdev);
11235 | return err; |
11236 | } |
11237 | |
11238 | /** |
11239 | * ixgbe_remove - Device Removal Routine |
11240 | * @pdev: PCI device information struct |
11241 | * |
11242 | * ixgbe_remove is called by the PCI subsystem to alert the driver |
 * that it should release a PCI device. This could be caused by a
11244 | * Hot-Plug event, or because the driver is going to be removed from |
11245 | * memory. |
11246 | **/ |
11247 | static void ixgbe_remove(struct pci_dev *pdev) |
11248 | { |
11249 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
11250 | struct net_device *netdev; |
11251 | bool disable_dev; |
11252 | int i; |
11253 | |
11254 | /* if !adapter then we already cleaned up in probe */ |
11255 | if (!adapter) |
11256 | return; |
11257 | |
11258 | netdev = adapter->netdev; |
11259 | ixgbe_dbg_adapter_exit(adapter); |
11260 | |
	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);
11263 | |
11264 | if (adapter->mii_bus) |
		mdiobus_unregister(adapter->mii_bus);
11266 | |
11267 | #ifdef CONFIG_IXGBE_DCA |
11268 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
11269 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; |
		dca_remove_requester(&pdev->dev);
11271 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, |
11272 | IXGBE_DCA_CTRL_DCA_DISABLE); |
11273 | } |
11274 | |
11275 | #endif |
11276 | #ifdef CONFIG_IXGBE_HWMON |
11277 | ixgbe_sysfs_exit(adapter); |
11278 | #endif /* CONFIG_IXGBE_HWMON */ |
11279 | |
11280 | /* remove the added san mac */ |
	ixgbe_del_sanmac_netdev(netdev);
11282 | |
11283 | #ifdef CONFIG_PCI_IOV |
11284 | ixgbe_disable_sriov(adapter); |
11285 | #endif |
11286 | if (netdev->reg_state == NETREG_REGISTERED) |
		unregister_netdev(netdev);
11288 | |
11289 | ixgbe_stop_ipsec_offload(adapter); |
11290 | ixgbe_clear_interrupt_scheme(adapter); |
11291 | |
11292 | ixgbe_release_hw_control(adapter); |
11293 | |
11294 | #ifdef CONFIG_DCB |
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);
11297 | |
11298 | #endif |
	iounmap(adapter->io_addr);
11300 | pci_release_mem_regions(pdev); |
11301 | |
	e_dev_info("complete\n");
11303 | |
11304 | for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { |
11305 | if (adapter->jump_tables[i]) { |
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
11308 | } |
		kfree(adapter->jump_tables[i]);
11310 | } |
11311 | |
	kfree(adapter->mac_table);
	kfree(adapter->rss_key);
	bitmap_free(adapter->af_xdp_zc_qps);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
11317 | |
11318 | if (disable_dev) |
		pci_disable_device(pdev);
11320 | } |
11321 | |
11322 | /** |
11323 | * ixgbe_io_error_detected - called when PCI error is detected |
11324 | * @pdev: Pointer to PCI device |
11325 | * @state: The current pci connection state |
11326 | * |
11327 | * This function is called after a PCI bus error affecting |
11328 | * this device has been detected. |
11329 | */ |
11330 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, |
11331 | pci_channel_state_t state) |
11332 | { |
11333 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
11334 | struct net_device *netdev = adapter->netdev; |
11335 | |
11336 | #ifdef CONFIG_PCI_IOV |
11337 | struct ixgbe_hw *hw = &adapter->hw; |
11338 | struct pci_dev *bdev, *vfdev; |
11339 | u32 dw0, dw1, dw2, dw3; |
11340 | int vf, pos; |
11341 | u16 req_id, pf_func; |
11342 | |
11343 | if (adapter->hw.mac.type == ixgbe_mac_82598EB || |
11344 | adapter->num_vfs == 0) |
11345 | goto skip_bad_vf_detection; |
11346 | |
11347 | bdev = pdev->bus->self; |
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11349 | bdev = bdev->bus->self; |
11350 | |
11351 | if (!bdev) |
11352 | goto skip_bad_vf_detection; |
11353 | |
	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11355 | if (!pos) |
11356 | goto skip_bad_vf_detection; |
11357 | |
	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
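	/* dw0-dw3 now hold the TLP header captured by AER; for request
	 * TLPs the requester ID (bus/device/function) occupies the upper
	 * 16 bits of the second dword, extracted below
	 */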
	if (ixgbe_removed(hw->hw_addr))
11363 | goto skip_bad_vf_detection; |
11364 | |
11365 | req_id = dw1 >> 16; |
11366 | /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ |
11367 | if (!(req_id & 0x0080)) |
11368 | goto skip_bad_vf_detection; |
11369 | |
11370 | pf_func = req_id & 0x01; |
11371 | if ((pf_func & 1) == (pdev->devfn & 1)) { |
11372 | unsigned int device_id; |
11373 | |
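		/* function bit 0 distinguishes the two PFs of a dual-port
		 * device, so the VF index is the remaining low routing-ID
		 * bits shifted down by one
		 */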
11374 | vf = (req_id & 0x7F) >> 1; |
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
11379 | switch (adapter->hw.mac.type) { |
11380 | case ixgbe_mac_82599EB: |
11381 | device_id = IXGBE_82599_VF_DEVICE_ID; |
11382 | break; |
11383 | case ixgbe_mac_X540: |
11384 | device_id = IXGBE_X540_VF_DEVICE_ID; |
11385 | break; |
11386 | case ixgbe_mac_X550: |
11387 | device_id = IXGBE_DEV_ID_X550_VF; |
11388 | break; |
11389 | case ixgbe_mac_X550EM_x: |
11390 | device_id = IXGBE_DEV_ID_X550EM_X_VF; |
11391 | break; |
11392 | case ixgbe_mac_x550em_a: |
11393 | device_id = IXGBE_DEV_ID_X550EM_A_VF; |
11394 | break; |
11395 | default: |
11396 | device_id = 0; |
11397 | break; |
11398 | } |
11399 | |
11400 | /* Find the pci device of the offending VF */ |
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
11407 | } |
11408 | /* |
11409 | * There's a slim chance the VF could have been hot plugged, |
11410 | * so if it is no longer present we don't need to issue the |
11411 | * VFLR. Just clean up the AER in that case. |
11412 | */ |
11413 | if (vfdev) { |
			pcie_flr(vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
11417 | } |
11418 | } |
11419 | |
11420 | /* |
11421 | * Even though the error may have occurred on the other port |
11422 | * we still need to increment the vf error reference count for |
11423 | * both ports because the I/O resume function will be called |
11424 | * for both of them. |
11425 | */ |
11426 | adapter->vferr_refcount++; |
11427 | |
11428 | return PCI_ERS_RESULT_RECOVERED; |
11429 | |
11430 | skip_bad_vf_detection: |
11431 | #endif /* CONFIG_PCI_IOV */ |
11432 | if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) |
11433 | return PCI_ERS_RESULT_DISCONNECT; |
11434 | |
	if (!netif_device_present(netdev))
11436 | return PCI_ERS_RESULT_DISCONNECT; |
11437 | |
11438 | rtnl_lock(); |
	netif_device_detach(netdev);
11440 | |
	if (netif_running(netdev))
11442 | ixgbe_close_suspend(adapter); |
11443 | |
11444 | if (state == pci_channel_io_perm_failure) { |
11445 | rtnl_unlock(); |
11446 | return PCI_ERS_RESULT_DISCONNECT; |
11447 | } |
11448 | |
	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
11451 | rtnl_unlock(); |
11452 | |
11453 | /* Request a slot reset. */ |
11454 | return PCI_ERS_RESULT_NEED_RESET; |
11455 | } |
11456 | |
11457 | /** |
11458 | * ixgbe_io_slot_reset - called after the pci bus has been reset. |
11459 | * @pdev: Pointer to PCI device |
11460 | * |
11461 | * Restart the card from scratch, as if from a cold-boot. |
11462 | */ |
11463 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) |
11464 | { |
11465 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
11466 | pci_ers_result_t result; |
11467 | |
	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
11470 | result = PCI_ERS_RESULT_DISCONNECT; |
11471 | } else { |
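		/* clear_bit() is not a full barrier; order everything done
		 * while the device was marked disabled before other CPUs
		 * can observe __IXGBE_DISABLED being cleared
		 */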
11472 | smp_mb__before_atomic(); |
		clear_bit(__IXGBE_DISABLED, &adapter->state);
11474 | adapter->hw.hw_addr = adapter->io_addr; |
		pci_set_master(pdev);
		pci_restore_state(pdev);
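		/* re-save the config space: pci_restore_state() marks the
		 * saved state as consumed, and a later recovery will want a
		 * valid copy to restore from
		 */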
		pci_save_state(pdev);
11478 | |
		pci_wake_from_d3(pdev, false);
11480 | |
11481 | ixgbe_reset(adapter); |
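		/* clear any Wake Up Status bits that latched while the
		 * device was down
		 */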
11482 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
11483 | result = PCI_ERS_RESULT_RECOVERED; |
11484 | } |
11485 | |
11486 | return result; |
11487 | } |
11488 | |
11489 | /** |
11490 | * ixgbe_io_resume - called when traffic can start flowing again. |
11491 | * @pdev: Pointer to PCI device |
11492 | * |
11493 | * This callback is called when the error recovery driver tells us that |
 * it's OK to resume normal operation.
11495 | */ |
11496 | static void ixgbe_io_resume(struct pci_dev *pdev) |
11497 | { |
11498 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
11499 | struct net_device *netdev = adapter->netdev; |
11500 | |
11501 | #ifdef CONFIG_PCI_IOV |
11502 | if (adapter->vferr_refcount) { |
		e_info(drv, "Resuming after VF err\n");
11504 | adapter->vferr_refcount--; |
11505 | return; |
11506 | } |
11507 | |
11508 | #endif |
11509 | rtnl_lock(); |
	if (netif_running(netdev))
		ixgbe_open(netdev);

	netif_device_attach(netdev);
11514 | rtnl_unlock(); |
11515 | } |
11516 | |
11517 | static const struct pci_error_handlers ixgbe_err_handler = { |
11518 | .error_detected = ixgbe_io_error_detected, |
11519 | .slot_reset = ixgbe_io_slot_reset, |
11520 | .resume = ixgbe_io_resume, |
11521 | }; |
11522 | |
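/* SIMPLE_DEV_PM_OPS wires ixgbe_suspend/ixgbe_resume to the system-wide
 * sleep transitions (suspend, hibernate, poweroff) only; it installs no
 * runtime-PM callbacks
 */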
11523 | static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); |
11524 | |
11525 | static struct pci_driver ixgbe_driver = { |
11526 | .name = ixgbe_driver_name, |
11527 | .id_table = ixgbe_pci_tbl, |
11528 | .probe = ixgbe_probe, |
11529 | .remove = ixgbe_remove, |
11530 | .driver.pm = &ixgbe_pm_ops, |
11531 | .shutdown = ixgbe_shutdown, |
11532 | .sriov_configure = ixgbe_pci_sriov_configure, |
11533 | .err_handler = &ixgbe_err_handler |
11534 | }; |
11535 | |
11536 | /** |
11537 | * ixgbe_init_module - Driver Registration Routine |
11538 | * |
11539 | * ixgbe_init_module is the first routine called when the driver is |
11540 | * loaded. All it does is register with the PCI subsystem. |
11541 | **/ |
11542 | static int __init ixgbe_init_module(void) |
11543 | { |
	int ret;

	pr_info("%s\n", ixgbe_driver_string);
	pr_info("%s\n", ixgbe_copyright);
11547 | |
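	/* a single-threaded workqueue executes at most one item at a time,
	 * so service tasks queued on it never run concurrently with each
	 * other
	 */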
11548 | ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); |
11549 | if (!ixgbe_wq) { |
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11551 | return -ENOMEM; |
11552 | } |
11553 | |
11554 | ixgbe_dbg_init(); |
11555 | |
11556 | ret = pci_register_driver(&ixgbe_driver); |
11557 | if (ret) { |
		destroy_workqueue(ixgbe_wq);
11559 | ixgbe_dbg_exit(); |
11560 | return ret; |
11561 | } |
11562 | |
11563 | #ifdef CONFIG_IXGBE_DCA |
	dca_register_notify(&dca_notifier);
11565 | #endif |
11566 | |
11567 | return 0; |
11568 | } |
11569 | |
11570 | module_init(ixgbe_init_module); |
11571 | |
11572 | /** |
11573 | * ixgbe_exit_module - Driver Exit Cleanup Routine |
11574 | * |
11575 | * ixgbe_exit_module is called just before the driver is removed |
11576 | * from memory. |
11577 | **/ |
11578 | static void __exit ixgbe_exit_module(void) |
11579 | { |
11580 | #ifdef CONFIG_IXGBE_DCA |
	dca_unregister_notify(&dca_notifier);
11582 | #endif |
	pci_unregister_driver(&ixgbe_driver);
11584 | |
11585 | ixgbe_dbg_exit(); |
11586 | if (ixgbe_wq) { |
		destroy_workqueue(ixgbe_wq);
11588 | ixgbe_wq = NULL; |
11589 | } |
11590 | } |
11591 | |
11592 | #ifdef CONFIG_IXGBE_DCA |
11593 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, |
11594 | void *p) |
11595 | { |
11596 | int ret_val; |
11597 | |
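	/* fan the DCA provider event out to every device bound to this
	 * driver; a non-zero return from any device vetoes the event
	 */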
	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);
11600 | |
11601 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; |
11602 | } |
11603 | |
11604 | #endif /* CONFIG_IXGBE_DCA */ |
11605 | |
11606 | module_exit(ixgbe_exit_module); |
11607 | |
11608 | /* ixgbe_main.c */ |
11609 | |