1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 1999 - 2018 Intel Corporation. */ |
3 | |
4 | #include <linux/types.h> |
5 | #include <linux/module.h> |
6 | #include <linux/pci.h> |
7 | #include <linux/netdevice.h> |
8 | #include <linux/vmalloc.h> |
9 | #include <linux/string.h> |
10 | #include <linux/in.h> |
11 | #include <linux/ip.h> |
12 | #include <linux/tcp.h> |
13 | #include <linux/ipv6.h> |
14 | #include <linux/if_bridge.h> |
15 | #ifdef NETIF_F_HW_VLAN_CTAG_TX |
16 | #include <linux/if_vlan.h> |
17 | #endif |
18 | |
19 | #include "ixgbe.h" |
20 | #include "ixgbe_type.h" |
21 | #include "ixgbe_sriov.h" |
22 | |
23 | #ifdef CONFIG_PCI_IOV |
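/**
 * ixgbe_alloc_vf_macvlans - allocate the VF MACVLAN free list
 * @adapter: Pointer to adapter struct
 * @num_vfs: number of VFs being enabled
 *
 * Build a free list from the RAR entries left over after the entries
 * reserved for the PF MACVLANs and the PF/VF MAC addresses. If the
 * allocation fails the list is left empty and later VF MACVLAN requests
 * are simply refused.
 */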
24 | static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, |
25 | unsigned int num_vfs) |
26 | { |
27 | struct ixgbe_hw *hw = &adapter->hw; |
28 | struct vf_macvlans *mv_list; |
29 | int num_vf_macvlans, i; |
30 | |
31 | /* Initialize list of VF macvlans */ |
32 | INIT_LIST_HEAD(&adapter->vf_mvs.l); |
33 | |
34 | num_vf_macvlans = hw->mac.num_rar_entries - |
35 | (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); |
36 | if (!num_vf_macvlans) |
37 | return; |
38 | |
39 | mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), |
40 | GFP_KERNEL); |
41 | if (mv_list) { |
42 | for (i = 0; i < num_vf_macvlans; i++) { |
43 | mv_list[i].vf = -1; |
44 | mv_list[i].free = true; |
45 | list_add(&mv_list[i].l, &adapter->vf_mvs.l); |
46 | } |
47 | adapter->mv_list = mv_list; |
48 | } |
49 | } |
50 | |
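/**
 * __ixgbe_enable_sriov - common SR-IOV enable path
 * @adapter: Pointer to adapter struct
 * @num_vfs: number of VFs to allocate control structures for
 *
 * Shared by the module-parameter and sysfs enable paths: allocates the
 * per-VF bookkeeping, switches the device into VMDq/VEB mode, limits the
 * traffic classes, disables RSC and applies the per-VF defaults. Fails
 * with -EINVAL if an XDP program is loaded.
 */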
51 | static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, |
52 | unsigned int num_vfs) |
53 | { |
54 | struct ixgbe_hw *hw = &adapter->hw; |
55 | int i; |
56 | |
57 | if (adapter->xdp_prog) { |
58 | e_warn(probe, "SRIOV is not supported with XDP\n"); |
59 | return -EINVAL; |
60 | } |
61 | |
62 | /* Enable VMDq flag so device will be set in VM mode */ |
63 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | |
64 | IXGBE_FLAG_VMDQ_ENABLED; |
65 | |
66 | /* Allocate memory for per VF control structures */ |
67 | adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), |
68 | GFP_KERNEL); |
69 | if (!adapter->vfinfo) |
70 | return -ENOMEM; |
71 | |
72 | adapter->num_vfs = num_vfs; |
73 | |
74 | ixgbe_alloc_vf_macvlans(adapter, num_vfs); |
75 | adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; |
76 | |
77 | /* Initialize default switching mode VEB */ |
78 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); |
79 | adapter->bridge_mode = BRIDGE_MODE_VEB; |
80 | |
81 | /* limit traffic classes based on VFs enabled */ |
82 | if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { |
83 | adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; |
84 | adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; |
85 | } else if (num_vfs < 32) { |
86 | adapter->dcb_cfg.num_tcs.pg_tcs = 4; |
87 | adapter->dcb_cfg.num_tcs.pfc_tcs = 4; |
88 | } else { |
89 | adapter->dcb_cfg.num_tcs.pg_tcs = 1; |
90 | adapter->dcb_cfg.num_tcs.pfc_tcs = 1; |
91 | } |
92 | |
93 | /* Disable RSC when in SR-IOV mode */ |
94 | adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | |
95 | IXGBE_FLAG2_RSC_ENABLED); |
96 | |
97 | for (i = 0; i < num_vfs; i++) { |
98 | /* enable spoof checking for all VFs */ |
99 | adapter->vfinfo[i].spoofchk_enabled = true; |
100 | adapter->vfinfo[i].link_enable = true; |
101 | |
102 | /* We support VF RSS querying only for 82599 and x540 |
103 | * devices at the moment. These devices share RSS |
104 | * indirection table and RSS hash key with PF therefore |
105 | * we want to disable the querying by default. |
106 | */ |
107 | adapter->vfinfo[i].rss_query_enabled = false; |
108 | |
109 | /* Untrust all VFs */ |
110 | adapter->vfinfo[i].trusted = false; |
111 | |
112 | /* set the default xcast mode */ |
113 | adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; |
114 | } |
115 | |
116 | e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs); |
117 | return 0; |
118 | } |
119 | |
120 | /** |
121 | * ixgbe_get_vfs - Find and take references to all vf devices |
122 | * @adapter: Pointer to adapter struct |
123 | */ |
124 | static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) |
125 | { |
126 | struct pci_dev *pdev = adapter->pdev; |
127 | u16 vendor = pdev->vendor; |
128 | struct pci_dev *vfdev; |
129 | int vf = 0; |
130 | u16 vf_id; |
131 | int pos; |
132 | |
133 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); |
134 | if (!pos) |
135 | return; |
136 | pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); |
137 | |
138 | vfdev = pci_get_device(vendor, vf_id, NULL); |
139 | for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { |
140 | if (!vfdev->is_virtfn) |
141 | continue; |
142 | if (vfdev->physfn != pdev) |
143 | continue; |
144 | if (vf >= adapter->num_vfs) |
145 | continue; |
146 | pci_dev_get(vfdev); |
147 | adapter->vfinfo[vf].vfdev = vfdev; |
148 | ++vf; |
149 | } |
150 | } |
151 | |
152 | /* Note this function is called when the user wants to enable SR-IOV |
153 | * VFs using the now deprecated module parameter |
154 | */ |
155 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) |
156 | { |
157 | int pre_existing_vfs = 0; |
158 | unsigned int num_vfs; |
159 | |
160 | pre_existing_vfs = pci_num_vf(adapter->pdev); |
161 | if (!pre_existing_vfs && !max_vfs) |
162 | return; |
163 | |
164 | /* If there are pre-existing VFs then we have to force |
165 | * use of that many - override any module parameter value. |
166 | * This may result from the user unloading the PF driver |
167 | * while VFs were assigned to guest VMs or because the VFs |
168 | * have been created via the new PCI SR-IOV sysfs interface. |
169 | */ |
170 | if (pre_existing_vfs) { |
171 | num_vfs = pre_existing_vfs; |
172 | dev_warn(&adapter->pdev->dev, |
173 | "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n" ); |
174 | } else { |
175 | int err; |
176 | /* |
177 | * The 82599 supports up to 64 VFs per physical function |
178 | * but this implementation limits allocation to 63 so that |
179 | * basic networking resources are still available to the |
180 | * physical function. If the user requests greater than |
181 | * 63 VFs then it is an error - reset to default of zero. |
182 | */ |
183 | num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT); |
184 | |
185 | err = pci_enable_sriov(adapter->pdev, num_vfs); |
186 | if (err) { |
187 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
188 | return; |
189 | } |
190 | } |
191 | |
192 | if (!__ixgbe_enable_sriov(adapter, num_vfs)) { |
193 | ixgbe_get_vfs(adapter); |
194 | return; |
195 | } |
196 | |
197 | /* If we have gotten to this point then there is no memory available |
198 | * to manage the VF devices - print message and bail. |
199 | */ |
200 | e_err(probe, "Unable to allocate memory for VF Data Storage - " |
201 | "SRIOV disabled\n" ); |
202 | ixgbe_disable_sriov(adapter); |
203 | } |
204 | |
205 | #endif /* #ifdef CONFIG_PCI_IOV */ |
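/**
 * ixgbe_disable_sriov - tear down SR-IOV support
 * @adapter: Pointer to adapter struct
 *
 * Drop the references taken on the VF pci_dev structures, free the per-VF
 * data, and disable SR-IOV in hardware unless VFs are still assigned to
 * guests, in which case the hardware is left enabled and -EPERM returned.
 */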
206 | int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) |
207 | { |
208 | unsigned int num_vfs = adapter->num_vfs, vf; |
209 | unsigned long flags; |
210 | int rss; |
211 | |
212 | spin_lock_irqsave(&adapter->vfs_lock, flags); |
213 | /* set num VFs to 0 to prevent access to vfinfo */ |
214 | adapter->num_vfs = 0; |
215 | spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
216 | |
217 | /* put the reference to all of the vf devices */ |
218 | for (vf = 0; vf < num_vfs; ++vf) { |
219 | struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; |
220 | |
221 | if (!vfdev) |
222 | continue; |
223 | adapter->vfinfo[vf].vfdev = NULL; |
224 | pci_dev_put(vfdev); |
225 | } |
226 | |
227 | /* free VF control structures */ |
228 | kfree(adapter->vfinfo); |
229 | adapter->vfinfo = NULL; |
230 | |
231 | /* free macvlan list */ |
232 | kfree(adapter->mv_list); |
233 | adapter->mv_list = NULL; |
234 | |
235 | /* if SR-IOV is already disabled then there is nothing to do */ |
236 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) |
237 | return 0; |
238 | |
239 | #ifdef CONFIG_PCI_IOV |
240 | /* |
241 | * If our VFs are assigned we cannot shut down SR-IOV |
242 | * without causing issues, so just leave the hardware |
243 | * available but disabled |
244 | */ |
245 | if (pci_vfs_assigned(adapter->pdev)) { |
246 | e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); |
247 | return -EPERM; |
248 | } |
249 | /* disable iov and allow time for transactions to clear */ |
250 | pci_disable_sriov(adapter->pdev); |
251 | #endif |
252 | |
253 | /* Disable VMDq flag so device will be set in VM mode */ |
254 | if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { |
255 | adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; |
256 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
257 | rss = min_t(int, ixgbe_max_rss_indices(adapter), |
258 | num_online_cpus()); |
259 | } else { |
260 | rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); |
261 | } |
262 | |
263 | adapter->ring_feature[RING_F_VMDQ].offset = 0; |
264 | adapter->ring_feature[RING_F_RSS].limit = rss; |
265 | |
266 | /* take a breather then clean up driver data */ |
267 | msleep(100); |
268 | return 0; |
269 | } |
270 | |
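/**
 * ixgbe_pci_sriov_enable - enable VFs via the PCI sysfs interface
 * @dev: PCI device the VFs are created on
 * @num_vfs: number of VFs requested through sriov_numvfs
 *
 * Validates the request against the current traffic class and offloaded
 * macvlan configuration before enabling SR-IOV in the PCI layer.
 *
 * Returns the number of VFs enabled or a negative error code.
 */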
271 | static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) |
272 | { |
273 | #ifdef CONFIG_PCI_IOV |
274 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
275 | int pre_existing_vfs = pci_num_vf(dev); |
276 | int err = 0, num_rx_pools, i, limit; |
277 | u8 num_tc; |
278 | |
279 | if (pre_existing_vfs && pre_existing_vfs != num_vfs) |
280 | err = ixgbe_disable_sriov(adapter); |
281 | else if (pre_existing_vfs && pre_existing_vfs == num_vfs) |
282 | return num_vfs; |
283 | |
284 | if (err) |
285 | return err; |
286 | |
287 | /* While the SR-IOV capability structure reports total VFs to be 64, |
288 | * we limit the actual number allocated as below based on two factors. |
289 | * Num_TCs MAX_VFs |
290 | * 1 63 |
291 | * <=4 31 |
292 | * >4 15 |
293 | * First, we reserve some transmit/receive resources for the PF. |
294 | * Second, VMDQ also uses the same pools that SR-IOV does. We need to |
295 | * account for this, so that we don't accidentally allocate more VFs |
296 | * than we have available pools. The PCI bus driver already checks for |
297 | * other values out of range. |
298 | */ |
299 | num_tc = adapter->hw_tcs; |
300 | num_rx_pools = bitmap_weight(adapter->fwd_bitmask, |
301 | adapter->num_rx_pools); |
302 | limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : |
303 | (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; |
304 | |
305 | if (num_vfs > (limit - num_rx_pools)) { |
306 | e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", |
307 | num_tc, num_rx_pools - 1, limit - num_rx_pools); |
308 | return -EPERM; |
309 | } |
310 | |
311 | err = __ixgbe_enable_sriov(adapter, num_vfs); |
312 | if (err) |
313 | return err; |
314 | |
315 | for (i = 0; i < num_vfs; i++) |
316 | ixgbe_vf_configuration(dev, (i | 0x10000000)); |
317 | |
318 | /* reset before enabling SRIOV to avoid mailbox issues */ |
319 | ixgbe_sriov_reinit(adapter); |
320 | |
321 | err = pci_enable_sriov(dev, num_vfs); |
322 | if (err) { |
323 | e_dev_warn("Failed to enable PCI sriov: %d\n", err); |
324 | return err; |
325 | } |
326 | ixgbe_get_vfs(adapter); |
327 | |
328 | return num_vfs; |
329 | #else |
330 | return 0; |
331 | #endif |
332 | } |
333 | |
334 | static int ixgbe_pci_sriov_disable(struct pci_dev *dev) |
335 | { |
336 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
337 | int err; |
338 | #ifdef CONFIG_PCI_IOV |
339 | u32 current_flags = adapter->flags; |
340 | int prev_num_vf = pci_num_vf(dev); |
341 | #endif |
342 | |
343 | err = ixgbe_disable_sriov(adapter); |
344 | |
345 | /* Only reinit if no error and state changed */ |
346 | #ifdef CONFIG_PCI_IOV |
347 | if (!err && (current_flags != adapter->flags || |
348 | prev_num_vf != pci_num_vf(dev))) |
349 | ixgbe_sriov_reinit(adapter); |
350 | #endif |
351 | |
352 | return err; |
353 | } |
354 | |
355 | int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) |
356 | { |
357 | if (num_vfs == 0) |
358 | return ixgbe_pci_sriov_disable(dev); |
359 | else |
360 | return ixgbe_pci_sriov_enable(dev, num_vfs); |
361 | } |
362 | |
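/**
 * ixgbe_set_vf_multicasts - program a VF's multicast hash list
 * @adapter: Pointer to adapter struct
 * @msgbuf: mailbox message containing the hash entries
 * @vf: VF identifier
 *
 * Save the hash values requested by the VF so they can be restored later,
 * set the corresponding bits in the MTA, and allow the VF pool to receive
 * packets that match the multicast table (ROMPE).
 */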
363 | static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, |
364 | u32 *msgbuf, u32 vf) |
365 | { |
366 | int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]); |
367 | u16 *hash_list = (u16 *)&msgbuf[1]; |
368 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
369 | struct ixgbe_hw *hw = &adapter->hw; |
370 | int i; |
371 | u32 vector_bit; |
372 | u32 vector_reg; |
373 | u32 mta_reg; |
374 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
375 | |
376 | /* only so many hash values supported */ |
377 | entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); |
378 | |
379 | /* |
380 | * salt away the number of multicast addresses assigned |
381 | * to this VF for later use to restore when the PF multicast |
382 | * list changes |
383 | */ |
384 | vfinfo->num_vf_mc_hashes = entries; |
385 | |
386 | /* |
387 | * VFs are limited to using the MTA hash table for their multicast |
388 | * addresses |
389 | */ |
390 | for (i = 0; i < entries; i++) { |
391 | vfinfo->vf_mc_hashes[i] = hash_list[i]; |
392 | } |
393 | |
394 | for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { |
395 | vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; |
396 | vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; |
397 | mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); |
398 | mta_reg |= BIT(vector_bit); |
399 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); |
400 | } |
401 | vmolr |= IXGBE_VMOLR_ROMPE; |
402 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
403 | |
404 | return 0; |
405 | } |
406 | |
407 | #ifdef CONFIG_PCI_IOV |
408 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) |
409 | { |
410 | struct ixgbe_hw *hw = &adapter->hw; |
411 | struct vf_data_storage *vfinfo; |
412 | int i, j; |
413 | u32 vector_bit; |
414 | u32 vector_reg; |
415 | u32 mta_reg; |
416 | |
417 | for (i = 0; i < adapter->num_vfs; i++) { |
418 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); |
419 | vfinfo = &adapter->vfinfo[i]; |
420 | for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { |
421 | hw->addr_ctrl.mta_in_use++; |
422 | vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; |
423 | vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; |
424 | mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); |
425 | mta_reg |= BIT(vector_bit); |
426 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); |
427 | } |
428 | |
429 | if (vfinfo->num_vf_mc_hashes) |
430 | vmolr |= IXGBE_VMOLR_ROMPE; |
431 | else |
432 | vmolr &= ~IXGBE_VMOLR_ROMPE; |
433 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); |
434 | } |
435 | |
436 | /* Restore any VF macvlans */ |
437 | ixgbe_full_sync_mac_table(adapter); |
438 | } |
439 | #endif |
440 | |
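/**
 * ixgbe_set_vf_vlan - add or remove a VLAN filter for a VF pool
 * @adapter: Pointer to adapter struct
 * @add: non-zero to add the filter, zero to remove it
 * @vid: VLAN ID to operate on
 * @vf: pool/VF index the filter applies to
 */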
441 | static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, |
442 | u32 vf) |
443 | { |
444 | struct ixgbe_hw *hw = &adapter->hw; |
445 | int err; |
446 | |
447 | /* If VLAN overlaps with one the PF is currently monitoring make |
448 | * sure that we are able to allocate a VLVF entry. This may be |
449 | * redundant but it guarantees PF will maintain visibility to |
450 | * the VLAN. |
451 | */ |
452 | if (add && test_bit(vid, adapter->active_vlans)) { |
453 | err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); |
454 | if (err) |
455 | return err; |
456 | } |
457 | |
458 | err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); |
459 | |
460 | if (add && !err) |
461 | return err; |
462 | |
463 | /* If we failed to add the VF VLAN or we are removing the VF VLAN |
464 | * we may need to drop the PF pool bit in order to allow us to free |
465 | * up the VLVF resources. |
466 | */ |
467 | if (test_bit(vid, adapter->active_vlans) || |
468 | (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
469 | ixgbe_update_pf_promisc_vlvf(adapter, vid); |
470 | |
471 | return err; |
472 | } |
473 | |
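/**
 * ixgbe_set_vf_lpe - handle a VF request to change its max frame size
 * @adapter: Pointer to adapter struct
 * @max_frame: maximum frame size requested by the VF
 * @vf: VF identifier
 *
 * On 82599 the PF and legacy VFs must agree on jumbo frame usage, so the
 * VF receive path is disabled when the combination is not supported.
 * Otherwise MAXFRS is raised if the request exceeds the current value.
 */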
474 | static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) |
475 | { |
476 | struct ixgbe_hw *hw = &adapter->hw; |
477 | u32 max_frs; |
478 | |
479 | if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { |
480 | e_err(drv, "VF max_frame %d out of range\n", max_frame); |
481 | return -EINVAL; |
482 | } |
483 | |
484 | /* |
485 | * For 82599EB we have to keep all PFs and VFs operating with |
486 | * the same max_frame value in order to avoid sending an oversize |
487 | * frame to a VF. In order to guarantee this is handled correctly |
488 | * for all cases we have several special exceptions to take into |
489 | * account before we can enable the VF for receive |
490 | */ |
491 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
492 | struct net_device *dev = adapter->netdev; |
493 | int pf_max_frame = dev->mtu + ETH_HLEN; |
494 | u32 reg_offset, vf_shift, vfre; |
495 | int err = 0; |
496 | |
497 | #ifdef CONFIG_FCOE |
498 | if (dev->features & NETIF_F_FCOE_MTU) |
499 | pf_max_frame = max_t(int, pf_max_frame, |
500 | IXGBE_FCOE_JUMBO_FRAME_SIZE); |
501 | |
502 | #endif /* CONFIG_FCOE */ |
503 | switch (adapter->vfinfo[vf].vf_api) { |
504 | case ixgbe_mbox_api_11: |
505 | case ixgbe_mbox_api_12: |
506 | case ixgbe_mbox_api_13: |
507 | case ixgbe_mbox_api_14: |
508 | /* Version 1.1 supports jumbo frames on VFs if PF has |
509 | * jumbo frames enabled which means legacy VFs are |
510 | * disabled |
511 | */ |
512 | if (pf_max_frame > ETH_FRAME_LEN) |
513 | break; |
514 | fallthrough; |
515 | default: |
516 | /* If the PF or VF are running w/ jumbo frames enabled |
517 | * we need to shut down the VF Rx path as we cannot |
518 | * support jumbo frames on legacy VFs |
519 | */ |
520 | if ((pf_max_frame > ETH_FRAME_LEN) || |
521 | (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) |
522 | err = -EINVAL; |
523 | break; |
524 | } |
525 | |
526 | /* determine VF receive enable location */ |
527 | vf_shift = vf % 32; |
528 | reg_offset = vf / 32; |
529 | |
530 | /* enable or disable receive depending on error */ |
531 | vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); |
532 | if (err) |
533 | vfre &= ~BIT(vf_shift); |
534 | else |
535 | vfre |= BIT(vf_shift); |
536 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); |
537 | |
538 | if (err) { |
539 | e_err(drv, "VF max_frame %d out of range\n", max_frame); |
540 | return err; |
541 | } |
542 | } |
543 | |
544 | /* pull current max frame size from hardware */ |
545 | max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); |
546 | max_frs &= IXGBE_MHADD_MFS_MASK; |
547 | max_frs >>= IXGBE_MHADD_MFS_SHIFT; |
548 | |
549 | if (max_frs < max_frame) { |
550 | max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; |
551 | IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); |
552 | } |
553 | |
554 | e_info(hw, "VF requests change max MTU to %d\n", max_frame); |
555 | |
556 | return 0; |
557 | } |
558 | |
559 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
560 | { |
561 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
562 | vmolr |= IXGBE_VMOLR_BAM; |
563 | if (aupe) |
564 | vmolr |= IXGBE_VMOLR_AUPE; |
565 | else |
566 | vmolr &= ~IXGBE_VMOLR_AUPE; |
567 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
568 | } |
569 | |
570 | static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) |
571 | { |
572 | struct ixgbe_hw *hw = &adapter->hw; |
573 | |
574 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); |
575 | } |
576 | |
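/**
 * ixgbe_clear_vf_vlans - remove a VF from all VLVF/VLVFB entries
 * @adapter: Pointer to adapter struct
 * @vf: VF identifier
 *
 * Walk the VLVF table, clear the VF's pool bit from every entry it is a
 * member of, and release the VLVF entry and VFTA bit when no other pool
 * (including the PF) still uses that VLAN.
 */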
577 | static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) |
578 | { |
579 | struct ixgbe_hw *hw = &adapter->hw; |
580 | u32 vlvfb_mask, pool_mask, i; |
581 | |
582 | /* create mask for VF and other pools */ |
583 | pool_mask = ~BIT(VMDQ_P(0) % 32); |
584 | vlvfb_mask = BIT(vf % 32); |
585 | |
586 | /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ |
587 | for (i = IXGBE_VLVF_ENTRIES; i--;) { |
588 | u32 bits[2], vlvfb, vid, vfta, vlvf; |
589 | u32 word = i * 2 + vf / 32; |
590 | u32 mask; |
591 | |
592 | vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); |
593 | |
594 | /* if our bit isn't set we can skip it */ |
595 | if (!(vlvfb & vlvfb_mask)) |
596 | continue; |
597 | |
598 | /* clear our bit from vlvfb */ |
599 | vlvfb ^= vlvfb_mask; |
600 | |
601 | /* create 64b mask to check whether we should clear VLVF */ |
602 | bits[word % 2] = vlvfb; |
603 | bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); |
604 | |
605 | /* if other pools are present, just remove ourselves */ |
606 | if (bits[(VMDQ_P(0) / 32) ^ 1] || |
607 | (bits[VMDQ_P(0) / 32] & pool_mask)) |
608 | goto update_vlvfb; |
609 | |
610 | /* if PF is present, leave VFTA */ |
611 | if (bits[0] || bits[1]) |
612 | goto update_vlvf; |
613 | |
614 | /* if we cannot determine VLAN just remove ourselves */ |
615 | vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); |
616 | if (!vlvf) |
617 | goto update_vlvfb; |
618 | |
619 | vid = vlvf & VLAN_VID_MASK; |
620 | mask = BIT(vid % 32); |
621 | |
622 | /* clear bit from VFTA */ |
623 | vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); |
624 | if (vfta & mask) |
625 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); |
626 | update_vlvf: |
627 | /* clear POOL selection enable */ |
628 | IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); |
629 | |
630 | if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
631 | vlvfb = 0; |
632 | update_vlvfb: |
633 | /* clear pool bits */ |
634 | IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); |
635 | } |
636 | } |
637 | |
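/**
 * ixgbe_set_vf_macvlan - manage the MACVLAN filters owned by a VF
 * @adapter: Pointer to adapter struct
 * @vf: VF identifier
 * @index: filter index requested by the VF
 * @mac_addr: MAC address to add when @index is non-zero
 *
 * An @index of 0 or 1 first releases any filter already owned by the VF;
 * a non-zero @index then claims a free list entry and programs @mac_addr.
 * Returns 0 on success, -ENOSPC when no free entry remains, or the error
 * from programming the MAC filter.
 */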
638 | static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, |
639 | int vf, int index, unsigned char *mac_addr) |
640 | { |
641 | struct vf_macvlans *entry; |
642 | bool found = false; |
643 | int retval = 0; |
644 | |
645 | if (index <= 1) { |
646 | list_for_each_entry(entry, &adapter->vf_mvs.l, l) { |
647 | if (entry->vf == vf) { |
648 | entry->vf = -1; |
649 | entry->free = true; |
650 | entry->is_macvlan = false; |
651 | ixgbe_del_mac_filter(adapter, |
652 | entry->vf_macvlan, vf); |
653 | } |
654 | } |
655 | } |
656 | |
657 | /* |
658 | * If index was zero then we were asked to clear the uc list |
659 | * for the VF. We're done. |
660 | */ |
661 | if (!index) |
662 | return 0; |
663 | |
664 | list_for_each_entry(entry, &adapter->vf_mvs.l, l) { |
665 | if (entry->free) { |
666 | found = true; |
667 | break; |
668 | } |
669 | } |
670 | |
671 | /* |
672 | * If we traversed the entire list and didn't find a free entry |
673 | * then we're out of space on the RAR table. It's also possible |
674 | * for the &adapter->vf_mvs.l list to be empty because the original |
675 | * memory allocation for the list failed, which is not fatal but does |
676 | * mean we can't support VF requests for MACVLAN because we couldn't |
677 | * allocate memory for the list management required. |
678 | */ |
679 | if (!found) |
680 | return -ENOSPC; |
681 | |
682 | retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); |
683 | if (retval < 0) |
684 | return retval; |
685 | |
686 | entry->free = false; |
687 | entry->is_macvlan = true; |
688 | entry->vf = vf; |
689 | memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); |
690 | |
691 | return 0; |
692 | } |
693 | |
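/**
 * ixgbe_vf_reset_event - restore a VF to its post-reset default state
 * @adapter: Pointer to adapter struct
 * @vf: VF identifier
 *
 * Clears the VF's VLAN, MAC, MACVLAN, multicast and IPsec state, reapplies
 * any PF-administered VLAN and anti-spoof settings, resets the negotiated
 * mailbox API, and cycles the enable bit on each configured Tx queue.
 */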
694 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
695 | { |
696 | struct ixgbe_hw *hw = &adapter->hw; |
697 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
698 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
699 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
700 | u8 num_tcs = adapter->hw_tcs; |
701 | u32 reg_val; |
702 | u32 queue; |
703 | |
704 | /* remove VLAN filters belonging to this VF */ |
705 | ixgbe_clear_vf_vlans(adapter, vf); |
706 | |
707 | /* add back PF assigned VLAN or VLAN 0 */ |
708 | ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); |
709 | |
710 | /* reset offloads to defaults */ |
711 | ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); |
712 | |
713 | /* set outgoing tags for VFs */ |
714 | if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { |
715 | ixgbe_clear_vmvir(adapter, vf); |
716 | } else { |
717 | if (vfinfo->pf_qos || !num_tcs) |
718 | ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, |
719 | vfinfo->pf_qos, vf); |
720 | else |
721 | ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, |
722 | adapter->default_up, vf); |
723 | |
724 | if (vfinfo->spoofchk_enabled) { |
725 | hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); |
726 | hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); |
727 | } |
728 | } |
729 | |
730 | /* reset multicast table array for vf */ |
731 | adapter->vfinfo[vf].num_vf_mc_hashes = 0; |
732 | |
733 | /* clear any ipsec table info */ |
734 | ixgbe_ipsec_vf_clear(adapter, vf); |
735 | |
736 | /* Flush and reset the mta with the new values */ |
737 | ixgbe_set_rx_mode(adapter->netdev); |
738 | |
739 | ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); |
740 | ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); |
741 | |
742 | /* reset VF api back to unknown */ |
743 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; |
744 | |
745 | /* Restart each queue for given VF */ |
746 | for (queue = 0; queue < q_per_pool; queue++) { |
747 | unsigned int reg_idx = (vf * q_per_pool) + queue; |
748 | |
749 | reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); |
750 | |
751 | /* Re-enabling only configured queues */ |
752 | if (reg_val) { |
753 | reg_val |= IXGBE_TXDCTL_ENABLE; |
754 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); |
755 | reg_val &= ~IXGBE_TXDCTL_ENABLE; |
756 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); |
757 | } |
758 | } |
759 | |
760 | IXGBE_WRITE_FLUSH(hw); |
761 | } |
762 | |
763 | static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) |
764 | { |
765 | struct ixgbe_hw *hw = &adapter->hw; |
766 | u32 word; |
767 | |
768 | /* Clear VF's mailbox memory */ |
769 | for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) |
770 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); |
771 | |
772 | IXGBE_WRITE_FLUSH(hw); |
773 | } |
774 | |
775 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
776 | int vf, unsigned char *mac_addr) |
777 | { |
778 | int retval; |
779 | |
780 | ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); |
781 | retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); |
782 | if (retval >= 0) |
783 | memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, |
784 | ETH_ALEN); |
785 | else |
786 | eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses); |
787 | |
788 | return retval; |
789 | } |
790 | |
791 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) |
792 | { |
793 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
794 | unsigned int vfn = (event_mask & 0x3f); |
795 | |
796 | bool enable = ((event_mask & 0x10000000U) != 0); |
797 | |
798 | if (enable) |
799 | eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); |
800 | |
801 | return 0; |
802 | } |
803 | |
804 | static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, |
805 | u32 qde) |
806 | { |
807 | struct ixgbe_hw *hw = &adapter->hw; |
808 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
809 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
810 | int i; |
811 | |
812 | for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { |
813 | u32 reg; |
814 | |
815 | /* flush previous write */ |
816 | IXGBE_WRITE_FLUSH(hw); |
817 | |
818 | /* indicate to hardware that we want to set drop enable */ |
819 | reg = IXGBE_QDE_WRITE | qde; |
820 | reg |= i << IXGBE_QDE_IDX_SHIFT; |
821 | IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); |
822 | } |
823 | } |
824 | |
825 | /** |
826 | * ixgbe_set_vf_rx_tx - Set VF rx tx |
827 | * @adapter: Pointer to adapter struct |
828 | * @vf: VF identifier |
829 | * |
830 | * Set or reset correct transmit and receive for vf |
831 | **/ |
832 | static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) |
833 | { |
834 | u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; |
835 | struct ixgbe_hw *hw = &adapter->hw; |
836 | u32 reg_offset, vf_shift; |
837 | |
838 | vf_shift = vf % 32; |
839 | reg_offset = vf / 32; |
840 | |
841 | reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); |
842 | reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); |
843 | |
844 | if (adapter->vfinfo[vf].link_enable) { |
845 | reg_req_tx = reg_cur_tx | 1 << vf_shift; |
846 | reg_req_rx = reg_cur_rx | 1 << vf_shift; |
847 | } else { |
848 | reg_req_tx = reg_cur_tx & ~(1 << vf_shift); |
849 | reg_req_rx = reg_cur_rx & ~(1 << vf_shift); |
850 | } |
851 | |
852 | /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. |
853 | * For more info take a look at ixgbe_set_vf_lpe |
854 | */ |
855 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
856 | struct net_device *dev = adapter->netdev; |
857 | int pf_max_frame = dev->mtu + ETH_HLEN; |
858 | |
859 | #if IS_ENABLED(CONFIG_FCOE) |
860 | if (dev->features & NETIF_F_FCOE_MTU) |
861 | pf_max_frame = max_t(int, pf_max_frame, |
862 | IXGBE_FCOE_JUMBO_FRAME_SIZE); |
863 | #endif /* CONFIG_FCOE */ |
864 | |
865 | if (pf_max_frame > ETH_FRAME_LEN) |
866 | reg_req_rx = reg_cur_rx & ~(1 << vf_shift); |
867 | } |
868 | |
869 | /* Enable/Disable particular VF */ |
870 | if (reg_cur_tx != reg_req_tx) |
871 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); |
872 | if (reg_cur_rx != reg_req_rx) |
873 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); |
874 | } |
875 | |
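/**
 * ixgbe_vf_reset_msg - handle an IXGBE_VF_RESET mailbox request
 * @adapter: Pointer to adapter struct
 * @vf: VF that sent the reset message
 *
 * Reset the VF's filters and queues, re-program its MAC address, force
 * drop enable on its Rx queues, and reply with the assigned MAC address
 * and multicast filter type (or a NACK when no usable address has been
 * administratively assigned) so the VF can finish its own initialization.
 */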
876 | static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) |
877 | { |
878 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
879 | struct ixgbe_hw *hw = &adapter->hw; |
880 | unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; |
881 | u32 reg, reg_offset, vf_shift; |
882 | u32 msgbuf[4] = {0, 0, 0, 0}; |
883 | u8 *addr = (u8 *)(&msgbuf[1]); |
884 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
885 | int i; |
886 | |
887 | e_info(probe, "VF Reset msg received from vf %d\n", vf); |
888 | |
889 | /* reset the filters for the device */ |
890 | ixgbe_vf_reset_event(adapter, vf); |
891 | |
892 | ixgbe_vf_clear_mbx(adapter, vf); |
893 | |
894 | /* set vf mac address */ |
895 | if (!is_zero_ether_addr(vf_mac)) |
896 | ixgbe_set_vf_mac(adapter, vf, vf_mac); |
897 | |
898 | vf_shift = vf % 32; |
899 | reg_offset = vf / 32; |
900 | |
901 | /* force drop enable for all VF Rx queues */ |
902 | reg = IXGBE_QDE_ENABLE; |
903 | if (adapter->vfinfo[vf].pf_vlan) |
904 | reg |= IXGBE_QDE_HIDE_VLAN; |
905 | |
906 | ixgbe_write_qde(adapter, vf, reg); |
907 | |
908 | ixgbe_set_vf_rx_tx(adapter, vf); |
909 | |
910 | /* enable VF mailbox for further messages */ |
911 | adapter->vfinfo[vf].clear_to_send = true; |
912 | |
913 | /* Enable counting of spoofed packets in the SSVPC register */ |
914 | reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); |
915 | reg |= BIT(vf_shift); |
916 | IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); |
917 | |
918 | /* |
919 | * Reset the VFs TDWBAL and TDWBAH registers |
920 | * which are not cleared by an FLR |
921 | */ |
922 | for (i = 0; i < q_per_pool; i++) { |
923 | IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); |
924 | IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); |
925 | } |
926 | |
927 | /* reply to reset with ack and vf mac address */ |
928 | msgbuf[0] = IXGBE_VF_RESET; |
929 | if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { |
930 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; |
931 | memcpy(addr, vf_mac, ETH_ALEN); |
932 | } else { |
933 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
934 | } |
935 | |
936 | /* |
937 | * Piggyback the multicast filter type so VF can compute the |
938 | * correct vectors |
939 | */ |
940 | msgbuf[3] = hw->mac.mc_filter_type; |
941 | ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); |
942 | |
943 | return 0; |
944 | } |
945 | |
946 | static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, |
947 | u32 *msgbuf, u32 vf) |
948 | { |
949 | u8 *new_mac = ((u8 *)(&msgbuf[1])); |
950 | |
951 | if (!is_valid_ether_addr(new_mac)) { |
952 | e_warn(drv, "VF %d attempted to set invalid mac\n", vf); |
953 | return -1; |
954 | } |
955 | |
956 | if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && |
957 | !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { |
958 | e_warn(drv, |
959 | "VF %d attempted to override administratively set MAC address\n" |
960 | "Reload the VF driver to resume operations\n", |
961 | vf); |
962 | return -1; |
963 | } |
964 | |
965 | return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; |
966 | } |
967 | |
968 | static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, |
969 | u32 *msgbuf, u32 vf) |
970 | { |
971 | u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]); |
972 | u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); |
973 | u8 tcs = adapter->hw_tcs; |
974 | |
975 | if (adapter->vfinfo[vf].pf_vlan || tcs) { |
976 | e_warn(drv, |
977 | "VF %d attempted to override administratively set VLAN configuration\n" |
978 | "Reload the VF driver to resume operations\n" , |
979 | vf); |
980 | return -1; |
981 | } |
982 | |
983 | /* VLAN 0 is a special case, don't allow it to be removed */ |
984 | if (!vid && !add) |
985 | return 0; |
986 | |
987 | return ixgbe_set_vf_vlan(adapter, add, vid, vf); |
988 | } |
989 | |
990 | static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, |
991 | u32 *msgbuf, u32 vf) |
992 | { |
993 | u8 *new_mac = ((u8 *)(&msgbuf[1])); |
994 | int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]); |
995 | int err; |
996 | |
997 | if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && |
998 | index > 0) { |
999 | e_warn(drv, |
1000 | "VF %d requested MACVLAN filter but is administratively denied\n" , |
1001 | vf); |
1002 | return -1; |
1003 | } |
1004 | |
1005 | /* A non-zero index indicates the VF is setting a filter */ |
1006 | if (index) { |
1007 | if (!is_valid_ether_addr(new_mac)) { |
1008 | e_warn(drv, "VF %d attempted to set invalid mac\n", vf); |
1009 | return -1; |
1010 | } |
1011 | |
1012 | /* |
1013 | * If the VF is allowed to set MAC filters then turn off |
1014 | * anti-spoofing to avoid false positives. |
1015 | */ |
1016 | if (adapter->vfinfo[vf].spoofchk_enabled) { |
1017 | struct ixgbe_hw *hw = &adapter->hw; |
1018 | |
1019 | hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); |
1020 | hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); |
1021 | } |
1022 | } |
1023 | |
1024 | err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); |
1025 | if (err == -ENOSPC) |
1026 | e_warn(drv, |
1027 | "VF %d has requested a MACVLAN filter but there is no space for it\n" , |
1028 | vf); |
1029 | |
1030 | return err < 0; |
1031 | } |
1032 | |
1033 | static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, |
1034 | u32 *msgbuf, u32 vf) |
1035 | { |
1036 | int api = msgbuf[1]; |
1037 | |
1038 | switch (api) { |
1039 | case ixgbe_mbox_api_10: |
1040 | case ixgbe_mbox_api_11: |
1041 | case ixgbe_mbox_api_12: |
1042 | case ixgbe_mbox_api_13: |
1043 | case ixgbe_mbox_api_14: |
1044 | adapter->vfinfo[vf].vf_api = api; |
1045 | return 0; |
1046 | default: |
1047 | break; |
1048 | } |
1049 | |
1050 | e_info(drv, "VF %d requested invalid api version %u\n", vf, api); |
1051 | |
1052 | return -1; |
1053 | } |
1054 | |
1055 | static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, |
1056 | u32 *msgbuf, u32 vf) |
1057 | { |
1058 | struct net_device *dev = adapter->netdev; |
1059 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
1060 | unsigned int default_tc = 0; |
1061 | u8 num_tcs = adapter->hw_tcs; |
1062 | |
1063 | /* verify the PF is supporting the correct APIs */ |
1064 | switch (adapter->vfinfo[vf].vf_api) { |
1065 | case ixgbe_mbox_api_20: |
1066 | case ixgbe_mbox_api_11: |
1067 | case ixgbe_mbox_api_12: |
1068 | case ixgbe_mbox_api_13: |
1069 | case ixgbe_mbox_api_14: |
1070 | break; |
1071 | default: |
1072 | return -1; |
1073 | } |
1074 | |
1075 | /* only allow 1 Tx queue for bandwidth limiting */ |
1076 | msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
1077 | msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
1078 | |
1079 | /* if TCs > 1 determine which TC belongs to default user priority */ |
1080 | if (num_tcs > 1) |
1081 | default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); |
1082 | |
1083 | /* notify VF of need for VLAN tag stripping, and correct queue */ |
1084 | if (num_tcs) |
1085 | msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; |
1086 | else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) |
1087 | msgbuf[IXGBE_VF_TRANS_VLAN] = 1; |
1088 | else |
1089 | msgbuf[IXGBE_VF_TRANS_VLAN] = 0; |
1090 | |
1091 | /* notify VF of default queue */ |
1092 | msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; |
1093 | |
1094 | return 0; |
1095 | } |
1096 | |
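/**
 * ixgbe_get_vf_reta - reply to a VF request for the RSS redirection table
 * @adapter: Pointer to adapter struct
 * @msgbuf: mailbox message, reply data is written after the header
 * @vf: VF identifier
 *
 * Only honored when RSS querying is enabled for the VF. Each RETA entry is
 * compressed to 2 bits so the whole table fits in one mailbox message.
 */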
1097 | static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) |
1098 | { |
1099 | u32 i, j; |
1100 | u32 *out_buf = &msgbuf[1]; |
1101 | const u8 *reta = adapter->rss_indir_tbl; |
1102 | u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); |
1103 | |
1104 | /* Check if operation is permitted */ |
1105 | if (!adapter->vfinfo[vf].rss_query_enabled) |
1106 | return -EPERM; |
1107 | |
1108 | /* verify the PF is supporting the correct API */ |
1109 | switch (adapter->vfinfo[vf].vf_api) { |
1110 | case ixgbe_mbox_api_14: |
1111 | case ixgbe_mbox_api_13: |
1112 | case ixgbe_mbox_api_12: |
1113 | break; |
1114 | default: |
1115 | return -EOPNOTSUPP; |
1116 | } |
1117 | |
1118 | /* This mailbox command is supported (required) only for 82599 and x540 |
1119 | * VFs which support up to 4 RSS queues. Therefore we will compress the |
1120 | * RETA by saving only 2 bits from each entry. This way we will be able |
1121 | * to transfer the whole RETA in a single mailbox operation. |
1122 | */ |
1123 | for (i = 0; i < reta_size / 16; i++) { |
1124 | out_buf[i] = 0; |
1125 | for (j = 0; j < 16; j++) |
1126 | out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); |
1127 | } |
1128 | |
1129 | return 0; |
1130 | } |
1131 | |
1132 | static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, |
1133 | u32 *msgbuf, u32 vf) |
1134 | { |
1135 | u32 *rss_key = &msgbuf[1]; |
1136 | |
1137 | /* Check if the operation is permitted */ |
1138 | if (!adapter->vfinfo[vf].rss_query_enabled) |
1139 | return -EPERM; |
1140 | |
1141 | /* verify the PF is supporting the correct API */ |
1142 | switch (adapter->vfinfo[vf].vf_api) { |
1143 | case ixgbe_mbox_api_14: |
1144 | case ixgbe_mbox_api_13: |
1145 | case ixgbe_mbox_api_12: |
1146 | break; |
1147 | default: |
1148 | return -EOPNOTSUPP; |
1149 | } |
1150 | |
1151 | memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); |
1152 | |
1153 | return 0; |
1154 | } |
1155 | |
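/**
 * ixgbe_update_vf_xcast_mode - handle a VF request to change its xcast mode
 * @adapter: Pointer to adapter struct
 * @msgbuf: mailbox message containing the requested mode
 * @vf: VF identifier
 *
 * Untrusted VFs are limited to multicast mode, and promiscuous mode is
 * only granted when the PF itself is in promiscuous mode. The mode that
 * was actually applied is written back into the reply.
 */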
1156 | static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, |
1157 | u32 *msgbuf, u32 vf) |
1158 | { |
1159 | struct ixgbe_hw *hw = &adapter->hw; |
1160 | int xcast_mode = msgbuf[1]; |
1161 | u32 vmolr, fctrl, disable, enable; |
1162 | |
1163 | /* verify the PF is supporting the correct APIs */ |
1164 | switch (adapter->vfinfo[vf].vf_api) { |
1165 | case ixgbe_mbox_api_12: |
1166 | /* promisc introduced in 1.3 version */ |
1167 | if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) |
1168 | return -EOPNOTSUPP; |
1169 | fallthrough; |
1170 | case ixgbe_mbox_api_13: |
1171 | case ixgbe_mbox_api_14: |
1172 | break; |
1173 | default: |
1174 | return -EOPNOTSUPP; |
1175 | } |
1176 | |
1177 | if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && |
1178 | !adapter->vfinfo[vf].trusted) { |
1179 | xcast_mode = IXGBEVF_XCAST_MODE_MULTI; |
1180 | } |
1181 | |
1182 | if (adapter->vfinfo[vf].xcast_mode == xcast_mode) |
1183 | goto out; |
1184 | |
1185 | switch (xcast_mode) { |
1186 | case IXGBEVF_XCAST_MODE_NONE: |
1187 | disable = IXGBE_VMOLR_ROMPE | |
1188 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1189 | enable = IXGBE_VMOLR_BAM; |
1190 | break; |
1191 | case IXGBEVF_XCAST_MODE_MULTI: |
1192 | disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1193 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; |
1194 | break; |
1195 | case IXGBEVF_XCAST_MODE_ALLMULTI: |
1196 | disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1197 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; |
1198 | break; |
1199 | case IXGBEVF_XCAST_MODE_PROMISC: |
1200 | if (hw->mac.type <= ixgbe_mac_82599EB) |
1201 | return -EOPNOTSUPP; |
1202 | |
1203 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
1204 | if (!(fctrl & IXGBE_FCTRL_UPE)) { |
1205 | /* VF promisc requires PF in promisc */ |
1206 | e_warn(drv, |
1207 | "Enabling VF promisc requires PF in promisc\n" ); |
1208 | return -EPERM; |
1209 | } |
1210 | |
1211 | disable = IXGBE_VMOLR_VPE; |
1212 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | |
1213 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; |
1214 | break; |
1215 | default: |
1216 | return -EOPNOTSUPP; |
1217 | } |
1218 | |
1219 | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
1220 | vmolr &= ~disable; |
1221 | vmolr |= enable; |
1222 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
1223 | |
1224 | adapter->vfinfo[vf].xcast_mode = xcast_mode; |
1225 | |
1226 | out: |
1227 | msgbuf[1] = xcast_mode; |
1228 | |
1229 | return 0; |
1230 | } |
1231 | |
1232 | static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, |
1233 | u32 *msgbuf, u32 vf) |
1234 | { |
1235 | u32 *link_state = &msgbuf[1]; |
1236 | |
1237 | /* verify the PF is supporting the correct API */ |
1238 | switch (adapter->vfinfo[vf].vf_api) { |
1239 | case ixgbe_mbox_api_12: |
1240 | case ixgbe_mbox_api_13: |
1241 | case ixgbe_mbox_api_14: |
1242 | break; |
1243 | default: |
1244 | return -EOPNOTSUPP; |
1245 | } |
1246 | |
1247 | *link_state = adapter->vfinfo[vf].link_enable; |
1248 | |
1249 | return 0; |
1250 | } |
1251 | |
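/**
 * ixgbe_rcv_msg_from_vf - read and dispatch one mailbox message from a VF
 * @adapter: Pointer to adapter struct
 * @vf: VF the message came from
 *
 * Messages other than IXGBE_VF_RESET are NACKed until the VF has completed
 * a reset; otherwise the message is dispatched to the matching handler and
 * the result is reported back to the VF as an ACK or NACK.
 */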
1252 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
1253 | { |
1254 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; |
1255 | u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; |
1256 | struct ixgbe_hw *hw = &adapter->hw; |
1257 | int retval; |
1258 | |
1259 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); |
1260 | |
1261 | if (retval) { |
1262 | pr_err("Error receiving message from VF\n"); |
1263 | return retval; |
1264 | } |
1265 | |
1266 | /* this is a message we already processed, do nothing */ |
1267 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) |
1268 | return 0; |
1269 | |
1270 | /* flush the ack before we write any messages back */ |
1271 | IXGBE_WRITE_FLUSH(hw); |
1272 | |
1273 | if (msgbuf[0] == IXGBE_VF_RESET) |
1274 | return ixgbe_vf_reset_msg(adapter, vf); |
1275 | |
1276 | /* |
1277 | * until the vf completes a virtual function reset it should not be |
1278 | * allowed to start any configuration. |
1279 | */ |
1280 | if (!adapter->vfinfo[vf].clear_to_send) { |
1281 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
1282 | ixgbe_write_mbx(hw, msgbuf, 1, vf); |
1283 | return 0; |
1284 | } |
1285 | |
1286 | switch ((msgbuf[0] & 0xFFFF)) { |
1287 | case IXGBE_VF_SET_MAC_ADDR: |
1288 | retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); |
1289 | break; |
1290 | case IXGBE_VF_SET_MULTICAST: |
1291 | retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); |
1292 | break; |
1293 | case IXGBE_VF_SET_VLAN: |
1294 | retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); |
1295 | break; |
1296 | case IXGBE_VF_SET_LPE: |
1297 | retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); |
1298 | break; |
1299 | case IXGBE_VF_SET_MACVLAN: |
1300 | retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); |
1301 | break; |
1302 | case IXGBE_VF_API_NEGOTIATE: |
1303 | retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); |
1304 | break; |
1305 | case IXGBE_VF_GET_QUEUES: |
1306 | retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); |
1307 | break; |
1308 | case IXGBE_VF_GET_RETA: |
1309 | retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); |
1310 | break; |
1311 | case IXGBE_VF_GET_RSS_KEY: |
1312 | retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); |
1313 | break; |
1314 | case IXGBE_VF_UPDATE_XCAST_MODE: |
1315 | retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); |
1316 | break; |
1317 | case IXGBE_VF_GET_LINK_STATE: |
1318 | retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); |
1319 | break; |
1320 | case IXGBE_VF_IPSEC_ADD: |
1321 | retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); |
1322 | break; |
1323 | case IXGBE_VF_IPSEC_DEL: |
1324 | retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); |
1325 | break; |
1326 | default: |
1327 | e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); |
1328 | retval = -EIO; |
1329 | break; |
1330 | } |
1331 | |
1332 | /* notify the VF of the results of what it sent us */ |
1333 | if (retval) |
1334 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
1335 | else |
1336 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; |
1337 | |
1338 | msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; |
1339 | |
1340 | ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); |
1341 | |
1342 | return retval; |
1343 | } |
1344 | |
1345 | static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
1346 | { |
1347 | struct ixgbe_hw *hw = &adapter->hw; |
1348 | u32 msg = IXGBE_VT_MSGTYPE_NACK; |
1349 | |
1350 | /* if device isn't clear to send it shouldn't be reading either */ |
1351 | if (!adapter->vfinfo[vf].clear_to_send) |
1352 | ixgbe_write_mbx(hw, &msg, 1, vf); |
1353 | } |
1354 | |
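/**
 * ixgbe_msg_task - service pending mailbox events for all VFs
 * @adapter: Pointer to adapter struct
 *
 * Takes the VFs lock and, for each VF, processes any pending reset
 * request, mailbox message and ack.
 */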
1355 | void ixgbe_msg_task(struct ixgbe_adapter *adapter) |
1356 | { |
1357 | struct ixgbe_hw *hw = &adapter->hw; |
1358 | unsigned long flags; |
1359 | u32 vf; |
1360 | |
1361 | spin_lock_irqsave(&adapter->vfs_lock, flags); |
1362 | for (vf = 0; vf < adapter->num_vfs; vf++) { |
1363 | /* process any reset requests */ |
1364 | if (!ixgbe_check_for_rst(hw, vf)) |
1365 | ixgbe_vf_reset_event(adapter, vf); |
1366 | |
1367 | /* process any messages pending */ |
1368 | if (!ixgbe_check_for_msg(hw, vf)) |
1369 | ixgbe_rcv_msg_from_vf(adapter, vf); |
1370 | |
1371 | /* process any acks */ |
1372 | if (!ixgbe_check_for_ack(hw, vf)) |
1373 | ixgbe_rcv_ack_from_vf(adapter, vf); |
1374 | } |
1375 | spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
1376 | } |
1377 | |
1378 | static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) |
1379 | { |
1380 | struct ixgbe_hw *hw = &adapter->hw; |
1381 | u32 ping; |
1382 | |
1383 | ping = IXGBE_PF_CONTROL_MSG; |
1384 | if (adapter->vfinfo[vf].clear_to_send) |
1385 | ping |= IXGBE_VT_MSGTYPE_CTS; |
1386 | ixgbe_write_mbx(hw, &ping, 1, vf); |
1387 | } |
1388 | |
1389 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) |
1390 | { |
1391 | struct ixgbe_hw *hw = &adapter->hw; |
1392 | u32 ping; |
1393 | int i; |
1394 | |
1395 | for (i = 0 ; i < adapter->num_vfs; i++) { |
1396 | ping = IXGBE_PF_CONTROL_MSG; |
1397 | if (adapter->vfinfo[i].clear_to_send) |
1398 | ping |= IXGBE_VT_MSGTYPE_CTS; |
1399 | ixgbe_write_mbx(hw, &ping, 1, i); |
1400 | } |
1401 | } |
1402 | |
1403 | /** |
1404 | * ixgbe_set_all_vfs - update vfs queues |
1405 | * @adapter: Pointer to adapter struct |
1406 | * |
1407 | * Update setting transmit and receive queues for all vfs |
1408 | **/ |
1409 | void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) |
1410 | { |
1411 | int i; |
1412 | |
1413 | for (i = 0 ; i < adapter->num_vfs; i++) |
1414 | ixgbe_set_vf_link_state(adapter, i, |
1415 | adapter->vfinfo[i].link_state); |
1416 | } |
1417 | |
1418 | int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) |
1419 | { |
1420 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1421 | int retval; |
1422 | |
1423 | if (vf >= adapter->num_vfs) |
1424 | return -EINVAL; |
1425 | |
1426 | if (is_valid_ether_addr(mac)) { |
1427 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", |
1428 | mac, vf); |
1429 | dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); |
1430 | |
1431 | retval = ixgbe_set_vf_mac(adapter, vf, mac); |
1432 | if (retval >= 0) { |
1433 | adapter->vfinfo[vf].pf_set_mac = true; |
1434 | |
1435 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
1436 | dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); |
1437 | dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); |
1438 | } |
1439 | } else { |
1440 | dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); |
1441 | } |
1442 | } else if (is_zero_ether_addr(mac)) { |
1443 | unsigned char *vf_mac_addr = |
1444 | adapter->vfinfo[vf].vf_mac_addresses; |
1445 | |
1446 | /* nothing to do */ |
1447 | if (is_zero_ether_addr(vf_mac_addr)) |
1448 | return 0; |
1449 | |
1450 | dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); |
1451 | |
1452 | retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); |
1453 | if (retval >= 0) { |
1454 | adapter->vfinfo[vf].pf_set_mac = false; |
1455 | memcpy(vf_mac_addr, mac, ETH_ALEN); |
1456 | } else { |
1457 | dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n"); |
1458 | } |
1459 | } else { |
1460 | retval = -EINVAL; |
1461 | } |
1462 | |
1463 | return retval; |
1464 | } |
1465 | |
1466 | static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, |
1467 | u16 vlan, u8 qos) |
1468 | { |
1469 | struct ixgbe_hw *hw = &adapter->hw; |
1470 | int err; |
1471 | |
1472 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); |
1473 | if (err) |
1474 | goto out; |
1475 | |
1476 | /* Revoke tagless access via VLAN 0 */ |
1477 | ixgbe_set_vf_vlan(adapter, false, 0, vf); |
1478 | |
1479 | ixgbe_set_vmvir(adapter, vlan, qos, vf); |
1480 | ixgbe_set_vmolr(hw, vf, false); |
1481 | |
1482 | /* enable hide vlan on X550 */ |
1483 | if (hw->mac.type >= ixgbe_mac_X550) |
1484 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | |
1485 | IXGBE_QDE_HIDE_VLAN); |
1486 | |
1487 | adapter->vfinfo[vf].pf_vlan = vlan; |
1488 | adapter->vfinfo[vf].pf_qos = qos; |
1489 | dev_info(&adapter->pdev->dev, |
1490 | "Setting VLAN %d, QOS 0x%x on VF %d\n" , vlan, qos, vf); |
1491 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
1492 | dev_warn(&adapter->pdev->dev, |
1493 | "The VF VLAN has been set, but the PF device is not up.\n" ); |
1494 | dev_warn(&adapter->pdev->dev, |
1495 | "Bring the PF device up before attempting to use the VF device.\n" ); |
1496 | } |
1497 | |
1498 | out: |
1499 | return err; |
1500 | } |
1501 | |
1502 | static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) |
1503 | { |
1504 | struct ixgbe_hw *hw = &adapter->hw; |
1505 | int err; |
1506 | |
1507 | err = ixgbe_set_vf_vlan(adapter, false, |
1508 | adapter->vfinfo[vf].pf_vlan, vf); |
1509 | /* Restore tagless access via VLAN 0 */ |
1510 | ixgbe_set_vf_vlan(adapter, true, 0, vf); |
1511 | ixgbe_clear_vmvir(adapter, vf); |
1512 | ixgbe_set_vmolr(hw, vf, true); |
1513 | |
1514 | /* disable hide VLAN on X550 */ |
1515 | if (hw->mac.type >= ixgbe_mac_X550) |
1516 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); |
1517 | |
1518 | adapter->vfinfo[vf].pf_vlan = 0; |
1519 | adapter->vfinfo[vf].pf_qos = 0; |
1520 | |
1521 | return err; |
1522 | } |
1523 | |
1524 | int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, |
1525 | u8 qos, __be16 vlan_proto) |
1526 | { |
1527 | int err = 0; |
1528 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1529 | |
1530 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) |
1531 | return -EINVAL; |
1532 | if (vlan_proto != htons(ETH_P_8021Q)) |
1533 | return -EPROTONOSUPPORT; |
1534 | if (vlan || qos) { |
1535 | /* Check if there is already a port VLAN set, if so |
1536 | * we have to delete the old one first before we |
1537 | * can set the new one. The usage model had |
1538 | * previously assumed the user would delete the |
1539 | * old port VLAN before setting a new one but this |
1540 | * is not necessarily the case. |
1541 | */ |
1542 | if (adapter->vfinfo[vf].pf_vlan) |
1543 | err = ixgbe_disable_port_vlan(adapter, vf); |
1544 | if (err) |
1545 | goto out; |
1546 | err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); |
1547 | } else { |
1548 | err = ixgbe_disable_port_vlan(adapter, vf); |
1549 | } |
1550 | |
1551 | out: |
1552 | return err; |
1553 | } |
1554 | |
1555 | int ixgbe_link_mbps(struct ixgbe_adapter *adapter) |
1556 | { |
1557 | switch (adapter->link_speed) { |
1558 | case IXGBE_LINK_SPEED_100_FULL: |
1559 | return 100; |
1560 | case IXGBE_LINK_SPEED_1GB_FULL: |
1561 | return 1000; |
1562 | case IXGBE_LINK_SPEED_10GB_FULL: |
1563 | return 10000; |
1564 | default: |
1565 | return 0; |
1566 | } |
1567 | } |
1568 | |
1569 | static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) |
1570 | { |
1571 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
1572 | struct ixgbe_hw *hw = &adapter->hw; |
1573 | u32 bcnrc_val = 0; |
1574 | u16 queue, queues_per_pool; |
1575 | u16 tx_rate = adapter->vfinfo[vf].tx_rate; |
1576 | |
1577 | if (tx_rate) { |
1578 | /* start with base link speed value */ |
1579 | bcnrc_val = adapter->vf_rate_link_speed; |
1580 | |
1581 | /* Calculate the rate factor values to set */ |
1582 | bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; |
1583 | bcnrc_val /= tx_rate; |
1584 | |
1585 | /* clear everything but the rate factor */ |
1586 | bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | |
1587 | IXGBE_RTTBCNRC_RF_DEC_MASK; |
1588 | |
1589 | /* enable the rate scheduler */ |
1590 | bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; |
1591 | } |
1592 | |
1593 | /* |
1594 | * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM |
1595 | * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported |
1596 | * and 0x004 otherwise. |
1597 | */ |
1598 | switch (hw->mac.type) { |
1599 | case ixgbe_mac_82599EB: |
1600 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); |
1601 | break; |
1602 | case ixgbe_mac_X540: |
1603 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); |
1604 | break; |
1605 | default: |
1606 | break; |
1607 | } |
1608 | |
1609 | /* determine how many queues per pool based on VMDq mask */ |
1610 | queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
1611 | |
1612 | /* write value for all Tx queues belonging to VF */ |
1613 | for (queue = 0; queue < queues_per_pool; queue++) { |
1614 | unsigned int reg_idx = (vf * queues_per_pool) + queue; |
1615 | |
1616 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); |
1617 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); |
1618 | } |
1619 | } |
1620 | |
1621 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) |
1622 | { |
1623 | int i; |
1624 | |
1625 | /* VF Tx rate limit was not set */ |
1626 | if (!adapter->vf_rate_link_speed) |
1627 | return; |
1628 | |
1629 | if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { |
1630 | adapter->vf_rate_link_speed = 0; |
1631 | dev_info(&adapter->pdev->dev, |
1632 | "Link speed has been changed. VF Transmit rate is disabled\n" ); |
1633 | } |
1634 | |
1635 | for (i = 0; i < adapter->num_vfs; i++) { |
1636 | if (!adapter->vf_rate_link_speed) |
1637 | adapter->vfinfo[i].tx_rate = 0; |
1638 | |
1639 | ixgbe_set_vf_rate_limit(adapter, i); |
1640 | } |
1641 | } |
1642 | |
1643 | int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, |
1644 | int max_tx_rate) |
1645 | { |
1646 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1647 | int link_speed; |
1648 | |
1649 | /* verify VF is active */ |
1650 | if (vf >= adapter->num_vfs) |
1651 | return -EINVAL; |
1652 | |
1653 | /* verify link is up */ |
1654 | if (!adapter->link_up) |
1655 | return -EINVAL; |
1656 | |
1657 | /* verify we are linked at 10Gbps */ |
1658 | link_speed = ixgbe_link_mbps(adapter); |
1659 | if (link_speed != 10000) |
1660 | return -EINVAL; |
1661 | |
1662 | if (min_tx_rate) |
1663 | return -EINVAL; |
1664 | |
1665 | /* rate limit cannot be less than 10Mbs or greater than link speed */ |
1666 | if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) |
1667 | return -EINVAL; |
1668 | |
1669 | /* store values */ |
1670 | adapter->vf_rate_link_speed = link_speed; |
1671 | adapter->vfinfo[vf].tx_rate = max_tx_rate; |
1672 | |
1673 | /* update hardware configuration */ |
1674 | ixgbe_set_vf_rate_limit(adapter, vf); |
1675 | |
1676 | return 0; |
1677 | } |
1678 | |
1679 | int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) |
1680 | { |
1681 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1682 | struct ixgbe_hw *hw = &adapter->hw; |
1683 | |
1684 | if (vf >= adapter->num_vfs) |
1685 | return -EINVAL; |
1686 | |
1687 | adapter->vfinfo[vf].spoofchk_enabled = setting; |
1688 | |
1689 | /* configure MAC spoofing */ |
1690 | hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); |
1691 | |
1692 | /* configure VLAN spoofing */ |
1693 | hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); |
1694 | |
1695 | /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be |
1696 | * calling set_ethertype_anti_spoofing for each VF in loop below |
1697 | */ |
1698 | if (hw->mac.ops.set_ethertype_anti_spoofing) { |
1699 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), |
1700 | (IXGBE_ETQF_FILTER_EN | |
1701 | IXGBE_ETQF_TX_ANTISPOOF | |
1702 | ETH_P_LLDP)); |
1703 | |
1704 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), |
1705 | (IXGBE_ETQF_FILTER_EN | |
1706 | IXGBE_ETQF_TX_ANTISPOOF | |
1707 | ETH_P_PAUSE)); |
1708 | |
1709 | hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); |
1710 | } |
1711 | |
1712 | return 0; |
1713 | } |
1714 | |
1715 | /** |
1716 | * ixgbe_set_vf_link_state - Set link state |
1717 | * @adapter: Pointer to adapter struct |
1718 | * @vf: VF identifier |
1719 | * @state: required link state |
1720 | * |
1721 | * Set a link force state on/off a single vf |
1722 | **/ |
1723 | void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) |
1724 | { |
1725 | adapter->vfinfo[vf].link_state = state; |
1726 | |
1727 | switch (state) { |
1728 | case IFLA_VF_LINK_STATE_AUTO: |
1729 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
1730 | adapter->vfinfo[vf].link_enable = false; |
1731 | else |
1732 | adapter->vfinfo[vf].link_enable = true; |
1733 | break; |
1734 | case IFLA_VF_LINK_STATE_ENABLE: |
1735 | adapter->vfinfo[vf].link_enable = true; |
1736 | break; |
1737 | case IFLA_VF_LINK_STATE_DISABLE: |
1738 | adapter->vfinfo[vf].link_enable = false; |
1739 | break; |
1740 | } |
1741 | |
1742 | ixgbe_set_vf_rx_tx(adapter, vf); |
1743 | |
1744 | /* restart the VF */ |
1745 | adapter->vfinfo[vf].clear_to_send = false; |
1746 | ixgbe_ping_vf(adapter, vf); |
1747 | } |
1748 | |
1749 | /** |
1750 | * ixgbe_ndo_set_vf_link_state - Set link state |
1751 | * @netdev: network interface device structure |
1752 | * @vf: VF identifier |
1753 | * @state: required link state |
1754 | * |
1755 | * Set the link state of a specified VF, regardless of physical link state |
1756 | **/ |
1757 | int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) |
1758 | { |
1759 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1760 | int ret = 0; |
1761 | |
1762 | if (vf < 0 || vf >= adapter->num_vfs) { |
1763 | dev_err(&adapter->pdev->dev, |
1764 | "NDO set VF link - invalid VF identifier %d\n" , vf); |
1765 | return -EINVAL; |
1766 | } |
1767 | |
1768 | switch (state) { |
1769 | case IFLA_VF_LINK_STATE_ENABLE: |
1770 | dev_info(&adapter->pdev->dev, |
1771 | "NDO set VF %d link state %d - not supported\n" , |
1772 | vf, state); |
1773 | break; |
1774 | case IFLA_VF_LINK_STATE_DISABLE: |
1775 | dev_info(&adapter->pdev->dev, |
1776 | "NDO set VF %d link state disable\n" , vf); |
1777 | ixgbe_set_vf_link_state(adapter, vf, state); |
1778 | break; |
1779 | case IFLA_VF_LINK_STATE_AUTO: |
1780 | dev_info(&adapter->pdev->dev, |
1781 | "NDO set VF %d link state auto\n" , vf); |
1782 | ixgbe_set_vf_link_state(adapter, vf, state); |
1783 | break; |
1784 | default: |
1785 | dev_err(&adapter->pdev->dev, |
1786 | "NDO set VF %d - invalid link state %d\n" , vf, state); |
1787 | ret = -EINVAL; |
1788 | } |
1789 | |
1790 | return ret; |
1791 | } |
1792 | |
1793 | int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, |
1794 | bool setting) |
1795 | { |
1796 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1797 | |
1798 | /* This operation is currently supported only for 82599 and x540 |
1799 | * devices. |
1800 | */ |
1801 | if (adapter->hw.mac.type < ixgbe_mac_82599EB || |
1802 | adapter->hw.mac.type >= ixgbe_mac_X550) |
1803 | return -EOPNOTSUPP; |
1804 | |
1805 | if (vf >= adapter->num_vfs) |
1806 | return -EINVAL; |
1807 | |
1808 | adapter->vfinfo[vf].rss_query_enabled = setting; |
1809 | |
1810 | return 0; |
1811 | } |
1812 | |
1813 | int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) |
1814 | { |
1815 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1816 | |
1817 | if (vf >= adapter->num_vfs) |
1818 | return -EINVAL; |
1819 | |
1820 | /* nothing to do */ |
1821 | if (adapter->vfinfo[vf].trusted == setting) |
1822 | return 0; |
1823 | |
1824 | adapter->vfinfo[vf].trusted = setting; |
1825 | |
1826 | /* reset VF to reconfigure features */ |
1827 | adapter->vfinfo[vf].clear_to_send = false; |
1828 | ixgbe_ping_vf(adapter, vf); |
1829 | |
1830 | e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); |
1831 | |
1832 | return 0; |
1833 | } |
1834 | |
1835 | int ixgbe_ndo_get_vf_config(struct net_device *netdev, |
1836 | int vf, struct ifla_vf_info *ivi) |
1837 | { |
1838 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1839 | if (vf >= adapter->num_vfs) |
1840 | return -EINVAL; |
1841 | ivi->vf = vf; |
1842 | memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); |
1843 | ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; |
1844 | ivi->min_tx_rate = 0; |
1845 | ivi->vlan = adapter->vfinfo[vf].pf_vlan; |
1846 | ivi->qos = adapter->vfinfo[vf].pf_qos; |
1847 | ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; |
1848 | ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; |
1849 | ivi->trusted = adapter->vfinfo[vf].trusted; |
1850 | ivi->linkstate = adapter->vfinfo[vf].link_state; |
1851 | return 0; |
1852 | } |
1853 | |