// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps - convert i40e_aq_link_speed to Mbps
 * @link_speed: the speed to convert
 *
 * Return the speed as a direct value in Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single VF
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe,
				   struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

#ifdef CONFIG_PCI_IOV
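/**
 * i40e_restore_all_vfs_msi_state - restore MSI state of all VFs
 * @pdev: PF's PCI device
 *
 * Walk the VF devices attached to this PF and restore each one's saved
 * MSI state.
 */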
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: notify the VF about the reset or not
 *
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
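	/* Each attempt below sleeps for 10-20 ms, so 20 attempts give the
	 * reset at least the ~200 ms quoted in the messages below.
	 */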
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state reset VF is impossible,
		 * so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all of its queues (there can
		 * be 1 to 16) as its own, they may actually belong to
		 * different VSIs (up to 4). We need to find out which
		 * queue belongs to which VSI.
		 */
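		/* Worked example (hypothetical sizes): with two TCs of
		 * 4 queue pairs each, queue_id 5 falls past ch[0]'s four
		 * queues and becomes queue 1 of ch[1]'s VSI.
		 */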
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));
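
	/* The LNKLSTN space is carved into per-VF chunks of
	 * (num_msix_vectors_vf - 1) entries; e.g. with a hypothetical 5
	 * vectors per VF, VF 3's vector 2 lands at LNKLSTN(4 * 3 + 1).
	 * Vector 0 instead uses the dedicated per-VF LNKLST0 register.
	 */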

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}
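
	/* linklistmap interleaves the supported queue types per queue:
	 * bit (QTYPES * q) marks queue q's Rx cause and the next bit its
	 * Tx cause, so below bit / QTYPES recovers the queue id and
	 * bit % QTYPES the queue type.
	 */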
	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL and TQCTL regs is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking when enabling interrupt zero
	 * for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF
 *
 * Release the RDMA queue-vector list and restore the MSI-X linked
 * lists that its CEQs had been spliced into.
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
						 reg);
			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
						reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
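		/* Sketch of the splice: LNKLSTN(v) -> old_first ...
		 * becomes LNKLSTN(v) -> CEQ -> old_first ..., with the
		 * old head preserved as the CEQ's "next" queue below.
		 */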
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
				       reg);
		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
					reg);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			       (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			       (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;
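
	/* Note: the HMC context stores the ring base address in
	 * 128-byte units, hence the division of dma_ring_addr above.
	 */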

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
			      vf->vf_id + hw->func_caps.vf_base_id);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
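
	/* hbuff/dbuff are in the units implied by the context shifts
	 * (1 << I40E_RXQ_CTX_HBUFF_SHIFT and 1 << I40E_RXQ_CTX_DBUFF_SHIFT
	 * bytes), which is also why the size caps above stop just short
	 * of 2 KB and 16 KB.
	 */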

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the mac filter is not
	 * applied for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate)
		max_tx_rate = vf->tx_rate;
	else if (vf->ch[idx].max_tx_rate)
		max_tx_rate = vf->ch[idx].max_tx_rate;

	if (max_tx_rate) {
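		/* tx_rate is in Mbps; the firmware takes the limit in
		 * bandwidth credits of I40E_BW_CREDIT_DIVISOR Mbps each,
		 * hence the division below.
		 */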
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

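		/* Each VSILAN_QTABLE register packs two queue indexes, one
		 * per 16-bit half; 0x7FF (I40E_QUEUE_END_OF_LIST) in a half
		 * marks it unused, hence 0x07FF07FF below.
		 */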
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes
 * the VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSIs */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

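	/* Read the VF's PCIe Device Status register (config offset
	 * VF_DEVICE_STATUS) indirectly through the PF_PCI_CIAA/CIAD pair
	 * and poll its Transactions Pending bit (VF_TRANS_PENDING_MASK)
	 * for roughly 100 us total.
	 */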
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed by the caller.
 *
 * Called to get the number of VLANs and the VLAN list present in
 * mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *		  for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *		    for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret, aq_tmp = 0;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

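	/* VLANs are present, so promiscuous mode has to be requested per
	 * VLAN; remember the first failure but keep trying the remaining
	 * VLANs.
	 */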
	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF VSIs and from the VF reset path to reset promiscuous mode.
 **/
static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
					   u16 vsi_id,
					   bool allmulti,
					   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int aq_ret = 0;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return -EINVAL;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return -ENOMEM;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other
 * process has reserved the hardware for any reset operations. This check
 * is done by waiting for the VF's admin queue interrupt enable bit
 * (I40E_VFINT_ICR0_ENA) to be set again by the VF driver, which signals
 * that any previous reset has been fully handled.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
		      I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer to
	 * do it earlier to give any VF config functions that may still be
	 * running at this point some time to finish.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync the previous VFR reset before triggering this one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
		       I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* waiting for finish reset by virtual driver */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset VF using VPGEN_VFRTRIG reg. This also sets the
		 * reset-in-progress state in the RSTAT1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If VF is being reset already we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}

1611 | /** |
1612 | * i40e_reset_all_vfs |
1613 | * @pf: pointer to the PF structure |
1614 | * @flr: VFLR was issued or not |
1615 | * |
1616 | * Reset all allocated VFs in one go. First, tell the hardware to reset each |
1617 | * VF, then do all the waiting in one chunk, and finally finish restoring each |
1618 | * VF after the wait. This is useful during PF routines which need to reset |
1619 | * all VFs, as otherwise it must perform these resets in a serialized fashion. |
1620 | * |
1621 | * Returns true if any VFs were reset, and false otherwise. |
1622 | **/ |
1623 | bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) |
1624 | { |
1625 | struct i40e_hw *hw = &pf->hw; |
1626 | struct i40e_vf *vf; |
1627 | u32 reg; |
1628 | int i; |
1629 | |
1630 | /* If we don't have any VFs, then there is nothing to reset */ |
1631 | if (!pf->num_alloc_vfs) |
1632 | return false; |
1633 | |
1634 | /* If VFs have been disabled, there is no need to reset */ |
1635 | if (test_and_set_bit(nr: __I40E_VF_DISABLE, addr: pf->state)) |
1636 | return false; |
1637 | |
1638 | /* Begin reset on all VFs at once */ |
1639 | for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { |
1640 | /* If VF is being reset no need to trigger reset again */ |
1641 | if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) |
1642 | i40e_trigger_vf_reset(vf, flr); |
1643 | } |
1644 | |
1645 | /* HW requires some time to make sure it can flush the FIFO for a VF |
1646 | * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in |
1647 | * sequence to make sure that it has completed. We'll keep track of |
1648 | * the VFs using a simple iterator that increments once that VF has |
1649 | * finished resetting. |
1650 | */ |
1651 | for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) { |
1652 | usleep_range(min: 10000, max: 20000); |
1653 | |
1654 | /* Check each VF in sequence, beginning with the VF to fail |
1655 | * the previous check. |
1656 | */ |
1657 | while (vf < &pf->vf[pf->num_alloc_vfs]) { |
1658 | if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { |
1659 | reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); |
1660 | if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) |
1661 | break; |
1662 | } |
1663 | |
1664 | /* If the current VF has finished resetting, move on |
1665 | * to the next VF in sequence. |
1666 | */ |
1667 | ++vf; |
1668 | } |
1669 | } |
1670 | |
1671 | if (flr) |
1672 | usleep_range(min: 10000, max: 20000); |
1673 | |
1674 | /* Display a warning if at least one VF didn't manage to reset in |
1675 | * time, but continue on with the operation. |
1676 | */ |
1677 | if (vf < &pf->vf[pf->num_alloc_vfs]) |
1678 | dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n" , |
1679 | vf->vf_id); |
usleep_range(10000, 20000);
1681 | |
1682 | /* Begin disabling all the rings associated with VFs, but do not wait |
1683 | * between each VF. |
1684 | */ |
1685 | for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { |
1686 | /* On initial reset, we don't have any queues to disable */ |
1687 | if (vf->lan_vsi_idx == 0) |
1688 | continue; |
1689 | |
1690 | /* If VF is reset in another thread just continue */ |
1691 | if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) |
1692 | continue; |
1693 | |
i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
1695 | } |
1696 | |
1697 | /* Now that we've notified HW to disable all of the VF rings, wait |
1698 | * until they finish. |
1699 | */ |
1700 | for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { |
1701 | /* On initial reset, we don't have any queues to disable */ |
1702 | if (vf->lan_vsi_idx == 0) |
1703 | continue; |
1704 | |
1705 | /* If VF is reset in another thread just continue */ |
1706 | if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) |
1707 | continue; |
1708 | |
i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
1710 | } |
1711 | |
1712 | /* Hw may need up to 50ms to finish disabling the RX queues. We |
1713 | * minimize the wait by delaying only once for all VFs. |
1714 | */ |
1715 | mdelay(50); |
1716 | |
1717 | /* Finish the reset on each VF */ |
1718 | for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) { |
1719 | /* If VF is reset in another thread just continue */ |
1720 | if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) |
1721 | continue; |
1722 | |
1723 | i40e_cleanup_reset_vf(vf); |
1724 | } |
1725 | |
1726 | i40e_flush(hw); |
usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
1729 | |
1730 | return true; |
1731 | } |
1732 | |
1733 | /** |
1734 | * i40e_free_vfs |
1735 | * @pf: pointer to the PF structure |
1736 | * |
1737 | * free VF resources |
1738 | **/ |
1739 | void i40e_free_vfs(struct i40e_pf *pf) |
1740 | { |
1741 | struct i40e_hw *hw = &pf->hw; |
1742 | u32 reg_idx, bit_idx; |
1743 | int i, tmp, vf_id; |
1744 | |
1745 | if (!pf->vf) |
1746 | return; |
1747 | |
set_bit(__I40E_VFS_RELEASING, pf->state);
while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
usleep_range(1000, 2000);
1751 | |
i40e_notify_client_of_vf_enable(pf, 0);
1753 | |
1754 | /* Disable IOV before freeing resources. This lets any VF drivers |
1755 | * running in the host get themselves cleaned up before we yank |
1756 | * the carpet out from underneath their feet. |
1757 | */ |
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1762 | |
1763 | /* Amortize wait time by stopping all VFs at the same time */ |
1764 | for (i = 0; i < pf->num_alloc_vfs; i++) { |
1765 | if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) |
1766 | continue; |
1767 | |
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1769 | } |
1770 | |
1771 | for (i = 0; i < pf->num_alloc_vfs; i++) { |
1772 | if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) |
1773 | continue; |
1774 | |
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1776 | } |
1777 | |
1778 | /* free up VF resources */ |
1779 | tmp = pf->num_alloc_vfs; |
1780 | pf->num_alloc_vfs = 0; |
1781 | for (i = 0; i < tmp; i++) { |
1782 | if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states)) |
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
i40e_disable_vf_mappings(&pf->vf[i]);
1786 | } |
1787 | |
kfree(pf->vf);
1789 | pf->vf = NULL; |
1790 | |
1791 | /* This check is for when the driver is unloaded while VFs are |
1792 | * assigned. Setting the number of VFs to 0 through sysfs is caught |
1793 | * before this function ever gets called. |
1794 | */ |
if (!pci_vfs_assigned(pf->pdev)) {
1796 | /* Acknowledge VFLR for all VFS. Without this, VFs will fail to |
1797 | * work correctly when SR-IOV gets re-enabled. |
1798 | */ |
1799 | for (vf_id = 0; vf_id < tmp; vf_id++) { |
1800 | reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; |
1801 | bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; |
1802 | wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); |
1803 | } |
1804 | } |
clear_bit(__I40E_VF_DISABLE, pf->state);
clear_bit(__I40E_VFS_RELEASING, pf->state);
1807 | } |
1808 | |
1809 | #ifdef CONFIG_PCI_IOV |
1810 | /** |
1811 | * i40e_alloc_vfs |
1812 | * @pf: pointer to the PF structure |
1813 | * @num_alloc_vfs: number of VFs to allocate |
1814 | * |
1815 | * allocate VF resources |
1816 | **/ |
1817 | int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) |
1818 | { |
1819 | struct i40e_vf *vfs; |
1820 | int i, ret = 0; |
1821 | |
1822 | /* Disable interrupt 0 so we don't try to handle the VFLR. */ |
1823 | i40e_irq_dynamic_disable_icr0(pf); |
1824 | |
1825 | /* Check to see if we're just allocating resources for extant VFs */ |
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1830 | pf->num_alloc_vfs = 0; |
1831 | goto err_iov; |
1832 | } |
1833 | } |
1834 | /* allocate memory */ |
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1836 | if (!vfs) { |
1837 | ret = -ENOMEM; |
1838 | goto err_alloc; |
1839 | } |
1840 | pf->vf = vfs; |
1841 | |
1842 | /* apply default profile */ |
1843 | for (i = 0; i < num_alloc_vfs; i++) { |
1844 | vfs[i].pf = pf; |
1845 | vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; |
1846 | vfs[i].vf_id = i; |
1847 | |
1848 | /* assign default capabilities */ |
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1850 | vfs[i].spoofchk = true; |
1851 | |
set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
}
1855 | pf->num_alloc_vfs = num_alloc_vfs; |
1856 | |
1857 | /* VF resources get allocated during reset */ |
i40e_reset_all_vfs(pf, false);
1859 | |
i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1861 | |
1862 | err_alloc: |
1863 | if (ret) |
1864 | i40e_free_vfs(pf); |
1865 | err_iov: |
1866 | /* Re-enable interrupt 0. */ |
1867 | i40e_irq_dynamic_enable_icr0(pf); |
1868 | return ret; |
1869 | } |
1870 | |
1871 | #endif |
1872 | /** |
1873 | * i40e_pci_sriov_enable |
1874 | * @pdev: pointer to a pci_dev structure |
1875 | * @num_vfs: number of VFs to allocate |
1876 | * |
1877 | * Enable or change the number of VFs |
1878 | **/ |
1879 | static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) |
1880 | { |
1881 | #ifdef CONFIG_PCI_IOV |
1882 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
int pre_existing_vfs = pci_num_vf(pdev);
1884 | int err = 0; |
1885 | |
1886 | if (test_bit(__I40E_TESTING, pf->state)) { |
1887 | dev_warn(&pdev->dev, |
1888 | "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n" ); |
1889 | err = -EPERM; |
1890 | goto err_out; |
1891 | } |
1892 | |
1893 | if (pre_existing_vfs && pre_existing_vfs != num_vfs) |
1894 | i40e_free_vfs(pf); |
1895 | else if (pre_existing_vfs && pre_existing_vfs == num_vfs) |
1896 | goto out; |
1897 | |
1898 | if (num_vfs > pf->num_req_vfs) { |
1899 | dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n" , |
1900 | num_vfs, pf->num_req_vfs); |
1901 | err = -EPERM; |
1902 | goto err_out; |
1903 | } |
1904 | |
1905 | dev_info(&pdev->dev, "Allocating %d VFs.\n" , num_vfs); |
1906 | err = i40e_alloc_vfs(pf, num_alloc_vfs: num_vfs); |
1907 | if (err) { |
1908 | dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n" , err); |
1909 | goto err_out; |
1910 | } |
1911 | |
1912 | out: |
1913 | return num_vfs; |
1914 | |
1915 | err_out: |
1916 | return err; |
1917 | #endif |
1918 | return 0; |
1919 | } |
1920 | |
1921 | /** |
1922 | * i40e_pci_sriov_configure |
1923 | * @pdev: pointer to a pci_dev structure |
1924 | * @num_vfs: number of VFs to allocate |
1925 | * |
1926 | * Enable or change the number of VFs. Called when the user updates the number |
1927 | * of VFs in sysfs. |
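* e.g. "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" creates four VFs,
* while writing 0 frees them again.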
1928 | **/ |
1929 | int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) |
1930 | { |
1931 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
1932 | int ret = 0; |
1933 | |
if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1936 | return -EAGAIN; |
1937 | } |
1938 | |
1939 | if (num_vfs) { |
1940 | if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { |
set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1942 | i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); |
1943 | } |
1944 | ret = i40e_pci_sriov_enable(pdev, num_vfs); |
1945 | goto sriov_configure_out; |
1946 | } |
1947 | |
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1951 | i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG); |
1952 | } else { |
1953 | dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n" ); |
1954 | ret = -EINVAL; |
1955 | goto sriov_configure_out; |
1956 | } |
1957 | sriov_configure_out: |
clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1959 | return ret; |
1960 | } |
1961 | |
1962 | /***********************virtual channel routines******************/ |
1963 | |
1964 | /** |
1965 | * i40e_vc_send_msg_to_vf |
1966 | * @vf: pointer to the VF info |
1967 | * @v_opcode: virtual channel opcode |
1968 | * @v_retval: virtual channel return value |
1969 | * @msg: pointer to the msg buffer |
1970 | * @msglen: msg length |
1971 | * |
1972 | * send msg to VF |
1973 | **/ |
1974 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, |
1975 | u32 v_retval, u8 *msg, u16 msglen) |
1976 | { |
1977 | struct i40e_pf *pf; |
1978 | struct i40e_hw *hw; |
1979 | int abs_vf_id; |
1980 | int aq_ret; |
1981 | |
1982 | /* validate the request */ |
1983 | if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) |
1984 | return -EINVAL; |
1985 | |
1986 | pf = vf->pf; |
1987 | hw = &pf->hw; |
1988 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; |
1989 | |
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1991 | msg, msglen, NULL); |
1992 | if (aq_ret) { |
1993 | dev_info(&pf->pdev->dev, |
1994 | "Unable to send the message to VF %d aq_err %d\n" , |
1995 | vf->vf_id, pf->hw.aq.asq_last_status); |
1996 | return -EIO; |
1997 | } |
1998 | |
1999 | return 0; |
2000 | } |
2001 | |
2002 | /** |
2003 | * i40e_vc_send_resp_to_vf |
2004 | * @vf: pointer to the VF info |
2005 | * @opcode: operation code |
2006 | * @retval: return value |
2007 | * |
2008 | * send resp msg to VF |
2009 | **/ |
2010 | static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, |
2011 | enum virtchnl_ops opcode, |
2012 | int retval) |
2013 | { |
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
2015 | } |
2016 | |
2017 | /** |
2018 | * i40e_sync_vf_state |
2019 | * @vf: pointer to the VF info |
2020 | * @state: VF state |
2021 | * |
2022 | * Called from a VF message to synchronize the service with a potential |
2023 | * VF reset state |
2024 | **/ |
2025 | static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) |
2026 | { |
2027 | int i; |
2028 | |
/* Handling some messages requires a VF state bit to be set.
* This bit may be cleared while a VF reset is in progress, so
* wait until the end of the reset to handle the request
* message correctly.
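* The wait is bounded at I40E_VF_STATE_WAIT_COUNT polls of 10-20 ms
* each before the state bit is checked one last time.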
2033 | */ |
2034 | for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { |
2035 | if (test_bit(state, &vf->vf_states)) |
2036 | return true; |
usleep_range(10000, 20000);
2038 | } |
2039 | |
2040 | return test_bit(state, &vf->vf_states); |
2041 | } |
2042 | |
2043 | /** |
2044 | * i40e_vc_get_version_msg |
2045 | * @vf: pointer to the VF info |
2046 | * @msg: pointer to the msg buffer |
2047 | * |
2048 | * called from the VF to request the API version used by the PF |
2049 | **/ |
2050 | static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) |
2051 | { |
2052 | struct virtchnl_version_info info = { |
2053 | VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR |
2054 | }; |
2055 | |
2056 | vf->vf_ver = *(struct virtchnl_version_info *)msg; |
2057 | /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ |
2058 | if (VF_IS_V10(&vf->vf_ver)) |
2059 | info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; |
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
0, (u8 *)&info,
sizeof(struct virtchnl_version_info));
2063 | } |
2064 | |
2065 | /** |
2066 | * i40e_del_qch - delete all the additional VSIs created as a part of ADq |
2067 | * @vf: pointer to VF structure |
2068 | **/ |
2069 | static void i40e_del_qch(struct i40e_vf *vf) |
2070 | { |
2071 | struct i40e_pf *pf = vf->pf; |
2072 | int i; |
2073 | |
/* The first element in the array belongs to the primary VF VSI and we
* shouldn't delete it. We should, however, delete the rest of the VSIs
* created.
*/
2077 | for (i = 1; i < vf->num_tc; i++) { |
2078 | if (vf->ch[i].vsi_idx) { |
i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2080 | vf->ch[i].vsi_idx = 0; |
2081 | vf->ch[i].vsi_id = 0; |
2082 | } |
2083 | } |
2084 | } |
2085 | |
2086 | /** |
2087 | * i40e_vc_get_max_frame_size |
2088 | * @vf: pointer to the VF |
2089 | * |
2090 | * Max frame size is determined based on the current port's max frame size and |
2091 | * whether a port VLAN is configured on this VF. The VF is not aware whether |
* it is in a port VLAN, so the PF must account for this both when checking
* the max frame size and when reporting the max frame size to the VF.
2094 | **/ |
2095 | static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) |
2096 | { |
2097 | u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; |
2098 | |
2099 | if (vf->port_vlan_id) |
2100 | max_frame_size -= VLAN_HLEN; |
2101 | |
2102 | return max_frame_size; |
2103 | } |
2104 | |
2105 | /** |
2106 | * i40e_vc_get_vf_resources_msg |
2107 | * @vf: pointer to the VF info |
2108 | * @msg: pointer to the msg buffer |
2109 | * |
2110 | * called from the VF to request its resources |
2111 | **/ |
2112 | static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) |
2113 | { |
2114 | struct virtchnl_vf_resource *vfres = NULL; |
2115 | struct i40e_pf *pf = vf->pf; |
2116 | struct i40e_vsi *vsi; |
2117 | int num_vsis = 1; |
2118 | int aq_ret = 0; |
2119 | size_t len = 0; |
2120 | int ret; |
2121 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2123 | aq_ret = -EINVAL; |
2124 | goto err; |
2125 | } |
2126 | |
2127 | len = virtchnl_struct_size(vfres, vsi_res, num_vsis); |
vfres = kzalloc(len, GFP_KERNEL);
2129 | if (!vfres) { |
2130 | aq_ret = -ENOMEM; |
2131 | len = 0; |
2132 | goto err; |
2133 | } |
2134 | if (VF_IS_V11(&vf->vf_ver)) |
2135 | vf->driver_caps = *(u32 *)msg; |
2136 | else |
2137 | vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | |
2138 | VIRTCHNL_VF_OFFLOAD_RSS_REG | |
2139 | VIRTCHNL_VF_OFFLOAD_VLAN; |
2140 | |
2141 | vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; |
2142 | vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; |
2143 | vsi = pf->vsi[vf->lan_vsi_idx]; |
2144 | if (!vsi->info.pvid) |
2145 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; |
2146 | |
if (i40e_vf_client_capable(pf, vf->vf_id) &&
2148 | (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) { |
2149 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA; |
set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2151 | } else { |
clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2153 | } |
2154 | |
2155 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { |
2156 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; |
2157 | } else { |
2158 | if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && |
2159 | (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) |
2160 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; |
2161 | else |
2162 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; |
2163 | } |
2164 | |
2165 | if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) { |
2166 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) |
2167 | vfres->vf_cap_flags |= |
2168 | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; |
2169 | } |
2170 | |
2171 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) |
2172 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; |
2173 | |
2174 | if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) && |
2175 | (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) |
2176 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; |
2177 | |
2178 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { |
2179 | if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { |
2180 | dev_err(&pf->pdev->dev, |
2181 | "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n" , |
2182 | vf->vf_id); |
2183 | aq_ret = -EINVAL; |
2184 | goto err; |
2185 | } |
2186 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; |
2187 | } |
2188 | |
2189 | if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) { |
2190 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) |
2191 | vfres->vf_cap_flags |= |
2192 | VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; |
2193 | } |
2194 | |
2195 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) |
2196 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; |
2197 | |
2198 | if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ) |
2199 | vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ; |
2200 | |
2201 | vfres->num_vsis = num_vsis; |
2202 | vfres->num_queue_pairs = vf->num_queue_pairs; |
2203 | vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; |
2204 | vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; |
2205 | vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; |
2206 | vfres->max_mtu = i40e_vc_get_max_frame_size(vf); |
2207 | |
2208 | if (vf->lan_vsi_idx) { |
2209 | vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; |
2210 | vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; |
2211 | vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; |
2212 | /* VFs only use TC 0 */ |
2213 | vfres->vsi_res[0].qset_handle |
2214 | = le16_to_cpu(vsi->info.qs_handle[0]); |
2215 | if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) { |
i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
eth_zero_addr(vf->default_lan_addr.addr);
2218 | } |
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
2221 | } |
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2223 | |
2224 | err: |
2225 | /* send the response back to the VF */ |
ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
aq_ret, (u8 *)vfres, len);
2228 | |
kfree(vfres);
2230 | return ret; |
2231 | } |
2232 | |
2233 | /** |
2234 | * i40e_vc_config_promiscuous_mode_msg |
2235 | * @vf: pointer to the VF info |
2236 | * @msg: pointer to the msg buffer |
2237 | * |
2238 | * called from the VF to configure the promiscuous mode of |
2239 | * VF vsis |
2240 | **/ |
2241 | static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) |
2242 | { |
2243 | struct virtchnl_promisc_info *info = |
2244 | (struct virtchnl_promisc_info *)msg; |
2245 | struct i40e_pf *pf = vf->pf; |
2246 | bool allmulti = false; |
2247 | bool alluni = false; |
2248 | int aq_ret = 0; |
2249 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2251 | aq_ret = -EINVAL; |
2252 | goto err_out; |
2253 | } |
2254 | if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { |
2255 | dev_err(&pf->pdev->dev, |
2256 | "Unprivileged VF %d is attempting to configure promiscuous mode\n" , |
2257 | vf->vf_id); |
2258 | |
2259 | /* Lie to the VF on purpose, because this is an error we can |
2260 | * ignore. Unprivileged VF is not a virtual channel error. |
2261 | */ |
2262 | aq_ret = 0; |
2263 | goto err_out; |
2264 | } |
2265 | |
2266 | if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) { |
2267 | aq_ret = -EINVAL; |
2268 | goto err_out; |
2269 | } |
2270 | |
if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2272 | aq_ret = -EINVAL; |
2273 | goto err_out; |
2274 | } |
2275 | |
/* Multicast promiscuous handling */
2277 | if (info->flags & FLAG_VF_MULTICAST_PROMISC) |
2278 | allmulti = true; |
2279 | |
2280 | if (info->flags & FLAG_VF_UNICAST_PROMISC) |
2281 | alluni = true; |
aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2283 | alluni); |
2284 | if (aq_ret) |
2285 | goto err_out; |
2286 | |
2287 | if (allmulti) { |
if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
&vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
&vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully unset multicast promiscuous mode\n",
vf->vf_id);

if (alluni) {
if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
&vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
&vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully unset unicast promiscuous mode\n",
vf->vf_id);
2310 | |
2311 | err_out: |
2312 | /* send the response to the VF */ |
2313 | return i40e_vc_send_resp_to_vf(vf, |
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
aq_ret);
2316 | } |
2317 | |
2318 | /** |
2319 | * i40e_vc_config_queues_msg |
2320 | * @vf: pointer to the VF info |
2321 | * @msg: pointer to the msg buffer |
2322 | * |
2323 | * called from the VF to configure the rx/tx |
2324 | * queues |
2325 | **/ |
2326 | static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) |
2327 | { |
2328 | struct virtchnl_vsi_queue_config_info *qci = |
2329 | (struct virtchnl_vsi_queue_config_info *)msg; |
2330 | struct virtchnl_queue_pair_info *qpi; |
2331 | u16 vsi_id, vsi_queue_id = 0; |
2332 | struct i40e_pf *pf = vf->pf; |
2333 | int i, j = 0, idx = 0; |
2334 | struct i40e_vsi *vsi; |
2335 | u16 num_qps_all = 0; |
2336 | int aq_ret = 0; |
2337 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2339 | aq_ret = -EINVAL; |
2340 | goto error_param; |
2341 | } |
2342 | |
if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2344 | aq_ret = -EINVAL; |
2345 | goto error_param; |
2346 | } |
2347 | |
2348 | if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { |
2349 | aq_ret = -EINVAL; |
2350 | goto error_param; |
2351 | } |
2352 | |
2353 | if (vf->adq_enabled) { |
2354 | for (i = 0; i < vf->num_tc; i++) |
2355 | num_qps_all += vf->ch[i].num_qps; |
2356 | if (num_qps_all != qci->num_queue_pairs) { |
2357 | aq_ret = -EINVAL; |
2358 | goto error_param; |
2359 | } |
2360 | } |
2361 | |
2362 | vsi_id = qci->vsi_id; |
2363 | |
2364 | for (i = 0; i < qci->num_queue_pairs; i++) { |
2365 | qpi = &qci->qpair[i]; |
2366 | |
2367 | if (!vf->adq_enabled) { |
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
qpi->txq.queue_id)) {
2370 | aq_ret = -EINVAL; |
2371 | goto error_param; |
2372 | } |
2373 | |
2374 | vsi_queue_id = qpi->txq.queue_id; |
2375 | |
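/* Tx and Rx of a queue pair must live on the same VSI and
* use the same queue index.
*/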
2376 | if (qpi->txq.vsi_id != qci->vsi_id || |
2377 | qpi->rxq.vsi_id != qci->vsi_id || |
2378 | qpi->rxq.queue_id != vsi_queue_id) { |
2379 | aq_ret = -EINVAL; |
2380 | goto error_param; |
2381 | } |
2382 | } |
2383 | |
2384 | if (vf->adq_enabled) { |
2385 | if (idx >= ARRAY_SIZE(vf->ch)) { |
2386 | aq_ret = -ENODEV; |
2387 | goto error_param; |
2388 | } |
2389 | vsi_id = vf->ch[idx].vsi_id; |
2390 | } |
2391 | |
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
&qpi->rxq) ||
i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
&qpi->txq)) {
2396 | aq_ret = -EINVAL; |
2397 | goto error_param; |
2398 | } |
2399 | |
/* For ADq there can be up to 4 VSIs with max 4 queues each.
* The VF does not know about these additional VSIs; all it
* cares about is its own queues. The PF configures these
* queues on the appropriate VSIs based on the TC mapping.
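* For example, a VF with two TCs of four queue pairs each sees
* one flat 0-7 queue range, while the PF places pairs 0-3 on
* ch[0]'s VSI and pairs 4-7 on ch[1]'s VSI via the idx/j
* counters below.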
2404 | */ |
2405 | if (vf->adq_enabled) { |
2406 | if (idx >= ARRAY_SIZE(vf->ch)) { |
2407 | aq_ret = -ENODEV; |
2408 | goto error_param; |
2409 | } |
2410 | if (j == (vf->ch[idx].num_qps - 1)) { |
2411 | idx++; |
2412 | j = 0; /* resetting the queue count */ |
2413 | vsi_queue_id = 0; |
2414 | } else { |
2415 | j++; |
2416 | vsi_queue_id++; |
2417 | } |
2418 | } |
2419 | } |
2420 | /* set vsi num_queue_pairs in use to num configured by VF */ |
2421 | if (!vf->adq_enabled) { |
2422 | pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = |
2423 | qci->num_queue_pairs; |
2424 | } else { |
2425 | for (i = 0; i < vf->num_tc; i++) { |
2426 | vsi = pf->vsi[vf->ch[i].vsi_idx]; |
2427 | vsi->num_queue_pairs = vf->ch[i].num_qps; |
2428 | |
if (i40e_update_adq_vsi_queues(vsi, i)) {
2430 | aq_ret = -EIO; |
2431 | goto error_param; |
2432 | } |
2433 | } |
2434 | } |
2435 | |
2436 | error_param: |
2437 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
aq_ret);
2440 | } |
2441 | |
2442 | /** |
2443 | * i40e_validate_queue_map - check queue map is valid |
2444 | * @vf: the VF structure pointer |
2445 | * @vsi_id: vsi id |
2446 | * @queuemap: Tx or Rx queue map |
2447 | * |
2448 | * check if Tx or Rx queue map is valid |
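*
* For ADq, bit N of the map refers to queue (N % I40E_DEFAULT_QUEUES_PER_VF)
* on the VSI of channel (N / I40E_MAX_VF_VSI); otherwise bit N simply refers
* to queue N of the given VSI.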
2449 | **/ |
2450 | static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, |
2451 | unsigned long queuemap) |
2452 | { |
2453 | u16 vsi_queue_id, queue_id; |
2454 | |
2455 | for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { |
2456 | if (vf->adq_enabled) { |
2457 | vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; |
2458 | queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); |
2459 | } else { |
2460 | queue_id = vsi_queue_id; |
2461 | } |
2462 | |
if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2464 | return -EINVAL; |
2465 | } |
2466 | |
2467 | return 0; |
2468 | } |
2469 | |
2470 | /** |
2471 | * i40e_vc_config_irq_map_msg |
2472 | * @vf: pointer to the VF info |
2473 | * @msg: pointer to the msg buffer |
2474 | * |
2475 | * called from the VF to configure the irq to |
2476 | * queue map |
2477 | **/ |
2478 | static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) |
2479 | { |
2480 | struct virtchnl_irq_map_info *irqmap_info = |
2481 | (struct virtchnl_irq_map_info *)msg; |
2482 | struct virtchnl_vector_map *map; |
2483 | int aq_ret = 0; |
2484 | u16 vsi_id; |
2485 | int i; |
2486 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2488 | aq_ret = -EINVAL; |
2489 | goto error_param; |
2490 | } |
2491 | |
2492 | if (irqmap_info->num_vectors > |
2493 | vf->pf->hw.func_caps.num_msix_vectors_vf) { |
2494 | aq_ret = -EINVAL; |
2495 | goto error_param; |
2496 | } |
2497 | |
2498 | for (i = 0; i < irqmap_info->num_vectors; i++) { |
2499 | map = &irqmap_info->vecmap[i]; |
2500 | /* validate msg params */ |
if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
!i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2503 | aq_ret = -EINVAL; |
2504 | goto error_param; |
2505 | } |
2506 | vsi_id = map->vsi_id; |
2507 | |
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2509 | aq_ret = -EINVAL; |
2510 | goto error_param; |
2511 | } |
2512 | |
if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2514 | aq_ret = -EINVAL; |
2515 | goto error_param; |
2516 | } |
2517 | |
i40e_config_irq_link_list(vf, vsi_id, map);
2519 | } |
2520 | error_param: |
2521 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
aq_ret);
2524 | } |
2525 | |
2526 | /** |
2527 | * i40e_ctrl_vf_tx_rings |
2528 | * @vsi: the SRIOV VSI being configured |
2529 | * @q_map: bit map of the queues to be enabled |
2530 | * @enable: start or stop the queue |
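*
* Returns 0 on success, or the first nonzero status if a queue fails.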
2531 | **/ |
2532 | static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, |
2533 | bool enable) |
2534 | { |
2535 | struct i40e_pf *pf = vsi->back; |
2536 | int ret = 0; |
2537 | u16 q_id; |
2538 | |
2539 | for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { |
ret = i40e_control_wait_tx_q(vsi->seid, pf,
vsi->base_queue + q_id,
false /*is xdp*/, enable);
2543 | if (ret) |
2544 | break; |
2545 | } |
2546 | return ret; |
2547 | } |
2548 | |
2549 | /** |
2550 | * i40e_ctrl_vf_rx_rings |
2551 | * @vsi: the SRIOV VSI being configured |
2552 | * @q_map: bit map of the queues to be enabled |
2553 | * @enable: start or stop the queue |
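*
* Returns 0 on success, or the first nonzero status if a queue fails.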
2554 | **/ |
2555 | static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, |
2556 | bool enable) |
2557 | { |
2558 | struct i40e_pf *pf = vsi->back; |
2559 | int ret = 0; |
2560 | u16 q_id; |
2561 | |
2562 | for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { |
ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2564 | enable); |
2565 | if (ret) |
2566 | break; |
2567 | } |
2568 | return ret; |
2569 | } |
2570 | |
2571 | /** |
* i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2573 | * @vqs: virtchnl_queue_select structure containing bitmaps to validate |
2574 | * |
2575 | * Returns true if validation was successful, else false. |
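*
* A bitmap is rejected if both the Rx and Tx maps are empty, or if any bit
* at or above I40E_MAX_VF_QUEUES is set.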
2576 | */ |
2577 | static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) |
2578 | { |
2579 | if ((!vqs->rx_queues && !vqs->tx_queues) || |
2580 | vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) || |
2581 | vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES)) |
2582 | return false; |
2583 | |
2584 | return true; |
2585 | } |
2586 | |
2587 | /** |
2588 | * i40e_vc_enable_queues_msg |
2589 | * @vf: pointer to the VF info |
2590 | * @msg: pointer to the msg buffer |
2591 | * |
2592 | * called from the VF to enable all or specific queue(s) |
2593 | **/ |
2594 | static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) |
2595 | { |
2596 | struct virtchnl_queue_select *vqs = |
2597 | (struct virtchnl_queue_select *)msg; |
2598 | struct i40e_pf *pf = vf->pf; |
2599 | int aq_ret = 0; |
2600 | int i; |
2601 | |
2602 | if (vf->is_disabled_from_host) { |
2603 | aq_ret = -EPERM; |
2604 | dev_info(&pf->pdev->dev, |
2605 | "Admin has disabled VF %d, will not enable queues\n" , |
2606 | vf->vf_id); |
2607 | goto error_param; |
2608 | } |
2609 | |
2610 | if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { |
2611 | aq_ret = -EINVAL; |
2612 | goto error_param; |
2613 | } |
2614 | |
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2616 | aq_ret = -EINVAL; |
2617 | goto error_param; |
2618 | } |
2619 | |
2620 | if (!i40e_vc_validate_vqs_bitmaps(vqs)) { |
2621 | aq_ret = -EINVAL; |
2622 | goto error_param; |
2623 | } |
2624 | |
2625 | /* Use the queue bit map sent by the VF */ |
if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
true)) {
2628 | aq_ret = -EIO; |
2629 | goto error_param; |
2630 | } |
if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
true)) {
2633 | aq_ret = -EIO; |
2634 | goto error_param; |
2635 | } |
2636 | |
/* need to start the rings for the additional ADq VSIs as well */
2638 | if (vf->adq_enabled) { |
2639 | /* zero belongs to LAN VSI */ |
2640 | for (i = 1; i < vf->num_tc; i++) { |
if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2642 | aq_ret = -EIO; |
2643 | } |
2644 | } |
2645 | |
2646 | error_param: |
2647 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
aq_ret);
2650 | } |
2651 | |
2652 | /** |
2653 | * i40e_vc_disable_queues_msg |
2654 | * @vf: pointer to the VF info |
2655 | * @msg: pointer to the msg buffer |
2656 | * |
2657 | * called from the VF to disable all or specific |
2658 | * queue(s) |
2659 | **/ |
2660 | static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) |
2661 | { |
2662 | struct virtchnl_queue_select *vqs = |
2663 | (struct virtchnl_queue_select *)msg; |
2664 | struct i40e_pf *pf = vf->pf; |
2665 | int aq_ret = 0; |
2666 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2668 | aq_ret = -EINVAL; |
2669 | goto error_param; |
2670 | } |
2671 | |
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2673 | aq_ret = -EINVAL; |
2674 | goto error_param; |
2675 | } |
2676 | |
2677 | if (!i40e_vc_validate_vqs_bitmaps(vqs)) { |
2678 | aq_ret = -EINVAL; |
2679 | goto error_param; |
2680 | } |
2681 | |
2682 | /* Use the queue bit map sent by the VF */ |
if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
false)) {
2685 | aq_ret = -EIO; |
2686 | goto error_param; |
2687 | } |
if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
false)) {
2690 | aq_ret = -EIO; |
2691 | goto error_param; |
2692 | } |
2693 | error_param: |
2694 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
aq_ret);
2697 | } |
2698 | |
2699 | /** |
* i40e_check_enough_queue - check whether enough queues are available
* @vf: pointer to the VF info
* @needed: the number of queue pairs needed
2703 | * |
* Returns the base index of a large enough block of queues, or negative for error
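*
* The check is two-phase: first see whether the VF's current allocation
* can be extended in place (enough free entries directly behind it in
* the queue pile); failing that, scan the whole pile for any contiguous
* run of at least @needed free entries.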
2705 | **/ |
2706 | static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) |
2707 | { |
2708 | unsigned int i, cur_queues, more, pool_size; |
2709 | struct i40e_lump_tracking *pile; |
2710 | struct i40e_pf *pf = vf->pf; |
2711 | struct i40e_vsi *vsi; |
2712 | |
2713 | vsi = pf->vsi[vf->lan_vsi_idx]; |
2714 | cur_queues = vsi->alloc_queue_pairs; |
2715 | |
/* if the currently allocated queues are already enough */
2717 | if (cur_queues >= needed) |
2718 | return vsi->base_queue; |
2719 | |
2720 | pile = pf->qp_pile; |
2721 | if (cur_queues > 0) { |
/* if some queues are already allocated, just check whether
* there are enough free queues for the extra ones directly
* behind the allocated range.
*/
2726 | more = needed - cur_queues; |
2727 | for (i = vsi->base_queue + cur_queues; |
2728 | i < pile->num_entries; i++) { |
2729 | if (pile->list[i] & I40E_PILE_VALID_BIT) |
2730 | break; |
2731 | |
2732 | if (more-- == 1) |
2733 | /* there is enough */ |
2734 | return vsi->base_queue; |
2735 | } |
2736 | } |
2737 | |
2738 | pool_size = 0; |
2739 | for (i = 0; i < pile->num_entries; i++) { |
2740 | if (pile->list[i] & I40E_PILE_VALID_BIT) { |
2741 | pool_size = 0; |
2742 | continue; |
2743 | } |
2744 | if (needed <= ++pool_size) |
2745 | /* there is enough */ |
2746 | return i; |
2747 | } |
2748 | |
2749 | return -ENOMEM; |
2750 | } |
2751 | |
2752 | /** |
2753 | * i40e_vc_request_queues_msg |
2754 | * @vf: pointer to the VF info |
2755 | * @msg: pointer to the msg buffer |
2756 | * |
2757 | * VFs get a default number of queues but can use this message to request a |
2758 | * different number. If the request is successful, PF will reset the VF and |
2759 | * return 0. If unsuccessful, PF will send message informing VF of number of |
2760 | * available queues and return result of sending VF a message. |
2761 | **/ |
2762 | static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) |
2763 | { |
2764 | struct virtchnl_vf_res_request *vfres = |
2765 | (struct virtchnl_vf_res_request *)msg; |
2766 | u16 req_pairs = vfres->num_queue_pairs; |
2767 | u8 cur_pairs = vf->num_queue_pairs; |
2768 | struct i40e_pf *pf = vf->pf; |
2769 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2771 | return -EINVAL; |
2772 | |
2773 | if (req_pairs > I40E_MAX_VF_QUEUES) { |
2774 | dev_err(&pf->pdev->dev, |
2775 | "VF %d tried to request more than %d queues.\n" , |
2776 | vf->vf_id, |
2777 | I40E_MAX_VF_QUEUES); |
2778 | vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; |
2779 | } else if (req_pairs - cur_pairs > pf->queues_left) { |
2780 | dev_warn(&pf->pdev->dev, |
2781 | "VF %d requested %d more queues, but only %d left.\n" , |
2782 | vf->vf_id, |
2783 | req_pairs - cur_pairs, |
2784 | pf->queues_left); |
2785 | vfres->num_queue_pairs = pf->queues_left + cur_pairs; |
} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but there are not enough available.\n",
2789 | vf->vf_id, |
2790 | req_pairs - cur_pairs); |
2791 | vfres->num_queue_pairs = cur_pairs; |
2792 | } else { |
2793 | /* successful request */ |
2794 | vf->num_req_queues = req_pairs; |
i40e_vc_reset_vf(vf, true);
2796 | return 0; |
2797 | } |
2798 | |
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
(u8 *)vfres, sizeof(*vfres));
2801 | } |
2802 | |
2803 | /** |
2804 | * i40e_vc_get_stats_msg |
2805 | * @vf: pointer to the VF info |
2806 | * @msg: pointer to the msg buffer |
2807 | * |
2808 | * called from the VF to get vsi stats |
2809 | **/ |
2810 | static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) |
2811 | { |
2812 | struct virtchnl_queue_select *vqs = |
2813 | (struct virtchnl_queue_select *)msg; |
2814 | struct i40e_pf *pf = vf->pf; |
2815 | struct i40e_eth_stats stats; |
2816 | int aq_ret = 0; |
2817 | struct i40e_vsi *vsi; |
2818 | |
2819 | memset(&stats, 0, sizeof(struct i40e_eth_stats)); |
2820 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2822 | aq_ret = -EINVAL; |
2823 | goto error_param; |
2824 | } |
2825 | |
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2827 | aq_ret = -EINVAL; |
2828 | goto error_param; |
2829 | } |
2830 | |
2831 | vsi = pf->vsi[vf->lan_vsi_idx]; |
2832 | if (!vsi) { |
2833 | aq_ret = -EINVAL; |
2834 | goto error_param; |
2835 | } |
2836 | i40e_update_eth_stats(vsi); |
2837 | stats = vsi->eth_stats; |
2838 | |
2839 | error_param: |
2840 | /* send the response back to the VF */ |
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
(u8 *)&stats, sizeof(stats));
2843 | } |
2844 | |
2845 | /** |
2846 | * i40e_can_vf_change_mac |
2847 | * @vf: pointer to the VF info |
2848 | * |
2849 | * Return true if the VF is allowed to change its MAC filters, false otherwise |
2850 | */ |
2851 | static bool i40e_can_vf_change_mac(struct i40e_vf *vf) |
2852 | { |
2853 | /* If the VF MAC address has been set administratively (via the |
2854 | * ndo_set_vf_mac command), then deny permission to the VF to |
2855 | * add/delete unicast MAC addresses, unless the VF is trusted |
2856 | */ |
2857 | if (vf->pf_set_mac && !vf->trusted) |
2858 | return false; |
2859 | |
2860 | return true; |
2861 | } |
2862 | |
2863 | #define I40E_MAX_MACVLAN_PER_HW 3072 |
2864 | #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ |
2865 | (num_ports)) |
/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
* program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2868 | */ |
2869 | #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) |
2870 | #define I40E_VC_MAX_VLAN_PER_VF 16 |
2871 | |
2872 | #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ |
2873 | ({ typeof(vf_num) vf_num_ = (vf_num); \ |
2874 | typeof(num_ports) num_ports_ = (num_ports); \ |
2875 | ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ |
2876 | I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ |
2877 | I40E_VC_MAX_MAC_ADDR_PER_VF; }) |
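/* Worked example (illustrative numbers): with 2 ports the per-PF pool is
* 3072 / 2 = 1536 filters; with 8 VFs a trusted VF may then use up to
* ((1536 - 8 * 18) / 8) + 18 = 192 MAC/VLAN filters, while an untrusted
* VF stays capped at 16 + 1 + 1 = 18.
*/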
2878 | /** |
2879 | * i40e_check_vf_permission |
2880 | * @vf: pointer to the VF info |
2881 | * @al: MAC address list from virtchnl |
2882 | * |
* Check that the given list of MAC addresses is allowed. Will return an
* error if any address in the list is not valid. Checks the following conditions:
2885 | * |
2886 | * 1) broadcast and zero addresses are never valid |
2887 | * 2) unicast addresses are not allowed if the VMM has administratively set |
2888 | * the VF MAC address, unless the VF is marked as privileged. |
2889 | * 3) There is enough space to add all the addresses. |
2890 | * |
2891 | * Note that to guarantee consistency, it is expected this function be called |
2892 | * while holding the mac_filter_hash_lock, as otherwise the current number of |
2893 | * addresses might not be accurate. |
2894 | **/ |
2895 | static inline int i40e_check_vf_permission(struct i40e_vf *vf, |
2896 | struct virtchnl_ether_addr_list *al) |
2897 | { |
2898 | struct i40e_pf *pf = vf->pf; |
2899 | struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; |
2900 | struct i40e_hw *hw = &pf->hw; |
2901 | int mac2add_cnt = 0; |
2902 | int i; |
2903 | |
2904 | for (i = 0; i < al->num_elements; i++) { |
2905 | struct i40e_mac_filter *f; |
2906 | u8 *addr = al->list[i].addr; |
2907 | |
2908 | if (is_broadcast_ether_addr(addr) || |
2909 | is_zero_ether_addr(addr)) { |
2910 | dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n" , |
2911 | addr); |
2912 | return -EINVAL; |
2913 | } |
2914 | |
2915 | /* If the host VMM administrator has set the VF MAC address |
2916 | * administratively via the ndo_set_vf_mac command then deny |
2917 | * permission to the VF to add or delete unicast MAC addresses. |
* Unless the VF is privileged, in which case it can do whatever.
2919 | * The VF may request to set the MAC address filter already |
2920 | * assigned to it so do not return an error in that case. |
2921 | */ |
2922 | if (!i40e_can_vf_change_mac(vf) && |
2923 | !is_multicast_ether_addr(addr) && |
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2925 | dev_err(&pf->pdev->dev, |
2926 | "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n" ); |
2927 | return -EPERM; |
2928 | } |
2929 | |
/* count filters that will really be added */
f = i40e_find_mac(vsi, addr);
2932 | if (!f) |
2933 | ++mac2add_cnt; |
2934 | } |
2935 | |
2936 | /* If this VF is not privileged, then we can't add more than a limited |
2937 | * number of addresses. Check to make sure that the additions do not |
2938 | * push us over the limit. |
2939 | */ |
2940 | if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { |
2941 | if ((i40e_count_filters(vsi) + mac2add_cnt) > |
2942 | I40E_VC_MAX_MAC_ADDR_PER_VF) { |
2943 | dev_err(&pf->pdev->dev, |
2944 | "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n" ); |
2945 | return -EPERM; |
2946 | } |
2947 | /* If this VF is trusted, it can use more resources than untrusted. |
* However, to ensure that every trusted VF has an appropriate number of
* resources, divide the whole pool of resources per port and then across
2950 | * all VFs. |
2951 | */ |
2952 | } else { |
2953 | if ((i40e_count_filters(vsi) + mac2add_cnt) > |
2954 | I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, |
2955 | hw->num_ports)) { |
2956 | dev_err(&pf->pdev->dev, |
2957 | "Cannot add more MAC addresses, trusted VF exhausted it's resources\n" ); |
2958 | return -EPERM; |
2959 | } |
2960 | } |
2961 | return 0; |
2962 | } |
2963 | |
2964 | /** |
2965 | * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr |
2966 | * @vc_ether_addr: used to extract the type |
2967 | **/ |
2968 | static u8 |
2969 | i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) |
2970 | { |
2971 | return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK; |
2972 | } |
2973 | |
2974 | /** |
2975 | * i40e_is_vc_addr_legacy |
2976 | * @vc_ether_addr: VIRTCHNL structure that contains MAC and type |
2977 | * |
2978 | * check if the MAC address is from an older VF |
2979 | **/ |
2980 | static bool |
2981 | i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) |
2982 | { |
2983 | return i40e_vc_ether_addr_type(vc_ether_addr) == |
2984 | VIRTCHNL_ETHER_ADDR_LEGACY; |
2985 | } |
2986 | |
2987 | /** |
2988 | * i40e_is_vc_addr_primary |
2989 | * @vc_ether_addr: VIRTCHNL structure that contains MAC and type |
2990 | * |
2991 | * check if the MAC address is the VF's primary MAC |
2992 | * This function should only be called when the MAC address in |
2993 | * virtchnl_ether_addr is a valid unicast MAC |
2994 | **/ |
2995 | static bool |
2996 | i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr) |
2997 | { |
2998 | return i40e_vc_ether_addr_type(vc_ether_addr) == |
2999 | VIRTCHNL_ETHER_ADDR_PRIMARY; |
3000 | } |
3001 | |
3002 | /** |
3003 | * i40e_update_vf_mac_addr |
3004 | * @vf: VF to update |
3005 | * @vc_ether_addr: structure from VIRTCHNL with MAC to add |
3006 | * |
3007 | * update the VF's cached hardware MAC if allowed |
3008 | **/ |
3009 | static void |
3010 | i40e_update_vf_mac_addr(struct i40e_vf *vf, |
3011 | struct virtchnl_ether_addr *vc_ether_addr) |
3012 | { |
3013 | u8 *mac_addr = vc_ether_addr->addr; |
3014 | |
if (!is_valid_ether_addr(mac_addr))
3016 | return; |
3017 | |
/* If the request to add a MAC filter is a primary request, update
* the default MAC address with the requested one. If it is a legacy
* request, update the default MAC only if the current default is
* empty.
*/
3022 | if (i40e_is_vc_addr_primary(vc_ether_addr)) { |
ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
if (is_zero_ether_addr(vf->default_lan_addr.addr))
ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3027 | } |
3028 | } |
3029 | |
3030 | /** |
3031 | * i40e_vc_add_mac_addr_msg |
3032 | * @vf: pointer to the VF info |
3033 | * @msg: pointer to the msg buffer |
3034 | * |
3035 | * add guest mac address filter |
3036 | **/ |
3037 | static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) |
3038 | { |
3039 | struct virtchnl_ether_addr_list *al = |
3040 | (struct virtchnl_ether_addr_list *)msg; |
3041 | struct i40e_pf *pf = vf->pf; |
3042 | struct i40e_vsi *vsi = NULL; |
3043 | int ret = 0; |
3044 | int i; |
3045 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3048 | ret = -EINVAL; |
3049 | goto error_param; |
3050 | } |
3051 | |
3052 | vsi = pf->vsi[vf->lan_vsi_idx]; |
3053 | |
/* Lock once, because every function inside the for loop accesses the
* VSI's MAC filter list, which must be protected by the same lock.
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
3058 | |
3059 | ret = i40e_check_vf_permission(vf, al); |
3060 | if (ret) { |
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3062 | goto error_param; |
3063 | } |
3064 | |
3065 | /* add new addresses to the list */ |
3066 | for (i = 0; i < al->num_elements; i++) { |
3067 | struct i40e_mac_filter *f; |
3068 | |
f = i40e_find_mac(vsi, al->list[i].addr);
3070 | if (!f) { |
f = i40e_add_mac_filter(vsi, al->list[i].addr);
3072 | |
3073 | if (!f) { |
3074 | dev_err(&pf->pdev->dev, |
3075 | "Unable to add MAC filter %pM for VF %d\n" , |
3076 | al->list[i].addr, vf->vf_id); |
3077 | ret = -EINVAL; |
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3079 | goto error_param; |
3080 | } |
3081 | } |
i40e_update_vf_mac_addr(vf, &al->list[i]);
3083 | } |
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3085 | |
3086 | /* program the updated filter list */ |
3087 | ret = i40e_sync_vsi_filters(vsi); |
3088 | if (ret) |
3089 | dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n" , |
3090 | vf->vf_id, ret); |
3091 | |
3092 | error_param: |
3093 | /* send the response to the VF */ |
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
ret, NULL, 0);
3096 | } |
3097 | |
3098 | /** |
3099 | * i40e_vc_del_mac_addr_msg |
3100 | * @vf: pointer to the VF info |
3101 | * @msg: pointer to the msg buffer |
3102 | * |
3103 | * remove guest mac address filter |
3104 | **/ |
3105 | static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) |
3106 | { |
3107 | struct virtchnl_ether_addr_list *al = |
3108 | (struct virtchnl_ether_addr_list *)msg; |
3109 | bool was_unimac_deleted = false; |
3110 | struct i40e_pf *pf = vf->pf; |
3111 | struct i40e_vsi *vsi = NULL; |
3112 | int ret = 0; |
3113 | int i; |
3114 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3117 | ret = -EINVAL; |
3118 | goto error_param; |
3119 | } |
3120 | |
3121 | for (i = 0; i < al->num_elements; i++) { |
if (is_broadcast_ether_addr(al->list[i].addr) ||
is_zero_ether_addr(al->list[i].addr)) {
dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3125 | al->list[i].addr, vf->vf_id); |
3126 | ret = -EINVAL; |
3127 | goto error_param; |
3128 | } |
3129 | } |
3130 | vsi = pf->vsi[vf->lan_vsi_idx]; |
3131 | |
spin_lock_bh(&vsi->mac_filter_hash_lock);
3133 | /* delete addresses from the list */ |
3134 | for (i = 0; i < al->num_elements; i++) { |
3135 | const u8 *addr = al->list[i].addr; |
3136 | |
3137 | /* Allow to delete VF primary MAC only if it was not set |
3138 | * administratively by PF or if VF is trusted. |
3139 | */ |
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
3141 | if (i40e_can_vf_change_mac(vf)) |
3142 | was_unimac_deleted = true; |
3143 | else |
3144 | continue; |
3145 | } |
3146 | |
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3148 | ret = -EINVAL; |
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3150 | goto error_param; |
3151 | } |
3152 | } |
3153 | |
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3155 | |
3156 | if (was_unimac_deleted) |
eth_zero_addr(vf->default_lan_addr.addr);
3158 | |
3159 | /* program the updated filter list */ |
3160 | ret = i40e_sync_vsi_filters(vsi); |
3161 | if (ret) |
3162 | dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n" , |
3163 | vf->vf_id, ret); |
3164 | |
3165 | if (vf->trusted && was_unimac_deleted) { |
3166 | struct i40e_mac_filter *f; |
3167 | struct hlist_node *h; |
3168 | u8 *macaddr = NULL; |
3169 | int bkt; |
3170 | |
3171 | /* set last unicast mac address as default */ |
spin_lock_bh(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (is_valid_ether_addr(f->macaddr))
3175 | macaddr = f->macaddr; |
3176 | } |
3177 | if (macaddr) |
ether_addr_copy(vf->default_lan_addr.addr, macaddr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
3180 | } |
3181 | error_param: |
3182 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3184 | } |
3185 | |
3186 | /** |
3187 | * i40e_vc_add_vlan_msg |
3188 | * @vf: pointer to the VF info |
3189 | * @msg: pointer to the msg buffer |
3190 | * |
3191 | * program guest vlan id |
3192 | **/ |
3193 | static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg) |
3194 | { |
3195 | struct virtchnl_vlan_filter_list *vfl = |
3196 | (struct virtchnl_vlan_filter_list *)msg; |
3197 | struct i40e_pf *pf = vf->pf; |
3198 | struct i40e_vsi *vsi = NULL; |
3199 | int aq_ret = 0; |
3200 | int i; |
3201 | |
3202 | if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && |
3203 | !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { |
3204 | dev_err(&pf->pdev->dev, |
3205 | "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n" ); |
3206 | goto error_param; |
3207 | } |
3208 | if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3210 | aq_ret = -EINVAL; |
3211 | goto error_param; |
3212 | } |
3213 | |
3214 | for (i = 0; i < vfl->num_elements; i++) { |
3215 | if (vfl->vlan_id[i] > I40E_MAX_VLANID) { |
3216 | aq_ret = -EINVAL; |
3217 | dev_err(&pf->pdev->dev, |
3218 | "invalid VF VLAN id %d\n" , vfl->vlan_id[i]); |
3219 | goto error_param; |
3220 | } |
3221 | } |
3222 | vsi = pf->vsi[vf->lan_vsi_idx]; |
3223 | if (vsi->info.pvid) { |
3224 | aq_ret = -EINVAL; |
3225 | goto error_param; |
3226 | } |
3227 | |
3228 | i40e_vlan_stripping_enable(vsi); |
3229 | for (i = 0; i < vfl->num_elements; i++) { |
3230 | /* add new VLAN filter */ |
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

if (!ret)
3233 | vf->num_vlan++; |
3234 | |
3235 | if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) |
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
3239 | NULL); |
3240 | if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) |
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
true,
vfl->vlan_id[i],
3244 | NULL); |
3245 | |
3246 | if (ret) |
3247 | dev_err(&pf->pdev->dev, |
3248 | "Unable to add VLAN filter %d for VF %d, error %d\n" , |
3249 | vfl->vlan_id[i], vf->vf_id, ret); |
3250 | } |
3251 | |
3252 | error_param: |
3253 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3255 | } |
3256 | |
3257 | /** |
3258 | * i40e_vc_remove_vlan_msg |
3259 | * @vf: pointer to the VF info |
3260 | * @msg: pointer to the msg buffer |
3261 | * |
3262 | * remove programmed guest vlan id |
3263 | **/ |
3264 | static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) |
3265 | { |
3266 | struct virtchnl_vlan_filter_list *vfl = |
3267 | (struct virtchnl_vlan_filter_list *)msg; |
3268 | struct i40e_pf *pf = vf->pf; |
3269 | struct i40e_vsi *vsi = NULL; |
3270 | int aq_ret = 0; |
3271 | int i; |
3272 | |
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3275 | aq_ret = -EINVAL; |
3276 | goto error_param; |
3277 | } |
3278 | |
3279 | for (i = 0; i < vfl->num_elements; i++) { |
3280 | if (vfl->vlan_id[i] > I40E_MAX_VLANID) { |
3281 | aq_ret = -EINVAL; |
3282 | goto error_param; |
3283 | } |
3284 | } |
3285 | |
3286 | vsi = pf->vsi[vf->lan_vsi_idx]; |
3287 | if (vsi->info.pvid) { |
3288 | if (vfl->num_elements > 1 || vfl->vlan_id[0]) |
3289 | aq_ret = -EINVAL; |
3290 | goto error_param; |
3291 | } |
3292 | |
3293 | for (i = 0; i < vfl->num_elements; i++) { |
i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3295 | vf->num_vlan--; |
3296 | |
3297 | if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) |
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
3301 | NULL); |
3302 | if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) |
i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
false,
vfl->vlan_id[i],
3306 | NULL); |
3307 | } |
3308 | |
3309 | error_param: |
3310 | /* send the response to the VF */ |
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3312 | } |
3313 | |
3314 | /** |
3315 | * i40e_vc_rdma_msg |
3316 | * @vf: pointer to the VF info |
3317 | * @msg: pointer to the msg buffer |
3318 | * @msglen: msg length |
3319 | * |
3320 | * called from the VF for the iwarp msgs |
3321 | **/ |
3322 | static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) |
3323 | { |
3324 | struct i40e_pf *pf = vf->pf; |
3325 | int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; |
3326 | int aq_ret = 0; |
3327 | |
3328 | if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
3329 | !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { |
3330 | aq_ret = -EINVAL; |
3331 | goto error_param; |
3332 | } |
3333 | |
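	/* Forward the message as-is to the registered RDMA client driver;
	 * only the VF state is validated here in the LAN driver.
	 */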
	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);
3336 | |
3337 | error_param: |
3338 | /* send the response to the VF */ |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
				       aq_ret);
3341 | } |
3342 | |
3343 | /** |
3344 | * i40e_vc_rdma_qvmap_msg |
3345 | * @vf: pointer to the VF info |
3346 | * @msg: pointer to the msg buffer |
3347 | * @config: config qvmap or release it |
3348 | * |
3349 | * called from the VF for the iwarp msgs |
3350 | **/ |
3351 | static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config) |
3352 | { |
3353 | struct virtchnl_rdma_qvlist_info *qvlist_info = |
3354 | (struct virtchnl_rdma_qvlist_info *)msg; |
3355 | int aq_ret = 0; |
3356 | |
3357 | if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || |
3358 | !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { |
3359 | aq_ret = -EINVAL; |
3360 | goto error_param; |
3361 | } |
3362 | |
3363 | if (config) { |
3364 | if (i40e_config_rdma_qvlist(vf, qvlist_info)) |
3365 | aq_ret = -EINVAL; |
3366 | } else { |
3367 | i40e_release_rdma_qvlist(vf); |
3368 | } |
3369 | |
3370 | error_param: |
3371 | /* send the response to the VF */ |
	return i40e_vc_send_resp_to_vf(vf,
				       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
				       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
				       aq_ret);
3376 | } |
3377 | |
3378 | /** |
3379 | * i40e_vc_config_rss_key |
3380 | * @vf: pointer to the VF info |
3381 | * @msg: pointer to the msg buffer |
3382 | * |
3383 | * Configure the VF's RSS key |
3384 | **/ |
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3386 | { |
3387 | struct virtchnl_rss_key *vrk = |
3388 | (struct virtchnl_rss_key *)msg; |
3389 | struct i40e_pf *pf = vf->pf; |
3390 | struct i40e_vsi *vsi = NULL; |
3391 | int aq_ret = 0; |
3392 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3395 | vrk->key_len != I40E_HKEY_ARRAY_SIZE) { |
3396 | aq_ret = -EINVAL; |
3397 | goto err; |
3398 | } |
3399 | |
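	/* The key is written verbatim into the VSI's RSS key registers, so
	 * only an exact I40E_HKEY_ARRAY_SIZE buffer is accepted above.
	 */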
3400 | vsi = pf->vsi[vf->lan_vsi_idx]; |
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3402 | err: |
3403 | /* send the response to the VF */ |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
3406 | } |
3407 | |
3408 | /** |
3409 | * i40e_vc_config_rss_lut |
3410 | * @vf: pointer to the VF info |
3411 | * @msg: pointer to the msg buffer |
3412 | * |
3413 | * Configure the VF's RSS LUT |
3414 | **/ |
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3416 | { |
3417 | struct virtchnl_rss_lut *vrl = |
3418 | (struct virtchnl_rss_lut *)msg; |
3419 | struct i40e_pf *pf = vf->pf; |
3420 | struct i40e_vsi *vsi = NULL; |
3421 | int aq_ret = 0; |
3422 | u16 i; |
3423 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3426 | vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { |
3427 | aq_ret = -EINVAL; |
3428 | goto err; |
3429 | } |
3430 | |
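	/* Each LUT entry is a queue index; reject any entry that points
	 * past the queues actually allocated to this VF.
	 */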
3431 | for (i = 0; i < vrl->lut_entries; i++) |
3432 | if (vrl->lut[i] >= vf->num_queue_pairs) { |
3433 | aq_ret = -EINVAL; |
3434 | goto err; |
3435 | } |
3436 | |
3437 | vsi = pf->vsi[vf->lan_vsi_idx]; |
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3439 | /* send the response to the VF */ |
3440 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
3443 | } |
3444 | |
3445 | /** |
3446 | * i40e_vc_get_rss_hena |
3447 | * @vf: pointer to the VF info |
3448 | * @msg: pointer to the msg buffer |
3449 | * |
3450 | * Return the RSS HENA bits allowed by the hardware |
3451 | **/ |
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3453 | { |
3454 | struct virtchnl_rss_hena *vrh = NULL; |
3455 | struct i40e_pf *pf = vf->pf; |
3456 | int aq_ret = 0; |
3457 | int len = 0; |
3458 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3460 | aq_ret = -EINVAL; |
3461 | goto err; |
3462 | } |
3463 | len = sizeof(struct virtchnl_rss_hena); |
3464 | |
	vrh = kzalloc(len, GFP_KERNEL);
3466 | if (!vrh) { |
3467 | aq_ret = -ENOMEM; |
3468 | len = 0; |
3469 | goto err; |
3470 | } |
3471 | vrh->hena = i40e_pf_get_default_rss_hena(pf); |
3472 | err: |
3473 | /* send the response back to the VF */ |
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
3477 | return aq_ret; |
3478 | } |
3479 | |
3480 | /** |
3481 | * i40e_vc_set_rss_hena |
3482 | * @vf: pointer to the VF info |
3483 | * @msg: pointer to the msg buffer |
3484 | * |
3485 | * Set the RSS HENA bits for the VF |
3486 | **/ |
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3488 | { |
3489 | struct virtchnl_rss_hena *vrh = |
3490 | (struct virtchnl_rss_hena *)msg; |
3491 | struct i40e_pf *pf = vf->pf; |
3492 | struct i40e_hw *hw = &pf->hw; |
3493 | int aq_ret = 0; |
3494 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3496 | aq_ret = -EINVAL; |
3497 | goto err; |
3498 | } |
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));
3502 | |
3503 | /* send the response to the VF */ |
3504 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3506 | } |
3507 | |
3508 | /** |
3509 | * i40e_vc_enable_vlan_stripping |
3510 | * @vf: pointer to the VF info |
3511 | * @msg: pointer to the msg buffer |
3512 | * |
3513 | * Enable vlan header stripping for the VF |
3514 | **/ |
3515 | static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) |
3516 | { |
3517 | struct i40e_vsi *vsi; |
3518 | int aq_ret = 0; |
3519 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3521 | aq_ret = -EINVAL; |
3522 | goto err; |
3523 | } |
3524 | |
3525 | vsi = vf->pf->vsi[vf->lan_vsi_idx]; |
3526 | i40e_vlan_stripping_enable(vsi); |
3527 | |
3528 | /* send the response to the VF */ |
3529 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
3532 | } |
3533 | |
3534 | /** |
3535 | * i40e_vc_disable_vlan_stripping |
3536 | * @vf: pointer to the VF info |
3537 | * @msg: pointer to the msg buffer |
3538 | * |
3539 | * Disable vlan header stripping for the VF |
3540 | **/ |
3541 | static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) |
3542 | { |
3543 | struct i40e_vsi *vsi; |
3544 | int aq_ret = 0; |
3545 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3547 | aq_ret = -EINVAL; |
3548 | goto err; |
3549 | } |
3550 | |
3551 | vsi = vf->pf->vsi[vf->lan_vsi_idx]; |
3552 | i40e_vlan_stripping_disable(vsi); |
3553 | |
3554 | /* send the response to the VF */ |
3555 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
3558 | } |
3559 | |
3560 | /** |
3561 | * i40e_validate_cloud_filter |
3562 | * @vf: pointer to VF structure |
3563 | * @tc_filter: pointer to filter requested |
3564 | * |
3565 | * This function validates cloud filter programmed as TC filter for ADq |
3566 | **/ |
3567 | static int i40e_validate_cloud_filter(struct i40e_vf *vf, |
3568 | struct virtchnl_filter *tc_filter) |
3569 | { |
3570 | struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec; |
3571 | struct virtchnl_l4_spec data = tc_filter->data.tcp_spec; |
3572 | struct i40e_pf *pf = vf->pf; |
3573 | struct i40e_vsi *vsi = NULL; |
3574 | struct i40e_mac_filter *f; |
3575 | struct hlist_node *h; |
3576 | bool found = false; |
3577 | int bkt; |
3578 | |
3579 | if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { |
		dev_info(&pf->pdev->dev,
			 "VF %d: ADQ doesn't support this action (%d)\n",
			 vf->vf_id, tc_filter->action);
3583 | goto err; |
3584 | } |
3585 | |
3586 | /* action_meta is TC number here to which the filter is applied */ |
3587 | if (!tc_filter->action_meta || |
3588 | tc_filter->action_meta > vf->num_tc) { |
		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3590 | vf->vf_id, tc_filter->action_meta); |
3591 | goto err; |
3592 | } |
3593 | |
3594 | /* Check filter if it's programmed for advanced mode or basic mode. |
3595 | * There are two ADq modes (for VF only), |
3596 | * 1. Basic mode: intended to allow as many filter options as possible |
3597 | * to be added to a VF in Non-trusted mode. Main goal is |
3598 | * to add filters to its own MAC and VLAN id. |
3599 | * 2. Advanced mode: is for allowing filters to be applied other than |
3600 | * its own MAC or VLAN. This mode requires the VF to be |
3601 | * Trusted. |
3602 | */ |
3603 | if (mask.dst_mac[0] && !mask.dst_ip[0]) { |
3604 | vsi = pf->vsi[vf->lan_vsi_idx]; |
		f = i40e_find_mac(vsi, data.dst_mac);
3606 | |
3607 | if (!f) { |
			dev_info(&pf->pdev->dev,
				 "Destination MAC %pM doesn't belong to VF %d\n",
				 data.dst_mac, vf->vf_id);
3611 | goto err; |
3612 | } |
3613 | |
3614 | if (mask.vlan_id) { |
3615 | hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, |
3616 | hlist) { |
3617 | if (f->vlan == ntohs(data.vlan_id)) { |
3618 | found = true; |
3619 | break; |
3620 | } |
3621 | } |
3622 | if (!found) { |
				dev_info(&pf->pdev->dev,
					 "VF %d doesn't have any VLAN id %u\n",
					 vf->vf_id, ntohs(data.vlan_id));
3626 | goto err; |
3627 | } |
3628 | } |
3629 | } else { |
3630 | /* Check if VF is trusted */ |
3631 | if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { |
			dev_err(&pf->pdev->dev,
				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
				vf->vf_id);
3635 | return -EIO; |
3636 | } |
3637 | } |
3638 | |
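	/* A field takes part in the match only where both its mask and its
	 * data bytes are set; each such field is sanity checked below.
	 */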
	if (mask.dst_mac[0] & data.dst_mac[0]) {
		if (is_broadcast_ether_addr(data.dst_mac) ||
		    is_zero_ether_addr(data.dst_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
				 vf->vf_id, data.dst_mac);
			goto err;
		}
	}

	if (mask.src_mac[0] & data.src_mac[0]) {
		if (is_broadcast_ether_addr(data.src_mac) ||
		    is_zero_ether_addr(data.src_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
				 vf->vf_id, data.src_mac);
			goto err;
		}
	}
3656 | |
3657 | if (mask.dst_port & data.dst_port) { |
3658 | if (!data.dst_port) { |
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3660 | vf->vf_id); |
3661 | goto err; |
3662 | } |
3663 | } |
3664 | |
3665 | if (mask.src_port & data.src_port) { |
3666 | if (!data.src_port) { |
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3668 | vf->vf_id); |
3669 | goto err; |
3670 | } |
3671 | } |
3672 | |
3673 | if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW && |
3674 | tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) { |
		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3676 | vf->vf_id); |
3677 | goto err; |
3678 | } |
3679 | |
3680 | if (mask.vlan_id & data.vlan_id) { |
3681 | if (ntohs(data.vlan_id) > I40E_MAX_VLANID) { |
			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3683 | vf->vf_id); |
3684 | goto err; |
3685 | } |
3686 | } |
3687 | |
3688 | return 0; |
3689 | err: |
3690 | return -EIO; |
3691 | } |
3692 | |
3693 | /** |
3694 | * i40e_find_vsi_from_seid - searches for the vsi with the given seid |
3695 | * @vf: pointer to the VF info |
3696 | * @seid: seid of the vsi it is searching for |
3697 | **/ |
3698 | static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid) |
3699 | { |
3700 | struct i40e_pf *pf = vf->pf; |
3701 | struct i40e_vsi *vsi = NULL; |
3702 | int i; |
3703 | |
	for (i = 0; i < vf->num_tc; i++) {
		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3706 | if (vsi && vsi->seid == seid) |
3707 | return vsi; |
3708 | } |
3709 | return NULL; |
3710 | } |
3711 | |
3712 | /** |
3713 | * i40e_del_all_cloud_filters |
3714 | * @vf: pointer to the VF info |
3715 | * |
3716 | * This function deletes all cloud filters |
3717 | **/ |
3718 | static void i40e_del_all_cloud_filters(struct i40e_vf *vf) |
3719 | { |
3720 | struct i40e_cloud_filter *cfilter = NULL; |
3721 | struct i40e_pf *pf = vf->pf; |
3722 | struct i40e_vsi *vsi = NULL; |
3723 | struct hlist_node *node; |
3724 | int ret; |
3725 | |
3726 | hlist_for_each_entry_safe(cfilter, node, |
3727 | &vf->cloud_filter_list, cloud_node) { |
		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3729 | |
3730 | if (!vsi) { |
			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3732 | vf->vf_id, cfilter->seid); |
3733 | continue; |
3734 | } |
3735 | |
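		/* Filters that match on a destination port were programmed
		 * with the big-buffer admin queue command and must be
		 * removed the same way.
		 */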
		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								false);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
		if (ret)
			dev_err(&pf->pdev->dev,
				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
				vf->vf_id, ERR_PTR(ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));

		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
3750 | vf->num_cloud_filters--; |
3751 | } |
3752 | } |
3753 | |
3754 | /** |
3755 | * i40e_vc_del_cloud_filter |
3756 | * @vf: pointer to the VF info |
3757 | * @msg: pointer to the msg buffer |
3758 | * |
3759 | * This function deletes a cloud filter programmed as TC filter for ADq |
3760 | **/ |
3761 | static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) |
3762 | { |
3763 | struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; |
3764 | struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; |
3765 | struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; |
3766 | struct i40e_cloud_filter cfilter, *cf = NULL; |
3767 | struct i40e_pf *pf = vf->pf; |
3768 | struct i40e_vsi *vsi = NULL; |
3769 | struct hlist_node *node; |
3770 | int aq_ret = 0; |
3771 | int i, ret; |
3772 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3774 | aq_ret = -EINVAL; |
3775 | goto err; |
3776 | } |
3777 | |
3778 | if (!vf->adq_enabled) { |
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq not enabled, can't apply cloud filter\n",
			 vf->vf_id);
3782 | aq_ret = -EINVAL; |
3783 | goto err; |
3784 | } |
3785 | |
	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input, can't apply cloud filter\n",
3789 | vf->vf_id); |
3790 | aq_ret = -EINVAL; |
3791 | goto err; |
3792 | } |
3793 | |
3794 | memset(&cfilter, 0, sizeof(cfilter)); |
3795 | /* parse destination mac address */ |
3796 | for (i = 0; i < ETH_ALEN; i++) |
3797 | cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; |
3798 | |
3799 | /* parse source mac address */ |
3800 | for (i = 0; i < ETH_ALEN; i++) |
3801 | cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; |
3802 | |
3803 | cfilter.vlan_id = mask.vlan_id & tcf.vlan_id; |
3804 | cfilter.dst_port = mask.dst_port & tcf.dst_port; |
3805 | cfilter.src_port = mask.src_port & tcf.src_port; |
3806 | |
	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter.n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.src_ip[0])
			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.src_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter.n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter.ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter.ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}
3833 | |
	/* get the VSI to which the TC belongs */
3835 | vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; |
3836 | cfilter.seid = vsi->seid; |
3837 | cfilter.flags = vcf->field_flags; |
3838 | |
3839 | /* Deleting TC filter */ |
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
	else
		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3849 | goto err; |
3850 | } |
3851 | |
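	/* The hardware delete succeeded; drop the matching entry from the
	 * VF's software list so the bookkeeping stays in sync.
	 */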
	hlist_for_each_entry_safe(cf, node,
				  &vf->cloud_filter_list, cloud_node) {
		if (cf->seid != cfilter.seid)
			continue;
		if (mask.dst_port)
			if (cfilter.dst_port != cf->dst_port)
				continue;
		if (mask.dst_mac[0])
			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
				continue;
		/* for ipv4 data to be valid, only first byte of mask is set */
		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
				   ARRAY_SIZE(tcf.dst_ip)))
				continue;
		/* for ipv6, mask is set for all sixteen bytes (4 words) */
		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
				   sizeof(cfilter.ip.v6.dst_ip6)))
				continue;
		if (mask.vlan_id)
			if (cfilter.vlan_id != cf->vlan_id)
				continue;

		hlist_del(&cf->cloud_node);
		kfree(cf);
3878 | vf->num_cloud_filters--; |
3879 | } |
3880 | |
3881 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
				       aq_ret);
3884 | } |
3885 | |
3886 | /** |
3887 | * i40e_vc_add_cloud_filter |
3888 | * @vf: pointer to the VF info |
3889 | * @msg: pointer to the msg buffer |
3890 | * |
3891 | * This function adds a cloud filter programmed as TC filter for ADq |
3892 | **/ |
3893 | static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) |
3894 | { |
3895 | struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg; |
3896 | struct virtchnl_l4_spec mask = vcf->mask.tcp_spec; |
3897 | struct virtchnl_l4_spec tcf = vcf->data.tcp_spec; |
3898 | struct i40e_cloud_filter *cfilter = NULL; |
3899 | struct i40e_pf *pf = vf->pf; |
3900 | struct i40e_vsi *vsi = NULL; |
3901 | int aq_ret = 0; |
3902 | int i; |
3903 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3905 | aq_ret = -EINVAL; |
3906 | goto err_out; |
3907 | } |
3908 | |
3909 | if (!vf->adq_enabled) { |
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
			 vf->vf_id);
3913 | aq_ret = -EINVAL; |
3914 | goto err_out; |
3915 | } |
3916 | |
	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input/s, can't apply cloud filter\n",
3920 | vf->vf_id); |
3921 | aq_ret = -EINVAL; |
3922 | goto err_out; |
3923 | } |
3924 | |
	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3926 | if (!cfilter) { |
3927 | aq_ret = -ENOMEM; |
3928 | goto err_out; |
3929 | } |
3930 | |
3931 | /* parse destination mac address */ |
3932 | for (i = 0; i < ETH_ALEN; i++) |
3933 | cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i]; |
3934 | |
3935 | /* parse source mac address */ |
3936 | for (i = 0; i < ETH_ALEN; i++) |
3937 | cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i]; |
3938 | |
3939 | cfilter->vlan_id = mask.vlan_id & tcf.vlan_id; |
3940 | cfilter->dst_port = mask.dst_port & tcf.dst_port; |
3941 | cfilter->src_port = mask.src_port & tcf.src_port; |
3942 | |
	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter->n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.src_ip[0])
			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.src_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter->n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter->ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter->ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}
3969 | |
	/* get the VSI to which the TC belongs */
3971 | vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx]; |
3972 | cfilter->seid = vsi->seid; |
3973 | cfilter->flags = vcf->field_flags; |
3974 | |
3975 | /* Adding cloud filter programmed as TC filter */ |
	if (tcf.dst_port)
		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
	else
		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
			vf->vf_id, ERR_PTR(aq_ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3985 | goto err_free; |
3986 | } |
3987 | |
	INIT_HLIST_NODE(&cfilter->cloud_node);
	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
	/* ownership of cfilter passes to the list; clear the local pointer
	 * so the shared err_free path's kfree() becomes a no-op
	 */
3991 | cfilter = NULL; |
3992 | vf->num_cloud_filters++; |
3993 | err_free: |
	kfree(cfilter);
3995 | err_out: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
				       aq_ret);
3998 | } |
3999 | |
4000 | /** |
4001 | * i40e_vc_add_qch_msg: Add queue channel and enable ADq |
4002 | * @vf: pointer to the VF info |
4003 | * @msg: pointer to the msg buffer |
4004 | **/ |
4005 | static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) |
4006 | { |
4007 | struct virtchnl_tc_info *tci = |
4008 | (struct virtchnl_tc_info *)msg; |
4009 | struct i40e_pf *pf = vf->pf; |
4010 | struct i40e_link_status *ls = &pf->hw.phy.link_info; |
4011 | int i, adq_request_qps = 0; |
4012 | int aq_ret = 0; |
4013 | u64 speed = 0; |
4014 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4016 | aq_ret = -EINVAL; |
4017 | goto err; |
4018 | } |
4019 | |
4020 | /* ADq cannot be applied if spoof check is ON */ |
4021 | if (vf->spoofchk) { |
		dev_err(&pf->pdev->dev,
			"Spoof check is ON, turn it OFF to enable ADq\n");
4024 | aq_ret = -EINVAL; |
4025 | goto err; |
4026 | } |
4027 | |
4028 | if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) { |
		dev_err(&pf->pdev->dev,
			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
			vf->vf_id);
4032 | aq_ret = -EINVAL; |
4033 | goto err; |
4034 | } |
4035 | |
4036 | /* max number of traffic classes for VF currently capped at 4 */ |
4037 | if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) { |
		dev_err(&pf->pdev->dev,
			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4041 | aq_ret = -EINVAL; |
4042 | goto err; |
4043 | } |
4044 | |
4045 | /* validate queues for each TC */ |
4046 | for (i = 0; i < tci->num_tc; i++) |
4047 | if (!tci->list[i].count || |
4048 | tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) { |
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
				vf->vf_id, i, tci->list[i].count,
				I40E_DEFAULT_QUEUES_PER_VF);
4053 | aq_ret = -EINVAL; |
4054 | goto err; |
4055 | } |
4056 | |
4057 | /* need Max VF queues but already have default number of queues */ |
4058 | adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF; |
4059 | |
4060 | if (pf->queues_left < adq_request_qps) { |
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
4064 | aq_ret = -EINVAL; |
4065 | goto err; |
4066 | } else { |
4067 | /* we need to allocate max VF queues to enable ADq so as to |
4068 | * make sure ADq enabled VF always gets back queues when it |
4069 | * goes through a reset. |
4070 | */ |
4071 | vf->num_queue_pairs = I40E_MAX_VF_QUEUES; |
4072 | } |
4073 | |
4074 | /* get link speed in MB to validate rate limit */ |
	speed = i40e_vc_link_speed2mbps(ls->link_speed);
4076 | if (speed == SPEED_UNKNOWN) { |
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
4079 | aq_ret = -EINVAL; |
4080 | goto err; |
4081 | } |
4082 | |
4083 | /* parse data from the queue channel info */ |
4084 | vf->num_tc = tci->num_tc; |
4085 | for (i = 0; i < vf->num_tc; i++) { |
4086 | if (tci->list[i].max_tx_rate) { |
4087 | if (tci->list[i].max_tx_rate > speed) { |
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d.\n",
					tci->list[i].max_tx_rate,
					vf->vf_id);
4092 | aq_ret = -EINVAL; |
4093 | goto err; |
4094 | } else { |
4095 | vf->ch[i].max_tx_rate = |
4096 | tci->list[i].max_tx_rate; |
4097 | } |
4098 | } |
4099 | vf->ch[i].num_qps = tci->list[i].count; |
4100 | } |
4101 | |
4102 | /* set this flag only after making sure all inputs are sane */ |
4103 | vf->adq_enabled = true; |
4104 | |
4105 | /* reset the VF in order to allocate resources */ |
	i40e_vc_reset_vf(vf, true);
4107 | |
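	/* Success is signalled through the reset notification rather than a
	 * direct VIRTCHNL response; the VF re-negotiates its resources once
	 * the reset completes.
	 */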
4108 | return 0; |
4109 | |
4110 | /* send the response to the VF */ |
4111 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
				       aq_ret);
4114 | } |
4115 | |
4116 | /** |
4117 | * i40e_vc_del_qch_msg |
4118 | * @vf: pointer to the VF info |
4119 | * @msg: pointer to the msg buffer |
4120 | **/ |
4121 | static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) |
4122 | { |
4123 | struct i40e_pf *pf = vf->pf; |
4124 | int aq_ret = 0; |
4125 | |
	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4127 | aq_ret = -EINVAL; |
4128 | goto err; |
4129 | } |
4130 | |
4131 | if (vf->adq_enabled) { |
4132 | i40e_del_all_cloud_filters(vf); |
4133 | i40e_del_qch(vf); |
4134 | vf->adq_enabled = false; |
4135 | vf->num_tc = 0; |
		dev_info(&pf->pdev->dev,
			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
			 vf->vf_id);
4139 | } else { |
		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4141 | vf->vf_id); |
4142 | aq_ret = -EINVAL; |
4143 | } |
4144 | |
4145 | /* reset the VF in order to allocate resources */ |
	i40e_vc_reset_vf(vf, true);
4147 | |
4148 | return 0; |
4149 | |
4150 | err: |
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
				       aq_ret);
4153 | } |
4154 | |
4155 | /** |
4156 | * i40e_vc_process_vf_msg |
4157 | * @pf: pointer to the PF structure |
4158 | * @vf_id: source VF id |
4159 | * @v_opcode: operation code |
4160 | * @v_retval: unused return value code |
4161 | * @msg: pointer to the msg buffer |
4162 | * @msglen: msg length |
4163 | * |
4164 | * called from the common aeq/arq handler to |
4165 | * process request from VF |
4166 | **/ |
4167 | int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, |
4168 | u32 __always_unused v_retval, u8 *msg, u16 msglen) |
4169 | { |
4170 | struct i40e_hw *hw = &pf->hw; |
4171 | int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; |
4172 | struct i40e_vf *vf; |
4173 | int ret; |
4174 | |
4175 | pf->vf_aq_requests++; |
4176 | if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) |
4177 | return -EINVAL; |
	vf = &pf->vf[local_vf_id];
4179 | |
4180 | /* Check if VF is disabled. */ |
4181 | if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) |
4182 | return -EINVAL; |
4183 | |
4184 | /* perform basic checks on the msg */ |
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4186 | |
4187 | if (ret) { |
		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4190 | local_vf_id, v_opcode, msglen); |
4191 | return ret; |
4192 | } |
4193 | |
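	/* Dispatch to the per-opcode handler; the handlers are responsible
	 * for any VIRTCHNL response sent back to the VF.
	 */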
4194 | switch (v_opcode) { |
4195 | case VIRTCHNL_OP_VERSION: |
4196 | ret = i40e_vc_get_version_msg(vf, msg); |
4197 | break; |
4198 | case VIRTCHNL_OP_GET_VF_RESOURCES: |
4199 | ret = i40e_vc_get_vf_resources_msg(vf, msg); |
4200 | i40e_vc_notify_vf_link_state(vf); |
4201 | break; |
4202 | case VIRTCHNL_OP_RESET_VF: |
		i40e_vc_reset_vf(vf, false);
4204 | ret = 0; |
4205 | break; |
4206 | case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: |
4207 | ret = i40e_vc_config_promiscuous_mode_msg(vf, msg); |
4208 | break; |
4209 | case VIRTCHNL_OP_CONFIG_VSI_QUEUES: |
4210 | ret = i40e_vc_config_queues_msg(vf, msg); |
4211 | break; |
4212 | case VIRTCHNL_OP_CONFIG_IRQ_MAP: |
4213 | ret = i40e_vc_config_irq_map_msg(vf, msg); |
4214 | break; |
4215 | case VIRTCHNL_OP_ENABLE_QUEUES: |
4216 | ret = i40e_vc_enable_queues_msg(vf, msg); |
4217 | i40e_vc_notify_vf_link_state(vf); |
4218 | break; |
4219 | case VIRTCHNL_OP_DISABLE_QUEUES: |
4220 | ret = i40e_vc_disable_queues_msg(vf, msg); |
4221 | break; |
4222 | case VIRTCHNL_OP_ADD_ETH_ADDR: |
4223 | ret = i40e_vc_add_mac_addr_msg(vf, msg); |
4224 | break; |
4225 | case VIRTCHNL_OP_DEL_ETH_ADDR: |
4226 | ret = i40e_vc_del_mac_addr_msg(vf, msg); |
4227 | break; |
4228 | case VIRTCHNL_OP_ADD_VLAN: |
4229 | ret = i40e_vc_add_vlan_msg(vf, msg); |
4230 | break; |
4231 | case VIRTCHNL_OP_DEL_VLAN: |
4232 | ret = i40e_vc_remove_vlan_msg(vf, msg); |
4233 | break; |
4234 | case VIRTCHNL_OP_GET_STATS: |
4235 | ret = i40e_vc_get_stats_msg(vf, msg); |
4236 | break; |
4237 | case VIRTCHNL_OP_RDMA: |
4238 | ret = i40e_vc_rdma_msg(vf, msg, msglen); |
4239 | break; |
4240 | case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: |
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4242 | break; |
4243 | case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: |
		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4245 | break; |
4246 | case VIRTCHNL_OP_CONFIG_RSS_KEY: |
4247 | ret = i40e_vc_config_rss_key(vf, msg); |
4248 | break; |
4249 | case VIRTCHNL_OP_CONFIG_RSS_LUT: |
4250 | ret = i40e_vc_config_rss_lut(vf, msg); |
4251 | break; |
4252 | case VIRTCHNL_OP_GET_RSS_HENA_CAPS: |
4253 | ret = i40e_vc_get_rss_hena(vf, msg); |
4254 | break; |
4255 | case VIRTCHNL_OP_SET_RSS_HENA: |
4256 | ret = i40e_vc_set_rss_hena(vf, msg); |
4257 | break; |
4258 | case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: |
4259 | ret = i40e_vc_enable_vlan_stripping(vf, msg); |
4260 | break; |
4261 | case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: |
4262 | ret = i40e_vc_disable_vlan_stripping(vf, msg); |
4263 | break; |
4264 | case VIRTCHNL_OP_REQUEST_QUEUES: |
4265 | ret = i40e_vc_request_queues_msg(vf, msg); |
4266 | break; |
4267 | case VIRTCHNL_OP_ENABLE_CHANNELS: |
4268 | ret = i40e_vc_add_qch_msg(vf, msg); |
4269 | break; |
4270 | case VIRTCHNL_OP_DISABLE_CHANNELS: |
4271 | ret = i40e_vc_del_qch_msg(vf, msg); |
4272 | break; |
4273 | case VIRTCHNL_OP_ADD_CLOUD_FILTER: |
4274 | ret = i40e_vc_add_cloud_filter(vf, msg); |
4275 | break; |
4276 | case VIRTCHNL_OP_DEL_CLOUD_FILTER: |
4277 | ret = i40e_vc_del_cloud_filter(vf, msg); |
4278 | break; |
4279 | case VIRTCHNL_OP_UNKNOWN: |
4280 | default: |
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      -EOPNOTSUPP);
4285 | break; |
4286 | } |
4287 | |
4288 | return ret; |
4289 | } |
4290 | |
4291 | /** |
4292 | * i40e_vc_process_vflr_event |
4293 | * @pf: pointer to the PF structure |
4294 | * |
 * called from the VFLR irq handler to
 * free up VF resources and state variables
4297 | **/ |
4298 | int i40e_vc_process_vflr_event(struct i40e_pf *pf) |
4299 | { |
4300 | struct i40e_hw *hw = &pf->hw; |
4301 | u32 reg, reg_idx, bit_idx; |
4302 | struct i40e_vf *vf; |
4303 | int vf_id; |
4304 | |
4305 | if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) |
4306 | return 0; |
4307 | |
4308 | /* Re-enable the VFLR interrupt cause here, before looking for which |
4309 | * VF got reset. Otherwise, if another VF gets a reset while the |
4310 | * first one is being processed, that interrupt will be lost, and |
4311 | * that VF will be stuck in reset forever. |
4312 | */ |
4313 | reg = rd32(hw, I40E_PFINT_ICR0_ENA); |
4314 | reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; |
4315 | wr32(hw, I40E_PFINT_ICR0_ENA, reg); |
4316 | i40e_flush(hw); |
4317 | |
	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4319 | for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { |
4320 | reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; |
4321 | bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; |
4322 | /* read GLGEN_VFLRSTAT register to find out the flr VFs */ |
4323 | vf = &pf->vf[vf_id]; |
4324 | reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); |
4325 | if (reg & BIT(bit_idx)) |
4326 | /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ |
			i40e_reset_vf(vf, true);
4328 | } |
4329 | |
4330 | return 0; |
4331 | } |
4332 | |
4333 | /** |
4334 | * i40e_validate_vf |
4335 | * @pf: the physical function |
4336 | * @vf_id: VF identifier |
4337 | * |
4338 | * Check that the VF is enabled and the VSI exists. |
4339 | * |
4340 | * Returns 0 on success, negative on failure |
4341 | **/ |
4342 | static int i40e_validate_vf(struct i40e_pf *pf, int vf_id) |
4343 | { |
4344 | struct i40e_vsi *vsi; |
4345 | struct i40e_vf *vf; |
4346 | int ret = 0; |
4347 | |
4348 | if (vf_id >= pf->num_alloc_vfs) { |
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
4351 | ret = -EINVAL; |
4352 | goto err_out; |
4353 | } |
4354 | vf = &pf->vf[vf_id]; |
	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4356 | if (!vsi) |
4357 | ret = -EINVAL; |
4358 | err_out: |
4359 | return ret; |
4360 | } |
4361 | |
4362 | /** |
4363 | * i40e_check_vf_init_timeout |
4364 | * @vf: the virtual function |
4365 | * |
4366 | * Check that the VF's initialization was successfully done and if not |
4367 | * wait up to 300ms for its finish. |
4368 | * |
4369 | * Returns true when VF is initialized, false on timeout |
4370 | **/ |
4371 | static bool i40e_check_vf_init_timeout(struct i40e_vf *vf) |
4372 | { |
4373 | int i; |
4374 | |
4375 | /* When the VF is resetting wait until it is done. |
4376 | * It can take up to 200 milliseconds, but wait for |
4377 | * up to 300 milliseconds to be safe. |
4378 | */ |
4379 | for (i = 0; i < 15; i++) { |
4380 | if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) |
4381 | return true; |
		msleep(20);
4383 | } |
4384 | |
4385 | if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { |
		dev_err(&vf->pf->pdev->dev,
			"VF %d still in reset. Try again.\n", vf->vf_id);
4388 | return false; |
4389 | } |
4390 | |
4391 | return true; |
4392 | } |
4393 | |
4394 | /** |
4395 | * i40e_ndo_set_vf_mac |
4396 | * @netdev: network interface device structure |
4397 | * @vf_id: VF identifier |
4398 | * @mac: mac address |
4399 | * |
4400 | * program VF mac address |
4401 | **/ |
4402 | int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) |
4403 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4405 | struct i40e_vsi *vsi = np->vsi; |
4406 | struct i40e_pf *pf = vsi->back; |
4407 | struct i40e_mac_filter *f; |
4408 | struct i40e_vf *vf; |
4409 | int ret = 0; |
4410 | struct hlist_node *h; |
4411 | int bkt; |
4412 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4415 | return -EAGAIN; |
4416 | } |
4417 | |
4418 | /* validate the request */ |
4419 | ret = i40e_validate_vf(pf, vf_id); |
4420 | if (ret) |
4421 | goto error_param; |
4422 | |
4423 | vf = &pf->vf[vf_id]; |
4424 | if (!i40e_check_vf_init_timeout(vf)) { |
4425 | ret = -EAGAIN; |
4426 | goto error_param; |
4427 | } |
4428 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4429 | |
	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4433 | ret = -EINVAL; |
4434 | goto error_param; |
4435 | } |
4436 | |
4437 | /* Lock once because below invoked function add/del_filter requires |
4438 | * mac_filter_hash_lock to be held |
4439 | */ |
	spin_lock_bh(&vsi->mac_filter_hash_lock);
4441 | |
4442 | /* delete the temporary mac address */ |
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4445 | |
4446 | /* Delete all the filters for this VSI - we're going to kill it |
4447 | * anyway. |
4448 | */ |
4449 | hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) |
4450 | __i40e_del_filter(vsi, f); |
4451 | |
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4453 | |
4454 | /* program mac filter */ |
4455 | if (i40e_sync_vsi_filters(vsi)) { |
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4457 | ret = -EIO; |
4458 | goto error_param; |
4459 | } |
	ether_addr_copy(vf->default_lan_addr.addr, mac);
4461 | |
	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
4469 | } |
4470 | |
4471 | /* Force the VF interface down so it has to bring up with new MAC |
4472 | * address |
4473 | */ |
	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4476 | |
4477 | error_param: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4479 | return ret; |
4480 | } |
4481 | |
4482 | /** |
4483 | * i40e_ndo_set_vf_port_vlan |
4484 | * @netdev: network interface device structure |
4485 | * @vf_id: VF identifier |
 * @vlan_id: VLAN identifier
4487 | * @qos: priority setting |
4488 | * @vlan_proto: vlan protocol |
4489 | * |
4490 | * program VF vlan id and/or qos |
4491 | **/ |
4492 | int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, |
4493 | u16 vlan_id, u8 qos, __be16 vlan_proto) |
4494 | { |
4495 | u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT); |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4497 | bool allmulti = false, alluni = false; |
4498 | struct i40e_pf *pf = np->vsi->back; |
4499 | struct i40e_vsi *vsi; |
4500 | struct i40e_vf *vf; |
4501 | int ret = 0; |
4502 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4505 | return -EAGAIN; |
4506 | } |
4507 | |
4508 | /* validate the request */ |
4509 | ret = i40e_validate_vf(pf, vf_id); |
4510 | if (ret) |
4511 | goto error_pvid; |
4512 | |
	if (vlan_id > I40E_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4515 | ret = -EINVAL; |
4516 | goto error_pvid; |
4517 | } |
4518 | |
4519 | if (vlan_proto != htons(ETH_P_8021Q)) { |
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4521 | ret = -EPROTONOSUPPORT; |
4522 | goto error_pvid; |
4523 | } |
4524 | |
4525 | vf = &pf->vf[vf_id]; |
4526 | if (!i40e_check_vf_init_timeout(vf)) { |
4527 | ret = -EAGAIN; |
4528 | goto error_pvid; |
4529 | } |
4530 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4531 | |
4532 | if (le16_to_cpu(vsi->info.pvid) == vlanprio) |
4533 | /* duplicate request, so just return success */ |
4534 | goto error_pvid; |
4535 | |
4536 | i40e_vlan_stripping_enable(vsi); |
4537 | |
4538 | /* Locked once because multiple functions below iterate list */ |
	spin_lock_bh(&vsi->mac_filter_hash_lock);
4540 | |
4541 | /* Check for condition where there was already a port VLAN ID |
4542 | * filter set and now it is being deleted by setting it to zero. |
4543 | * Additionally check for the condition where there was a port |
4544 | * VLAN but now there is a new and different port VLAN being set. |
4545 | * Before deleting all the old VLAN filters we must add new ones |
4546 | * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our |
4547 | * MAC addresses deleted. |
4548 | */ |
4549 | if ((!(vlan_id || qos) || |
4550 | vlanprio != le16_to_cpu(vsi->info.pvid)) && |
4551 | vsi->info.pvid) { |
4552 | ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY); |
4553 | if (ret) { |
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4558 | goto error_pvid; |
4559 | } |
4560 | } |
4561 | |
4562 | if (vsi->info.pvid) { |
4563 | /* remove all filters on the old VLAN */ |
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4565 | VLAN_VID_MASK)); |
4566 | } |
4567 | |
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4569 | |
4570 | /* disable promisc modes in case they were enabled */ |
	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4572 | allmulti, alluni); |
4573 | if (ret) { |
		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4575 | goto error_pvid; |
4576 | } |
4577 | |
4578 | if (vlan_id || qos) |
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
4583 | |
4584 | if (vlan_id) { |
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4586 | vlan_id, qos, vf_id); |
4587 | |
4588 | /* add new VLAN filter for each MAC */ |
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4595 | goto error_pvid; |
4596 | } |
4597 | |
4598 | /* remove the previously added non-VLAN MAC filters */ |
4599 | i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY); |
4600 | } |
4601 | |
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4603 | |
4604 | if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states)) |
4605 | alluni = true; |
4606 | |
4607 | if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states)) |
4608 | allmulti = true; |
4609 | |
4610 | /* Schedule the worker thread to take care of applying changes */ |
	i40e_service_event_schedule(vsi->back);
4612 | |
4613 | if (ret) { |
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4615 | goto error_pvid; |
4616 | } |
4617 | |
4618 | /* The Port VLAN needs to be saved across resets the same as the |
4619 | * default LAN MAC address. |
4620 | */ |
4621 | vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); |
4622 | |
	i40e_vc_reset_vf(vf, true);
4624 | /* During reset the VF got a new VSI, so refresh a pointer. */ |
4625 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4626 | |
	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4628 | if (ret) { |
		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4630 | goto error_pvid; |
4631 | } |
4632 | |
4633 | ret = 0; |
4634 | |
4635 | error_pvid: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4637 | return ret; |
4638 | } |
4639 | |
4640 | /** |
4641 | * i40e_ndo_set_vf_bw |
4642 | * @netdev: network interface device structure |
4643 | * @vf_id: VF identifier |
4644 | * @min_tx_rate: Minimum Tx rate |
4645 | * @max_tx_rate: Maximum Tx rate |
4646 | * |
4647 | * configure VF Tx rate |
4648 | **/ |
4649 | int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, |
4650 | int max_tx_rate) |
4651 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4653 | struct i40e_pf *pf = np->vsi->back; |
4654 | struct i40e_vsi *vsi; |
4655 | struct i40e_vf *vf; |
4656 | int ret = 0; |
4657 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4660 | return -EAGAIN; |
4661 | } |
4662 | |
4663 | /* validate the request */ |
4664 | ret = i40e_validate_vf(pf, vf_id); |
4665 | if (ret) |
4666 | goto error; |
4667 | |
4668 | if (min_tx_rate) { |
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4670 | min_tx_rate, vf_id); |
4671 | ret = -EINVAL; |
4672 | goto error; |
4673 | } |
4674 | |
4675 | vf = &pf->vf[vf_id]; |
4676 | if (!i40e_check_vf_init_timeout(vf)) { |
4677 | ret = -EAGAIN; |
4678 | goto error; |
4679 | } |
4680 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4681 | |
	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4683 | if (ret) |
4684 | goto error; |
4685 | |
4686 | vf->tx_rate = max_tx_rate; |
4687 | error: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4689 | return ret; |
4690 | } |
4691 | |
4692 | /** |
4693 | * i40e_ndo_get_vf_config |
4694 | * @netdev: network interface device structure |
4695 | * @vf_id: VF identifier |
4696 | * @ivi: VF configuration structure |
4697 | * |
4698 | * return VF configuration |
4699 | **/ |
4700 | int i40e_ndo_get_vf_config(struct net_device *netdev, |
4701 | int vf_id, struct ifla_vf_info *ivi) |
4702 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4704 | struct i40e_vsi *vsi = np->vsi; |
4705 | struct i40e_pf *pf = vsi->back; |
4706 | struct i40e_vf *vf; |
4707 | int ret = 0; |
4708 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4711 | return -EAGAIN; |
4712 | } |
4713 | |
4714 | /* validate the request */ |
4715 | ret = i40e_validate_vf(pf, vf_id); |
4716 | if (ret) |
4717 | goto error_param; |
4718 | |
4719 | vf = &pf->vf[vf_id]; |
4720 | /* first vsi is always the LAN vsi */ |
4721 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4722 | if (!vsi) { |
4723 | ret = -ENOENT; |
4724 | goto error_param; |
4725 | } |
4726 | |
4727 | ivi->vf = vf_id; |
4728 | |
	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4730 | |
4731 | ivi->max_tx_rate = vf->tx_rate; |
4732 | ivi->min_tx_rate = 0; |
	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4741 | ivi->spoofchk = vf->spoofchk; |
4742 | ivi->trusted = vf->trusted; |
4743 | ret = 0; |
4744 | |
4745 | error_param: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4747 | return ret; |
4748 | } |
4749 | |
4750 | /** |
4751 | * i40e_ndo_set_vf_link_state |
4752 | * @netdev: network interface device structure |
4753 | * @vf_id: VF identifier |
4754 | * @link: required link state |
4755 | * |
4756 | * Set the link state of a specified VF, regardless of physical link state |
4757 | **/ |
4758 | int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) |
4759 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4761 | struct i40e_pf *pf = np->vsi->back; |
4762 | struct i40e_link_status *ls = &pf->hw.phy.link_info; |
4763 | struct virtchnl_pf_event pfe; |
4764 | struct i40e_hw *hw = &pf->hw; |
4765 | struct i40e_vsi *vsi; |
4766 | unsigned long q_map; |
4767 | struct i40e_vf *vf; |
4768 | int abs_vf_id; |
4769 | int ret = 0; |
4770 | int tmp; |
4771 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4774 | return -EAGAIN; |
4775 | } |
4776 | |
4777 | /* validate the request */ |
4778 | if (vf_id >= pf->num_alloc_vfs) { |
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4780 | ret = -EINVAL; |
4781 | goto error_out; |
4782 | } |
4783 | |
4784 | vf = &pf->vf[vf_id]; |
4785 | abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; |
4786 | |
4787 | pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; |
4788 | pfe.severity = PF_EVENT_SEVERITY_INFO; |
4789 | |
4790 | switch (link) { |
4791 | case IFLA_VF_LINK_STATE_AUTO: |
4792 | vf->link_forced = false; |
4793 | vf->is_disabled_from_host = false; |
4794 | /* reset needed to reinit VF resources */ |
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
4797 | break; |
4798 | case IFLA_VF_LINK_STATE_ENABLE: |
4799 | vf->link_forced = true; |
4800 | vf->link_up = true; |
4801 | vf->is_disabled_from_host = false; |
4802 | /* reset needed to reinit VF resources */ |
		i40e_vc_reset_vf(vf, true);
		i40e_set_vf_link_state(vf, &pfe, ls);
4805 | break; |
4806 | case IFLA_VF_LINK_STATE_DISABLE: |
4807 | vf->link_forced = true; |
4808 | vf->link_up = false; |
		i40e_set_vf_link_state(vf, &pfe, ls);
4810 | |
4811 | vsi = pf->vsi[vf->lan_vsi_idx]; |
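		/* Build a mask with one bit per queue pair so that all of
		 * the VF's rings can be stopped per direction in one call.
		 */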
4812 | q_map = BIT(vsi->num_queue_pairs) - 1; |
4813 | |
4814 | vf->is_disabled_from_host = true; |
4815 | |
4816 | /* Try to stop both Tx&Rx rings even if one of the calls fails |
4817 | * to ensure we stop the rings even in case of errors. |
4818 | * If any of them returns with an error then the first |
4819 | * error that occurred will be returned. |
4820 | */ |
		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4823 | |
4824 | ret = tmp ? tmp : ret; |
4825 | break; |
4826 | default: |
4827 | ret = -EINVAL; |
4828 | goto error_out; |
4829 | } |
4830 | /* Notify the VF of its new link state */ |
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4833 | |
4834 | error_out: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4836 | return ret; |
4837 | } |
4838 | |
4839 | /** |
4840 | * i40e_ndo_set_vf_spoofchk |
4841 | * @netdev: network interface device structure |
4842 | * @vf_id: VF identifier |
4843 | * @enable: flag to enable or disable feature |
4844 | * |
4845 | * Enable or disable VF spoof checking |
4846 | **/ |
4847 | int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) |
4848 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4850 | struct i40e_vsi *vsi = np->vsi; |
4851 | struct i40e_pf *pf = vsi->back; |
4852 | struct i40e_vsi_context ctxt; |
4853 | struct i40e_hw *hw = &pf->hw; |
4854 | struct i40e_vf *vf; |
4855 | int ret = 0; |
4856 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4859 | return -EAGAIN; |
4860 | } |
4861 | |
4862 | /* validate the request */ |
4863 | if (vf_id >= pf->num_alloc_vfs) { |
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4865 | ret = -EINVAL; |
4866 | goto out; |
4867 | } |
4868 | |
	vf = &pf->vf[vf_id];
4870 | if (!i40e_check_vf_init_timeout(vf)) { |
4871 | ret = -EAGAIN; |
4872 | goto out; |
4873 | } |
4874 | |
4875 | if (enable == vf->spoofchk) |
4876 | goto out; |
4877 | |
4878 | vf->spoofchk = enable; |
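	/* Only the security section of the VSI context is updated; MAC and
	 * VLAN anti-spoof checks are switched together.
	 */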
4879 | memset(&ctxt, 0, sizeof(ctxt)); |
4880 | ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; |
4881 | ctxt.pf_num = pf->hw.pf_id; |
4882 | ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); |
4883 | if (enable) |
4884 | ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | |
4885 | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); |
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4887 | if (ret) { |
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4889 | ret); |
4890 | ret = -EIO; |
4891 | } |
4892 | out: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4894 | return ret; |
4895 | } |
4896 | |
4897 | /** |
4898 | * i40e_ndo_set_vf_trust |
4899 | * @netdev: network interface device structure of the pf |
4900 | * @vf_id: VF identifier |
4901 | * @setting: trust setting |
4902 | * |
4903 | * Enable or disable VF trust setting |
4904 | **/ |
4905 | int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) |
4906 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4908 | struct i40e_pf *pf = np->vsi->back; |
4909 | struct i40e_vf *vf; |
4910 | int ret = 0; |
4911 | |
	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4914 | return -EAGAIN; |
4915 | } |
4916 | |
4917 | /* validate the request */ |
4918 | if (vf_id >= pf->num_alloc_vfs) { |
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4920 | ret = -EINVAL; |
4921 | goto out; |
4922 | } |
4923 | |
4924 | if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { |
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4926 | ret = -EINVAL; |
4927 | goto out; |
4928 | } |
4929 | |
4930 | vf = &pf->vf[vf_id]; |
4931 | |
4932 | if (setting == vf->trusted) |
4933 | goto out; |
4934 | |
4935 | vf->trusted = setting; |
4936 | |
4937 | /* request PF to sync mac/vlan filters for the VF */ |
	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4939 | pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED; |
4940 | |
	i40e_vc_reset_vf(vf, true);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");
4944 | |
	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}
4953 | |
4954 | out: |
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4956 | return ret; |
4957 | } |
4958 | |
4959 | /** |
4960 | * i40e_get_vf_stats - populate some stats for the VF |
4961 | * @netdev: the netdev of the PF |
4962 | * @vf_id: the host OS identifier (0-127) |
4963 | * @vf_stats: pointer to the OS memory to be initialized |
4964 | */ |
4965 | int i40e_get_vf_stats(struct net_device *netdev, int vf_id, |
4966 | struct ifla_vf_stats *vf_stats) |
4967 | { |
	struct i40e_netdev_priv *np = netdev_priv(netdev);
4969 | struct i40e_pf *pf = np->vsi->back; |
4970 | struct i40e_eth_stats *stats; |
4971 | struct i40e_vsi *vsi; |
4972 | struct i40e_vf *vf; |
4973 | |
4974 | /* validate the request */ |
4975 | if (i40e_validate_vf(pf, vf_id)) |
4976 | return -EINVAL; |
4977 | |
4978 | vf = &pf->vf[vf_id]; |
4979 | if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { |
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4981 | return -EBUSY; |
4982 | } |
4983 | |
4984 | vsi = pf->vsi[vf->lan_vsi_idx]; |
4985 | if (!vsi) |
4986 | return -EINVAL; |
4987 | |
4988 | i40e_update_eth_stats(vsi); |
4989 | stats = &vsi->eth_stats; |
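	/* ifla_vf_stats counts all packet classes together, so fold the
	 * unicast, broadcast and multicast counters into rx/tx_packets.
	 */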
4990 | |
4991 | memset(vf_stats, 0, sizeof(*vf_stats)); |
4992 | |
4993 | vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + |
4994 | stats->rx_multicast; |
4995 | vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + |
4996 | stats->tx_multicast; |
4997 | vf_stats->rx_bytes = stats->rx_bytes; |
4998 | vf_stats->tx_bytes = stats->tx_bytes; |
4999 | vf_stats->broadcast = stats->rx_broadcast; |
5000 | vf_stats->multicast = stats->rx_multicast; |
5001 | vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; |
5002 | vf_stats->tx_dropped = stats->tx_discards; |
5003 | |
5004 | return 0; |
5005 | } |
5006 | |