1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
2 | /* QLogic qed NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #include <linux/etherdevice.h> |
8 | #include <linux/crc32.h> |
9 | #include <linux/vmalloc.h> |
10 | #include <linux/crash_dump.h> |
11 | #include <linux/qed/qed_iov_if.h> |
12 | #include "qed_cxt.h" |
13 | #include "qed_hsi.h" |
14 | #include "qed_iro_hsi.h" |
15 | #include "qed_hw.h" |
16 | #include "qed_init_ops.h" |
17 | #include "qed_int.h" |
18 | #include "qed_mcp.h" |
19 | #include "qed_reg_addr.h" |
20 | #include "qed_sp.h" |
21 | #include "qed_sriov.h" |
22 | #include "qed_vf.h" |
23 | static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); |
24 | |
25 | static u16 qed_vf_from_entity_id(__le16 entity_id) |
26 | { |
27 | return le16_to_cpu(entity_id) - MAX_NUM_PFS; |
28 | } |
29 | |
30 | static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) |
31 | { |
32 | u8 legacy = 0; |
33 | |
34 | if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == |
35 | ETH_HSI_VER_NO_PKT_LEN_TUNN) |
36 | legacy |= QED_QCID_LEGACY_VF_RX_PROD; |
37 | |
38 | if (!(p_vf->acquire.vfdev_info.capabilities & |
39 | VFPF_ACQUIRE_CAP_QUEUE_QIDS)) |
40 | legacy |= QED_QCID_LEGACY_VF_CID; |
41 | |
42 | return legacy; |
43 | } |
44 | |
45 | /* IOV ramrods */ |
46 | static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) |
47 | { |
48 | struct vf_start_ramrod_data *p_ramrod = NULL; |
49 | struct qed_spq_entry *p_ent = NULL; |
50 | struct qed_sp_init_data init_data; |
51 | int rc = -EINVAL; |
52 | u8 fp_minor; |
53 | |
54 | /* Get SPQ entry */ |
55 | memset(&init_data, 0, sizeof(init_data)); |
56 | init_data.cid = qed_spq_get_cid(p_hwfn); |
57 | init_data.opaque_fid = p_vf->opaque_fid; |
58 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
59 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
63 | if (rc) |
64 | return rc; |
65 | |
66 | p_ramrod = &p_ent->ramrod.vf_start; |
67 | |
68 | p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); |
69 | p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); |
70 | |
71 | switch (p_hwfn->hw_info.personality) { |
72 | case QED_PCI_ETH: |
73 | p_ramrod->personality = PERSONALITY_ETH; |
74 | break; |
75 | case QED_PCI_ETH_ROCE: |
76 | case QED_PCI_ETH_IWARP: |
77 | p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; |
78 | break; |
79 | default: |
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
81 | p_hwfn->hw_info.personality); |
82 | qed_sp_destroy_request(p_hwfn, p_ent); |
83 | return -EINVAL; |
84 | } |
85 | |
86 | fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; |
87 | if (fp_minor > ETH_HSI_VER_MINOR && |
88 | fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { |
89 | DP_VERBOSE(p_hwfn, |
90 | QED_MSG_IOV, |
91 | "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n" , |
92 | p_vf->abs_vf_id, |
93 | ETH_HSI_VER_MAJOR, |
94 | fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); |
95 | fp_minor = ETH_HSI_VER_MINOR; |
96 | } |
97 | |
98 | p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; |
99 | p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; |
100 | |
101 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
102 | "VF[%d] - Starting using HSI %02x.%02x\n" , |
103 | p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); |
104 | |
105 | return qed_spq_post(p_hwfn, p_ent, NULL); |
106 | } |
107 | |
108 | static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, |
109 | u32 concrete_vfid, u16 opaque_vfid) |
110 | { |
111 | struct vf_stop_ramrod_data *p_ramrod = NULL; |
112 | struct qed_spq_entry *p_ent = NULL; |
113 | struct qed_sp_init_data init_data; |
114 | int rc = -EINVAL; |
115 | |
116 | /* Get SPQ entry */ |
117 | memset(&init_data, 0, sizeof(init_data)); |
118 | init_data.cid = qed_spq_get_cid(p_hwfn); |
119 | init_data.opaque_fid = opaque_vfid; |
120 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
121 | |
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
125 | if (rc) |
126 | return rc; |
127 | |
128 | p_ramrod = &p_ent->ramrod.vf_stop; |
129 | |
130 | p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); |
131 | |
132 | return qed_spq_post(p_hwfn, p_ent, NULL); |
133 | } |
134 | |
135 | bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, |
136 | int rel_vf_id, |
137 | bool b_enabled_only, bool b_non_malicious) |
138 | { |
139 | if (!p_hwfn->pf_iov_info) { |
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
141 | return false; |
142 | } |
143 | |
144 | if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || |
145 | (rel_vf_id < 0)) |
146 | return false; |
147 | |
148 | if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && |
149 | b_enabled_only) |
150 | return false; |
151 | |
152 | if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && |
153 | b_non_malicious) |
154 | return false; |
155 | |
156 | return true; |
157 | } |
158 | |
159 | static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, |
160 | u16 relative_vf_id, |
161 | bool b_enabled_only) |
162 | { |
163 | struct qed_vf_info *vf = NULL; |
164 | |
165 | if (!p_hwfn->pf_iov_info) { |
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
167 | return NULL; |
168 | } |
169 | |
	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
		       __func__, relative_vf_id);
176 | |
177 | return vf; |
178 | } |
179 | |
180 | static struct qed_queue_cid * |
181 | qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) |
182 | { |
183 | int i; |
184 | |
185 | for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { |
186 | if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) |
187 | return p_queue->cids[i].p_cid; |
188 | } |
189 | |
190 | return NULL; |
191 | } |
192 | |
193 | enum qed_iov_validate_q_mode { |
194 | QED_IOV_VALIDATE_Q_NA, |
195 | QED_IOV_VALIDATE_Q_ENABLE, |
196 | QED_IOV_VALIDATE_Q_DISABLE, |
197 | }; |
198 | |
199 | static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, |
200 | struct qed_vf_info *p_vf, |
201 | u16 qid, |
202 | enum qed_iov_validate_q_mode mode, |
203 | bool b_is_tx) |
204 | { |
205 | int i; |
206 | |
207 | if (mode == QED_IOV_VALIDATE_Q_NA) |
208 | return true; |
209 | |
210 | for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { |
211 | struct qed_vf_queue_cid *p_qcid; |
212 | |
213 | p_qcid = &p_vf->vf_queues[qid].cids[i]; |
214 | |
215 | if (!p_qcid->p_cid) |
216 | continue; |
217 | |
218 | if (p_qcid->b_is_tx != b_is_tx) |
219 | continue; |
220 | |
221 | return mode == QED_IOV_VALIDATE_Q_ENABLE; |
222 | } |
223 | |
	/* If no valid CID was found, the queue is disabled */
225 | return mode == QED_IOV_VALIDATE_Q_DISABLE; |
226 | } |
227 | |
228 | static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, |
229 | struct qed_vf_info *p_vf, |
230 | u16 rx_qid, |
231 | enum qed_iov_validate_q_mode mode) |
232 | { |
233 | if (rx_qid >= p_vf->num_rxqs) { |
234 | DP_VERBOSE(p_hwfn, |
235 | QED_MSG_IOV, |
236 | "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n" , |
237 | p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); |
238 | return false; |
239 | } |
240 | |
	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
242 | } |
243 | |
244 | static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, |
245 | struct qed_vf_info *p_vf, |
246 | u16 tx_qid, |
247 | enum qed_iov_validate_q_mode mode) |
248 | { |
249 | if (tx_qid >= p_vf->num_txqs) { |
250 | DP_VERBOSE(p_hwfn, |
251 | QED_MSG_IOV, |
252 | "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n" , |
253 | p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); |
254 | return false; |
255 | } |
256 | |
	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
258 | } |
259 | |
260 | static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, |
261 | struct qed_vf_info *p_vf, u16 sb_idx) |
262 | { |
263 | int i; |
264 | |
265 | for (i = 0; i < p_vf->num_sbs; i++) |
266 | if (p_vf->igu_sbs[i] == sb_idx) |
267 | return true; |
268 | |
269 | DP_VERBOSE(p_hwfn, |
270 | QED_MSG_IOV, |
271 | "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n" , |
272 | p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); |
273 | |
274 | return false; |
275 | } |
276 | |
277 | static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, |
278 | struct qed_vf_info *p_vf) |
279 | { |
280 | u8 i; |
281 | |
282 | for (i = 0; i < p_vf->num_rxqs; i++) |
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
286 | return true; |
287 | |
288 | return false; |
289 | } |
290 | |
291 | static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, |
292 | struct qed_vf_info *p_vf) |
293 | { |
294 | u8 i; |
295 | |
296 | for (i = 0; i < p_vf->num_txqs; i++) |
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
300 | return true; |
301 | |
302 | return false; |
303 | } |
304 | |
305 | static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, |
306 | int vfid, struct qed_ptt *p_ptt) |
307 | { |
308 | struct qed_bulletin_content *p_bulletin; |
309 | int crc_size = sizeof(p_bulletin->crc); |
310 | struct qed_dmae_params params; |
311 | struct qed_vf_info *p_vf; |
312 | |
	p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
314 | if (!p_vf) |
315 | return -EINVAL; |
316 | |
317 | if (!p_vf->vf_bulletin) |
318 | return -EINVAL; |
319 | |
320 | p_bulletin = p_vf->bulletin.p_virt; |
321 | |
322 | /* Increment bulletin board version and compute crc */ |
323 | p_bulletin->version++; |
324 | p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, |
325 | p_vf->bulletin.size - crc_size); |
326 | |
327 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
328 | "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n" , |
329 | p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); |
330 | |
331 | /* propagate bulletin board via dmae to vm memory */ |
332 | memset(¶ms, 0, sizeof(params)); |
333 | SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); |
334 | params.dst_vfid = p_vf->abs_vf_id; |
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
338 | } |
339 | |
340 | static int qed_iov_pci_cfg_info(struct qed_dev *cdev) |
341 | { |
342 | struct qed_hw_sriov_info *iov = cdev->p_iov_info; |
343 | int pos = iov->pos; |
344 | |
	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
386 | |
387 | /* Some sanity checks */ |
388 | if (iov->num_vfs > NUM_OF_VFS(cdev) || |
389 | iov->total_vfs > NUM_OF_VFS(cdev)) { |
390 | /* This can happen only due to a bug. In this case we set |
391 | * num_vfs to zero to avoid memory corruption in the code that |
392 | * assumes max number of vfs |
393 | */ |
394 | DP_NOTICE(cdev, |
395 | "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n" , |
396 | iov->num_vfs); |
397 | |
398 | iov->num_vfs = 0; |
399 | iov->total_vfs = 0; |
400 | } |
401 | |
402 | return 0; |
403 | } |
404 | |
405 | static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) |
406 | { |
407 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; |
408 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; |
409 | struct qed_bulletin_content *p_bulletin_virt; |
410 | dma_addr_t req_p, rply_p, bulletin_p; |
411 | union pfvf_tlvs *p_reply_virt_addr; |
412 | union vfpf_tlvs *p_req_virt_addr; |
413 | u8 idx = 0; |
414 | |
415 | memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); |
416 | |
417 | p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; |
418 | req_p = p_iov_info->mbx_msg_phys_addr; |
419 | p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; |
420 | rply_p = p_iov_info->mbx_reply_phys_addr; |
421 | p_bulletin_virt = p_iov_info->p_bulletins; |
422 | bulletin_p = p_iov_info->bulletins_phys; |
423 | if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { |
424 | DP_ERR(p_hwfn, |
425 | "%s called without allocating mem first\n" , __func__); |
426 | return; |
427 | } |
428 | |
429 | for (idx = 0; idx < p_iov->total_vfs; idx++) { |
430 | struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; |
431 | u32 concrete; |
432 | |
433 | vf->vf_mbx.req_virt = p_req_virt_addr + idx; |
434 | vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); |
435 | vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; |
436 | vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); |
437 | |
438 | vf->state = VF_STOPPED; |
439 | vf->b_init = false; |
440 | |
441 | vf->bulletin.phys = idx * |
442 | sizeof(struct qed_bulletin_content) + |
443 | bulletin_p; |
444 | vf->bulletin.p_virt = p_bulletin_virt + idx; |
445 | vf->bulletin.size = sizeof(struct qed_bulletin_content); |
446 | |
447 | vf->relative_vf_id = idx; |
448 | vf->abs_vf_id = idx + p_iov->first_vf_in_pf; |
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
450 | vf->concrete_fid = concrete; |
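		/* Opaque fid layout, as composed here: PF's fid in the low
		 * byte, absolute VF id in the next byte.
		 */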
451 | vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | |
452 | (vf->abs_vf_id << 8); |
453 | vf->vport_id = idx + 1; |
454 | |
455 | vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; |
456 | vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; |
457 | } |
458 | } |
459 | |
460 | static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) |
461 | { |
462 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; |
463 | void **p_v_addr; |
464 | u16 num_vfs = 0; |
465 | |
466 | num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; |
467 | |
468 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
469 | "%s for %d VFs\n" , __func__, num_vfs); |
470 | |
471 | /* Allocate PF Mailbox buffer (per-VF) */ |
472 | p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; |
473 | p_v_addr = &p_iov_info->mbx_msg_virt_addr; |
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
478 | if (!*p_v_addr) |
479 | return -ENOMEM; |
480 | |
481 | /* Allocate PF Mailbox Reply buffer (per-VF) */ |
482 | p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; |
483 | p_v_addr = &p_iov_info->mbx_reply_virt_addr; |
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
488 | if (!*p_v_addr) |
489 | return -ENOMEM; |
490 | |
491 | p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * |
492 | num_vfs; |
493 | p_v_addr = &p_iov_info->p_bulletins; |
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
498 | if (!*p_v_addr) |
499 | return -ENOMEM; |
500 | |
501 | DP_VERBOSE(p_hwfn, |
502 | QED_MSG_IOV, |
503 | "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n" , |
504 | p_iov_info->mbx_msg_virt_addr, |
505 | (u64)p_iov_info->mbx_msg_phys_addr, |
506 | p_iov_info->mbx_reply_virt_addr, |
507 | (u64)p_iov_info->mbx_reply_phys_addr, |
508 | p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); |
509 | |
510 | return 0; |
511 | } |
512 | |
513 | static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) |
514 | { |
515 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; |
516 | |
	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
534 | } |
535 | |
536 | int qed_iov_alloc(struct qed_hwfn *p_hwfn) |
537 | { |
538 | struct qed_pf_iov *p_sriov; |
539 | |
540 | if (!IS_PF_SRIOV(p_hwfn)) { |
541 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
542 | "No SR-IOV - no need for IOV db\n" ); |
543 | return 0; |
544 | } |
545 | |
	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
547 | if (!p_sriov) |
548 | return -ENOMEM; |
549 | |
550 | p_hwfn->pf_iov_info = p_sriov; |
551 | |
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);
554 | |
555 | return qed_iov_allocate_vfdb(p_hwfn); |
556 | } |
557 | |
558 | void qed_iov_setup(struct qed_hwfn *p_hwfn) |
559 | { |
560 | if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) |
561 | return; |
562 | |
563 | qed_iov_setup_vfdb(p_hwfn); |
564 | } |
565 | |
566 | void qed_iov_free(struct qed_hwfn *p_hwfn) |
567 | { |
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
569 | |
570 | if (IS_PF_SRIOV_ALLOC(p_hwfn)) { |
571 | qed_iov_free_vfdb(p_hwfn); |
		kfree(p_hwfn->pf_iov_info);
573 | } |
574 | } |
575 | |
576 | void qed_iov_free_hw_info(struct qed_dev *cdev) |
577 | { |
	kfree(cdev->p_iov_info);
579 | cdev->p_iov_info = NULL; |
580 | } |
581 | |
582 | int qed_iov_hw_info(struct qed_hwfn *p_hwfn) |
583 | { |
584 | struct qed_dev *cdev = p_hwfn->cdev; |
585 | int pos; |
586 | int rc; |
587 | |
588 | if (is_kdump_kernel()) |
589 | return 0; |
590 | |
591 | if (IS_VF(p_hwfn->cdev)) |
592 | return 0; |
593 | |
594 | /* Learn the PCI configuration */ |
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
599 | return 0; |
600 | } |
601 | |
602 | /* Allocate a new struct for IOV information */ |
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
604 | if (!cdev->p_iov_info) |
605 | return -ENOMEM; |
606 | |
607 | cdev->p_iov_info->pos = pos; |
608 | |
609 | rc = qed_iov_pci_cfg_info(cdev); |
610 | if (rc) |
611 | return rc; |
612 | |
	/* We want PF IOV to be synonymous with the existence of p_iov_info;
614 | * In case the capability is published but there are no VFs, simply |
615 | * de-allocate the struct. |
616 | */ |
617 | if (!cdev->p_iov_info->total_vfs) { |
618 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
619 | "IOV capabilities, but no VFs are published\n" ); |
620 | kfree(objp: cdev->p_iov_info); |
621 | cdev->p_iov_info = NULL; |
622 | return 0; |
623 | } |
624 | |
	/* First VF index based on offset is tricky:
	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
	 *    provide the number for eng0. The 2nd engine's VFs would begin
	 *    after the first engine's VFs.
	 *  - If !ARI, VFs would start on the next device,
	 *    so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that offsets of (256 - pf_id) and above are
	 * reached only in the latter case to differentiate between the two.
	 */
634 | |
635 | if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { |
636 | u32 first = p_hwfn->cdev->p_iov_info->offset + |
637 | p_hwfn->abs_pf_id - 16; |
638 | |
639 | cdev->p_iov_info->first_vf_in_pf = first; |
640 | |
641 | if (QED_PATH_ID(p_hwfn)) |
642 | cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; |
643 | } else { |
644 | u32 first = p_hwfn->cdev->p_iov_info->offset + |
645 | p_hwfn->abs_pf_id - 256; |
646 | |
647 | cdev->p_iov_info->first_vf_in_pf = first; |
648 | } |
649 | |
650 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
651 | "First VF in hwfn 0x%08x\n" , |
652 | cdev->p_iov_info->first_vf_in_pf); |
653 | |
654 | return 0; |
655 | } |
656 | |
657 | static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, |
658 | int vfid, bool b_fail_malicious) |
659 | { |
660 | /* Check PF supports sriov */ |
661 | if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || |
662 | !IS_PF_SRIOV_ALLOC(p_hwfn)) |
663 | return false; |
664 | |
665 | /* Check VF validity */ |
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
667 | return false; |
668 | |
669 | return true; |
670 | } |
671 | |
672 | static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) |
673 | { |
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
675 | } |
676 | |
677 | static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, |
678 | u16 rel_vf_id, u8 to_disable) |
679 | { |
680 | struct qed_vf_info *vf; |
681 | int i; |
682 | |
683 | for_each_hwfn(cdev, i) { |
684 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
685 | |
		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
687 | if (!vf) |
688 | continue; |
689 | |
690 | vf->to_disable = to_disable; |
691 | } |
692 | } |
693 | |
694 | static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) |
695 | { |
696 | u16 i; |
697 | |
698 | if (!IS_QED_SRIOV(cdev)) |
699 | return; |
700 | |
701 | for (i = 0; i < cdev->p_iov_info->total_vfs; i++) |
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
703 | } |
704 | |
705 | static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, |
706 | struct qed_ptt *p_ptt, u8 abs_vfid) |
707 | { |
708 | qed_wr(p_hwfn, p_ptt, |
709 | PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, |
	       1 << (abs_vfid & 0x1f));
711 | } |
712 | |
713 | static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, |
714 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) |
715 | { |
716 | int i; |
717 | |
718 | /* Set VF masks and configuration - pretend */ |
	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
725 | |
726 | /* iterate over all queues, clear sb consumer */ |
727 | for (i = 0; i < vf->num_sbs; i++) |
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
731 | } |
732 | |
733 | static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, |
734 | struct qed_ptt *p_ptt, |
735 | struct qed_vf_info *vf, bool enable) |
736 | { |
737 | u32 igu_vf_conf; |
738 | |
	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
752 | } |
753 | |
754 | static int |
755 | qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn, |
756 | struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) |
757 | { |
758 | u8 current_max = 0; |
759 | int i; |
760 | |
761 | /* For AH onward, configuration is per-PF. Find maximum of all |
762 | * the currently enabled child VFs, and set the number to be that. |
763 | */ |
764 | if (!QED_IS_BB(p_hwfn->cdev)) { |
765 | qed_for_each_vf(p_hwfn, i) { |
766 | struct qed_vf_info *p_vf; |
767 | |
			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
769 | if (!p_vf) |
770 | continue; |
771 | |
772 | current_max = max_t(u8, current_max, p_vf->num_sbs); |
773 | } |
774 | } |
775 | |
776 | if (num_sbs > current_max) |
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);
779 | |
780 | return 0; |
781 | } |
782 | |
783 | static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, |
784 | struct qed_ptt *p_ptt, |
785 | struct qed_vf_info *vf) |
786 | { |
787 | u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; |
788 | int rc; |
789 | |
790 | /* It's possible VF was previously considered malicious - |
791 | * clear the indication even if we're only going to disable VF. |
792 | */ |
793 | vf->b_malicious = false; |
794 | |
795 | if (vf->to_disable) |
796 | return 0; |
797 | |
798 | DP_VERBOSE(p_hwfn, |
799 | QED_MSG_IOV, |
800 | "Enable internal access for vf %x [abs %x]\n" , |
801 | vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); |
802 | |
803 | qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); |
804 | |
805 | qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); |
806 | |
	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
822 | |
823 | vf->state = VF_FREE; |
824 | |
825 | return rc; |
826 | } |
827 | |
828 | /** |
829 | * qed_iov_config_perm_table() - Configure the permission zone table. |
830 | * |
831 | * @p_hwfn: HW device data. |
832 | * @p_ptt: PTT window for writing the registers. |
833 | * @vf: VF info data. |
834 | * @enable: The actual permission for this VF. |
835 | * |
836 | * In E4, queue zone permission table size is 320x9. There |
837 | * are 320 VF queues for single engine device (256 for dual |
838 | * engine device), and each entry has the following format: |
839 | * {Valid, VF[7:0]} |
840 | */ |
841 | static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, |
842 | struct qed_ptt *p_ptt, |
843 | struct qed_vf_info *vf, u8 enable) |
844 | { |
845 | u32 reg_addr, val; |
846 | u16 qzone_id = 0; |
847 | int qid; |
848 | |
849 | for (qid = 0; qid < vf->num_rxqs; qid++) { |
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
856 | } |
857 | } |
858 | |
859 | static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, |
860 | struct qed_ptt *p_ptt, |
861 | struct qed_vf_info *vf) |
862 | { |
863 | /* Reset vf in IGU - interrupts are still disabled */ |
864 | qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); |
865 | |
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
870 | } |
871 | |
872 | static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
873 | struct qed_ptt *p_ptt, |
874 | struct qed_vf_info *vf, u16 num_rx_queues) |
875 | { |
876 | struct qed_igu_block *p_block; |
877 | struct cau_sb_entry sb_entry; |
878 | int qid = 0; |
879 | u32 val = 0; |
880 | |
881 | if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) |
882 | num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; |
883 | p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; |
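
	/* Build the IGU CAM mapping-line value once: line owned by this VF
	 * (function number set, PF-valid cleared); the per-queue vector
	 * number is patched in inside the loop below.
	 */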
884 | |
885 | SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); |
886 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); |
887 | SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); |
888 | |
889 | for (qid = 0; qid < num_rx_queues; qid++) { |
		p_block = qed_get_igu_free_sb(p_hwfn, false);
891 | vf->igu_sbs[qid] = p_block->igu_sb_id; |
892 | p_block->status &= ~QED_IGU_STATUS_FREE; |
893 | SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); |
894 | |
895 | qed_wr(p_hwfn, p_ptt, |
896 | IGU_REG_MAPPING_MEMORY + |
897 | sizeof(u32) * p_block->igu_sb_id, val); |
898 | |
		/* Configure in CAU the IGU SBs that were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
907 | } |
908 | |
909 | vf->num_sbs = (u8)num_rx_queues; |
910 | |
911 | return vf->num_sbs; |
912 | } |
913 | |
914 | static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
915 | struct qed_ptt *p_ptt, |
916 | struct qed_vf_info *vf) |
917 | { |
918 | struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; |
919 | int idx, igu_id; |
920 | u32 addr, val; |
921 | |
922 | /* Invalidate igu CAM lines and mark them as free */ |
923 | for (idx = 0; idx < vf->num_sbs; idx++) { |
924 | igu_id = vf->igu_sbs[idx]; |
925 | addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; |
926 | |
		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);
930 | |
931 | p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; |
932 | p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; |
933 | } |
934 | |
935 | vf->num_sbs = 0; |
936 | } |
937 | |
938 | static void qed_iov_set_link(struct qed_hwfn *p_hwfn, |
939 | u16 vfid, |
940 | struct qed_mcp_link_params *params, |
941 | struct qed_mcp_link_state *link, |
942 | struct qed_mcp_link_capabilities *p_caps) |
943 | { |
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
947 | struct qed_bulletin_content *p_bulletin; |
948 | |
949 | if (!p_vf) |
950 | return; |
951 | |
952 | p_bulletin = p_vf->bulletin.p_virt; |
953 | p_bulletin->req_autoneg = params->speed.autoneg; |
954 | p_bulletin->req_adv_speed = params->speed.advertised_speeds; |
955 | p_bulletin->req_forced_speed = params->speed.forced_speed; |
956 | p_bulletin->req_autoneg_pause = params->pause.autoneg; |
957 | p_bulletin->req_forced_rx = params->pause.forced_rx; |
958 | p_bulletin->req_forced_tx = params->pause.forced_tx; |
959 | p_bulletin->req_loopback = params->loopback_mode; |
960 | |
961 | p_bulletin->link_up = link->link_up; |
962 | p_bulletin->speed = link->speed; |
963 | p_bulletin->full_duplex = link->full_duplex; |
964 | p_bulletin->autoneg = link->an; |
965 | p_bulletin->autoneg_complete = link->an_complete; |
966 | p_bulletin->parallel_detection = link->parallel_detection; |
967 | p_bulletin->pfc_enabled = link->pfc_enabled; |
968 | p_bulletin->partner_adv_speed = link->partner_adv_speed; |
969 | p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; |
970 | p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; |
971 | p_bulletin->partner_adv_pause = link->partner_adv_pause; |
972 | p_bulletin->sfp_tx_fault = link->sfp_tx_fault; |
973 | |
974 | p_bulletin->capability_speed = p_caps->speed_capabilities; |
975 | } |
976 | |
977 | static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, |
978 | struct qed_ptt *p_ptt, |
979 | struct qed_iov_vf_init_params *p_params) |
980 | { |
981 | struct qed_mcp_link_capabilities link_caps; |
982 | struct qed_mcp_link_params link_params; |
983 | struct qed_mcp_link_state link_state; |
	u8 num_of_vf_available_chains = 0;
985 | struct qed_vf_info *vf = NULL; |
986 | u16 qid, num_irqs; |
987 | int rc = 0; |
988 | u32 cids; |
989 | u8 i; |
990 | |
	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
994 | return -EINVAL; |
995 | } |
996 | |
997 | if (vf->b_init) { |
998 | DP_NOTICE(p_hwfn, "VF[%d] is already active.\n" , |
999 | p_params->rel_vf_id); |
1000 | return -EINVAL; |
1001 | } |
1002 | |
1003 | /* Perform sanity checking on the requested queue_id */ |
1004 | for (i = 0; i < p_params->num_queues; i++) { |
1005 | u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); |
1006 | u16 max_vf_qzone = min_vf_qzone + |
1007 | FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; |
1008 | |
1009 | qid = p_params->req_rx_queue[i]; |
1010 | if (qid < min_vf_qzone || qid > max_vf_qzone) { |
1011 | DP_NOTICE(p_hwfn, |
1012 | "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n" , |
1013 | qid, |
1014 | p_params->rel_vf_id, |
1015 | min_vf_qzone, max_vf_qzone); |
1016 | return -EINVAL; |
1017 | } |
1018 | |
1019 | qid = p_params->req_tx_queue[i]; |
1020 | if (qid > max_vf_qzone) { |
1021 | DP_NOTICE(p_hwfn, |
1022 | "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n" , |
1023 | qid, p_params->rel_vf_id, max_vf_qzone); |
1024 | return -EINVAL; |
1025 | } |
1026 | |
1027 | /* If client *really* wants, Tx qid can be shared with PF */ |
1028 | if (qid < min_vf_qzone) |
1029 | DP_VERBOSE(p_hwfn, |
1030 | QED_MSG_IOV, |
1031 | "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n" , |
1032 | p_params->rel_vf_id, qid, i); |
1033 | } |
1034 | |
1035 | /* Limit number of queues according to number of CIDs */ |
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
1042 | |
	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf, num_irqs);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
1048 | return -ENOMEM; |
1049 | } |
1050 | |
1051 | /* Choose queue number and index ranges */ |
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;
1054 | |
1055 | for (i = 0; i < vf->num_rxqs; i++) { |
1056 | struct qed_vf_queue *p_queue = &vf->vf_queues[i]; |
1057 | |
1058 | p_queue->fw_rx_qid = p_params->req_rx_queue[i]; |
1059 | p_queue->fw_tx_qid = p_params->req_tx_queue[i]; |
1060 | |
1061 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1062 | "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n" , |
1063 | vf->relative_vf_id, i, vf->igu_sbs[i], |
1064 | p_queue->fw_rx_qid, p_queue->fw_tx_qid); |
1065 | } |
1066 | |
1067 | /* Update the link configuration in bulletin */ |
1068 | memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), |
1069 | sizeof(link_params)); |
1070 | memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); |
1071 | memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), |
1072 | sizeof(link_caps)); |
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);
1075 | |
1076 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); |
1077 | if (!rc) { |
1078 | vf->b_init = true; |
1079 | |
1080 | if (IS_LEAD_HWFN(p_hwfn)) |
1081 | p_hwfn->cdev->p_iov_info->num_vfs++; |
1082 | } |
1083 | |
1084 | return rc; |
1085 | } |
1086 | |
1087 | static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, |
1088 | struct qed_ptt *p_ptt, u16 rel_vf_id) |
1089 | { |
1090 | struct qed_mcp_link_capabilities caps; |
1091 | struct qed_mcp_link_params params; |
1092 | struct qed_mcp_link_state link; |
1093 | struct qed_vf_info *vf = NULL; |
1094 | |
	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
1098 | return -EINVAL; |
1099 | } |
1100 | |
1101 | if (vf->bulletin.p_virt) |
1102 | memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); |
1103 | |
1104 | memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); |
1105 | |
1106 | /* Get the link configuration back in bulletin so |
1107 | * that when VFs are re-enabled they get the actual |
1108 | * link configuration. |
1109 | */ |
1110 | memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); |
1111 | memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); |
1112 | memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); |
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1114 | |
1115 | /* Forget the VF's acquisition message */ |
1116 | memset(&vf->acquire, 0, sizeof(vf->acquire)); |
1117 | |
	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
1121 | /* Disable Interrupts for VF */ |
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1126 | |
1127 | vf->num_rxqs = 0; |
1128 | vf->num_txqs = 0; |
1129 | qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); |
1130 | |
1131 | if (vf->b_init) { |
1132 | vf->b_init = false; |
1133 | |
1134 | if (IS_LEAD_HWFN(p_hwfn)) |
1135 | p_hwfn->cdev->p_iov_info->num_vfs--; |
1136 | } |
1137 | |
1138 | return 0; |
1139 | } |
1140 | |
1141 | static bool qed_iov_tlv_supported(u16 tlvtype) |
1142 | { |
1143 | return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; |
1144 | } |
1145 | |
1146 | /* place a given tlv on the tlv buffer, continuing current tlv list */ |
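/* A channel message is a chain of {type, length} headers, each immediately
 * followed by its payload and terminated by a CHANNEL_TLV_LIST_END entry.
 */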
1147 | void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) |
1148 | { |
1149 | struct channel_tlv *tl = (struct channel_tlv *)*offset; |
1150 | |
1151 | tl->type = type; |
1152 | tl->length = length; |
1153 | |
1154 | /* Offset should keep pointing to next TLV (the end of the last) */ |
1155 | *offset += length; |
1156 | |
1157 | /* Return a pointer to the start of the added tlv */ |
1158 | return *offset - length; |
1159 | } |
1160 | |
1161 | /* list the types and lengths of the tlvs on the buffer */ |
1162 | void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) |
1163 | { |
1164 | u16 i = 1, total_length = 0; |
1165 | struct channel_tlv *tlv; |
1166 | |
1167 | do { |
1168 | tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); |
1169 | |
1170 | /* output tlv */ |
1171 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1172 | "TLV number %d: type %d, length %d\n" , |
1173 | i, tlv->type, tlv->length); |
1174 | |
1175 | if (tlv->type == CHANNEL_TLV_LIST_END) |
1176 | return; |
1177 | |
1178 | /* Validate entry - protect against malicious VFs */ |
1179 | if (!tlv->length) { |
1180 | DP_NOTICE(p_hwfn, "TLV of length 0 found\n" ); |
1181 | return; |
1182 | } |
1183 | |
1184 | total_length += tlv->length; |
1185 | |
1186 | if (total_length >= sizeof(struct tlv_buffer_size)) { |
1187 | DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n" ); |
1188 | return; |
1189 | } |
1190 | |
1191 | i++; |
1192 | } while (1); |
1193 | } |
1194 | |
1195 | static void qed_iov_send_response(struct qed_hwfn *p_hwfn, |
1196 | struct qed_ptt *p_ptt, |
1197 | struct qed_vf_info *p_vf, |
1198 | u16 length, u8 status) |
1199 | { |
1200 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; |
1201 | struct qed_dmae_params params; |
1202 | u8 eng_vf_id; |
1203 | |
1204 | mbx->reply_virt->default_resp.hdr.status = status; |
1205 | |
	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
1207 | |
1208 | eng_vf_id = p_vf->abs_vf_id; |
1209 | |
1210 | memset(¶ms, 0, sizeof(params)); |
1211 | SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); |
1212 | params.dst_vfid = eng_vf_id; |
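
	/* Copy the reply body first; the leading u64, which carries the
	 * header the VF polls on, is DMAed only afterwards (below), so the
	 * VF cannot observe a complete header before the body has landed.
	 */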
1213 | |
	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);
1219 | |
1220 | /* Once PF copies the rc to the VF, the latter can continue |
1221 | * and send an additional message. So we have to make sure the |
1222 | * channel would be re-set to ready prior to that. |
1223 | */ |
1224 | REG_WR(p_hwfn, |
1225 | GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, |
1226 | USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); |
1227 | |
	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
1231 | } |
1232 | |
1233 | static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, |
1234 | enum qed_iov_vport_update_flag flag) |
1235 | { |
1236 | switch (flag) { |
1237 | case QED_IOV_VP_UPDATE_ACTIVATE: |
1238 | return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; |
1239 | case QED_IOV_VP_UPDATE_VLAN_STRIP: |
1240 | return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; |
1241 | case QED_IOV_VP_UPDATE_TX_SWITCH: |
1242 | return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; |
1243 | case QED_IOV_VP_UPDATE_MCAST: |
1244 | return CHANNEL_TLV_VPORT_UPDATE_MCAST; |
1245 | case QED_IOV_VP_UPDATE_ACCEPT_PARAM: |
1246 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; |
1247 | case QED_IOV_VP_UPDATE_RSS: |
1248 | return CHANNEL_TLV_VPORT_UPDATE_RSS; |
1249 | case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: |
1250 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; |
1251 | case QED_IOV_VP_UPDATE_SGE_TPA: |
1252 | return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; |
1253 | default: |
1254 | return 0; |
1255 | } |
1256 | } |
1257 | |
1258 | static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, |
1259 | struct qed_vf_info *p_vf, |
1260 | struct qed_iov_vf_mbx *p_mbx, |
1261 | u8 status, |
1262 | u16 tlvs_mask, u16 tlvs_accepted) |
1263 | { |
1264 | struct pfvf_def_resp_tlv *resp; |
1265 | u16 size, total_len, i; |
1266 | |
1267 | memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); |
1268 | p_mbx->offset = (u8 *)p_mbx->reply_virt; |
1269 | size = sizeof(struct pfvf_def_resp_tlv); |
1270 | total_len = size; |
1271 | |
	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1273 | |
1274 | /* Prepare response for all extended tlvs if they are found by PF */ |
1275 | for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { |
1276 | if (!(tlvs_mask & BIT(i))) |
1277 | continue; |
1278 | |
		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);
1281 | |
1282 | if (tlvs_accepted & BIT(i)) |
1283 | resp->hdr.status = status; |
1284 | else |
1285 | resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; |
1286 | |
1287 | DP_VERBOSE(p_hwfn, |
1288 | QED_MSG_IOV, |
1289 | "VF[%d] - vport_update response: TLV %d, status %02x\n" , |
1290 | p_vf->relative_vf_id, |
1291 | qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); |
1292 | |
1293 | total_len += size; |
1294 | } |
1295 | |
	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));
1298 | |
1299 | return total_len; |
1300 | } |
1301 | |
1302 | static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, |
1303 | struct qed_ptt *p_ptt, |
1304 | struct qed_vf_info *vf_info, |
1305 | u16 type, u16 length, u8 status) |
1306 | { |
1307 | struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; |
1308 | |
1309 | mbx->offset = (u8 *)mbx->reply_virt; |
1310 | |
	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1316 | } |
1317 | |
1318 | static struct |
1319 | qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, |
1320 | u16 relative_vf_id, |
1321 | bool b_enabled_only) |
1322 | { |
1323 | struct qed_vf_info *vf = NULL; |
1324 | |
1325 | vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); |
1326 | if (!vf) |
1327 | return NULL; |
1328 | |
1329 | return &vf->p_vf_info; |
1330 | } |
1331 | |
1332 | static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) |
1333 | { |
1334 | struct qed_public_vf_info *vf_info; |
1335 | |
	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
1337 | |
1338 | if (!vf_info) |
1339 | return; |
1340 | |
1341 | /* Clear the VF mac */ |
	eth_zero_addr(vf_info->mac);
1343 | |
1344 | vf_info->rx_accept_mode = 0; |
1345 | vf_info->tx_accept_mode = 0; |
1346 | } |
1347 | |
1348 | static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, |
1349 | struct qed_vf_info *p_vf) |
1350 | { |
1351 | u32 i, j; |
1352 | |
1353 | p_vf->vf_bulletin = 0; |
1354 | p_vf->vport_instance = 0; |
1355 | p_vf->configured_features = 0; |
1356 | |
1357 | /* If VF previously requested less resources, go back to default */ |
1358 | p_vf->num_rxqs = p_vf->num_sbs; |
1359 | p_vf->num_txqs = p_vf->num_sbs; |
1360 | |
1361 | p_vf->num_active_rxqs = 0; |
1362 | |
1363 | for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { |
1364 | struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; |
1365 | |
1366 | for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { |
1367 | if (!p_queue->cids[j].p_cid) |
1368 | continue; |
1369 | |
			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
1372 | p_queue->cids[j].p_cid = NULL; |
1373 | } |
1374 | } |
1375 | |
1376 | memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); |
1377 | memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); |
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
1379 | } |
1380 | |
/* Returns either 0, or log2 of the VF doorbell BAR size */
1382 | static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, |
1383 | struct qed_ptt *p_ptt) |
1384 | { |
1385 | u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); |
1386 | |
1387 | if (val) |
1388 | return val + 11; |
1389 | return 0; |
1390 | } |
1391 | |
1392 | static void |
1393 | qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, |
1394 | struct qed_ptt *p_ptt, |
1395 | struct qed_vf_info *p_vf, |
1396 | struct vf_pf_resc_request *p_req, |
1397 | struct pf_vf_resc *p_resp) |
1398 | { |
1399 | u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; |
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
1402 | u32 bar_size; |
1403 | |
1404 | p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); |
1405 | |
	/* If VF didn't bother asking for QIDs then don't bother limiting
1407 | * number of CIDs. The VF doesn't care about the number, and this |
1408 | * has the likely result of causing an additional acquisition. |
1409 | */ |
1410 | if (!(p_vf->acquire.vfdev_info.capabilities & |
1411 | VFPF_ACQUIRE_CAP_QUEUE_QIDS)) |
1412 | return; |
1413 | |
1414 | /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount |
1415 | * that would make sure doorbells for all CIDs fall within the bar. |
1416 | * If it doesn't, make sure regview window is sufficient. |
1417 | */ |
1418 | if (p_vf->acquire.vfdev_info.capabilities & |
1419 | VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { |
1420 | bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); |
1421 | if (bar_size) |
1422 | bar_size = 1 << bar_size; |
1423 | |
1424 | if (p_hwfn->cdev->num_hwfns > 1) |
1425 | bar_size /= 2; |
1426 | } else { |
1427 | bar_size = PXP_VF_BAR0_DQ_LENGTH; |
1428 | } |
1429 | |
1430 | if (bar_size / db_size < 256) |
1431 | p_resp->num_cids = min_t(u8, p_resp->num_cids, |
1432 | (u8)(bar_size / db_size)); |
1433 | } |
1434 | |
1435 | static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, |
1436 | struct qed_ptt *p_ptt, |
1437 | struct qed_vf_info *p_vf, |
1438 | struct vf_pf_resc_request *p_req, |
1439 | struct pf_vf_resc *p_resp) |
1440 | { |
1441 | u8 i; |
1442 | |
1443 | /* Queue related information */ |
1444 | p_resp->num_rxqs = p_vf->num_rxqs; |
1445 | p_resp->num_txqs = p_vf->num_txqs; |
1446 | p_resp->num_sbs = p_vf->num_sbs; |
1447 | |
1448 | for (i = 0; i < p_resp->num_sbs; i++) { |
1449 | p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; |
1450 | p_resp->hw_sbs[i].sb_qid = 0; |
1451 | } |
1452 | |
1453 | /* These fields are filled for backward compatibility. |
1454 | * Unused by modern vfs. |
1455 | */ |
1456 | for (i = 0; i < p_resp->num_rxqs; i++) { |
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
1459 | p_resp->cid[i] = i; |
1460 | } |
1461 | |
1462 | /* Filter related information */ |
1463 | p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, |
1464 | p_req->num_mac_filters); |
1465 | p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, |
1466 | p_req->num_vlan_filters); |
1467 | |
1468 | qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); |
1469 | |
1470 | /* This isn't really needed/enforced, but some legacy VFs might depend |
1471 | * on the correct filling of this field. |
1472 | */ |
1473 | p_resp->num_mc_filters = QED_MAX_MC_ADDRS; |
1474 | |
1475 | /* Validate sufficient resources for VF */ |
1476 | if (p_resp->num_rxqs < p_req->num_rxqs || |
1477 | p_resp->num_txqs < p_req->num_txqs || |
1478 | p_resp->num_sbs < p_req->num_sbs || |
1479 | p_resp->num_mac_filters < p_req->num_mac_filters || |
1480 | p_resp->num_vlan_filters < p_req->num_vlan_filters || |
1481 | p_resp->num_mc_filters < p_req->num_mc_filters || |
1482 | p_resp->num_cids < p_req->num_cids) { |
1483 | DP_VERBOSE(p_hwfn, |
1484 | QED_MSG_IOV, |
1485 | "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n" , |
1486 | p_vf->abs_vf_id, |
1487 | p_req->num_rxqs, |
1488 | p_resp->num_rxqs, |
1489 | p_req->num_rxqs, |
1490 | p_resp->num_txqs, |
1491 | p_req->num_sbs, |
1492 | p_resp->num_sbs, |
1493 | p_req->num_mac_filters, |
1494 | p_resp->num_mac_filters, |
1495 | p_req->num_vlan_filters, |
1496 | p_resp->num_vlan_filters, |
1497 | p_req->num_mc_filters, |
1498 | p_resp->num_mc_filters, |
1499 | p_req->num_cids, p_resp->num_cids); |
1500 | |
1501 | /* Some legacy OSes are incapable of correctly handling this |
1502 | * failure. |
1503 | */ |
1504 | if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == |
1505 | ETH_HSI_VER_NO_PKT_LEN_TUNN) && |
1506 | (p_vf->acquire.vfdev_info.os_type == |
1507 | VFPF_ACQUIRE_OS_WINDOWS)) |
1508 | return PFVF_STATUS_SUCCESS; |
1509 | |
1510 | return PFVF_STATUS_NO_RESOURCE; |
1511 | } |
1512 | |
1513 | return PFVF_STATUS_SUCCESS; |
1514 | } |
1515 | |
1516 | static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, |
1517 | struct pfvf_stats_info *p_stats) |
1518 | { |
1519 | p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + |
1520 | offsetof(struct mstorm_vf_zone, |
1521 | non_trigger.eth_queue_stat); |
1522 | p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); |
1523 | p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + |
1524 | offsetof(struct ustorm_vf_zone, |
1525 | non_trigger.eth_queue_stat); |
1526 | p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); |
1527 | p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + |
1528 | offsetof(struct pstorm_vf_zone, |
1529 | non_trigger.eth_queue_stat); |
1530 | p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); |
1531 | p_stats->tstats.address = 0; |
1532 | p_stats->tstats.len = 0; |
1533 | } |
1534 | |
1535 | static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, |
1536 | struct qed_ptt *p_ptt, |
1537 | struct qed_vf_info *vf) |
1538 | { |
1539 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
1540 | struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; |
1541 | struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; |
1542 | struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; |
1543 | u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; |
1544 | struct pf_vf_resc *resc = &resp->resc; |
1545 | int rc; |
1546 | |
1547 | memset(resp, 0, sizeof(*resp)); |
1548 | |
1549 | /* Write the PF version so that VF would know which version |
1550 | * is supported - might be later overridden. This guarantees that |
1551 | * VF could recognize legacy PF based on lack of versions in reply. |
1552 | */ |
1553 | pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; |
1554 | pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; |
1555 | |
1556 | if (vf->state != VF_FREE && vf->state != VF_STOPPED) { |
1557 | DP_VERBOSE(p_hwfn, |
1558 | QED_MSG_IOV, |
1559 | "VF[%d] sent ACQUIRE but is already in state %d - fail request\n" , |
1560 | vf->abs_vf_id, vf->state); |
1561 | goto out; |
1562 | } |
1563 | |
1564 | /* Validate FW compatibility */ |
1565 | if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { |
1566 | if (req->vfdev_info.capabilities & |
1567 | VFPF_ACQUIRE_CAP_PRE_FP_HSI) { |
1568 | struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; |
1569 | |
1570 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1571 | "VF[%d] is pre-fastpath HSI\n" , |
1572 | vf->abs_vf_id); |
1573 | p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; |
1574 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; |
1575 | } else { |
1576 | DP_INFO(p_hwfn, |
1577 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n" , |
1578 | vf->abs_vf_id, |
1579 | req->vfdev_info.eth_fp_hsi_major, |
1580 | req->vfdev_info.eth_fp_hsi_minor, |
1581 | ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); |
1582 | |
1583 | goto out; |
1584 | } |
1585 | } |
1586 | |
1587 | /* On 100g PFs, prevent old VFs from loading */ |
1588 | if ((p_hwfn->cdev->num_hwfns > 1) && |
1589 | !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { |
1590 | DP_INFO(p_hwfn, |
1591 | "VF[%d] is running an old driver that doesn't support 100g\n" , |
1592 | vf->abs_vf_id); |
1593 | goto out; |
1594 | } |
1595 | |
1596 | /* Store the acquire message */ |
1597 | memcpy(&vf->acquire, req, sizeof(vf->acquire)); |
1598 | |
1599 | vf->opaque_fid = req->vfdev_info.opaque_fid; |
1600 | |
1601 | vf->vf_bulletin = req->bulletin_addr; |
1602 | vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? |
1603 | vf->bulletin.size : req->bulletin_size; |
1604 | |
1605 | /* fill in pfdev info */ |
1606 | pfdev_info->chip_num = p_hwfn->cdev->chip_num; |
1607 | pfdev_info->db_size = 0; |
1608 | pfdev_info->indices_per_sb = PIS_PER_SB; |
1609 | |
1610 | pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | |
1611 | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; |
1612 | if (p_hwfn->cdev->num_hwfns > 1) |
1613 | pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; |
1614 | |
1615 | /* Share our ability to use multiple queue-ids only with VFs |
1616 | * that request it. |
1617 | */ |
1618 | if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) |
1619 | pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; |
1620 | |
1621 | /* Share the sizes of the bars with VF */ |
1622 | resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); |
1623 | |
qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1625 | |
1626 | memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); |
1627 | |
1628 | pfdev_info->fw_major = FW_MAJOR_VERSION; |
1629 | pfdev_info->fw_minor = FW_MINOR_VERSION; |
1630 | pfdev_info->fw_rev = FW_REVISION_VERSION; |
1631 | pfdev_info->fw_eng = FW_ENGINEERING_VERSION; |
1632 | |
1633 | /* Incorrect when legacy, but doesn't matter as legacy isn't reading |
1634 | * this field. |
1635 | */ |
1636 | pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, |
1637 | req->vfdev_info.eth_fp_hsi_minor); |
1638 | pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; |
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1640 | |
1641 | pfdev_info->dev_type = p_hwfn->cdev->type; |
1642 | pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; |
1643 | |
1644 | /* Fill resources available to VF; Make sure there are enough to |
1645 | * satisfy the VF's request. |
1646 | */ |
vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
&req->resc_request, resc);
if (vfpf_status != PFVF_STATUS_SUCCESS)
goto out;

/* Start the VF in FW */
rc = qed_sp_vf_start(p_hwfn, vf);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
1656 | vfpf_status = PFVF_STATUS_FAILURE; |
1657 | goto out; |
1658 | } |
1659 | |
1660 | /* Fill agreed size of bulletin board in response */ |
1661 | resp->bulletin_size = vf->bulletin.size; |
qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1668 | vf->abs_vf_id, |
1669 | resp->pfdev_info.chip_num, |
1670 | resp->pfdev_info.db_size, |
1671 | resp->pfdev_info.indices_per_sb, |
1672 | resp->pfdev_info.capabilities, |
1673 | resc->num_rxqs, |
1674 | resc->num_txqs, |
1675 | resc->num_sbs, |
1676 | resc->num_mac_filters, |
1677 | resc->num_vlan_filters); |
1678 | vf->state = VF_ACQUIRED; |
1679 | |
1680 | /* Prepare Response */ |
1681 | out: |
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1684 | } |
1685 | |
1686 | static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, |
1687 | struct qed_vf_info *p_vf, bool val) |
1688 | { |
1689 | struct qed_sp_vport_update_params params; |
1690 | int rc; |
1691 | |
1692 | if (val == p_vf->spoof_chk) { |
1693 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1694 | "Spoofchk value[%d] is already configured\n" , val); |
1695 | return 0; |
1696 | } |
1697 | |
1698 | memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); |
1699 | params.opaque_fid = p_vf->opaque_fid; |
1700 | params.vport_id = p_vf->vport_id; |
1701 | params.update_anti_spoofing_en_flg = 1; |
1702 | params.anti_spoofing_en = val; |
1703 | |
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
if (!rc) {
p_vf->spoof_chk = val;
p_vf->req_spoofchk_val = p_vf->spoof_chk;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Spoofchk val[%d] configured\n", val);
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Spoofchk configuration[val:%d] failed for VF[%d]\n",
1713 | val, p_vf->relative_vf_id); |
1714 | } |
1715 | |
1716 | return rc; |
1717 | } |
1718 | |
1719 | static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, |
1720 | struct qed_vf_info *p_vf) |
1721 | { |
1722 | struct qed_filter_ucast filter; |
1723 | int rc = 0; |
1724 | int i; |
1725 | |
1726 | memset(&filter, 0, sizeof(filter)); |
1727 | filter.is_rx_filter = 1; |
1728 | filter.is_tx_filter = 1; |
1729 | filter.vport_to_add_to = p_vf->vport_id; |
1730 | filter.opcode = QED_FILTER_ADD; |
1731 | |
1732 | /* Reconfigure vlans */ |
1733 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { |
1734 | if (!p_vf->shadow_config.vlans[i].used) |
1735 | continue; |
1736 | |
1737 | filter.type = QED_FILTER_VLAN; |
1738 | filter.vlan = p_vf->shadow_config.vlans[i].vid; |
1739 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1740 | "Reconfiguring VLAN [0x%04x] for VF [%04x]\n" , |
1741 | filter.vlan, p_vf->relative_vf_id); |
1742 | rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid: p_vf->opaque_fid, |
1743 | p_filter_cmd: &filter, comp_mode: QED_SPQ_MODE_CB, NULL); |
1744 | if (rc) { |
1745 | DP_NOTICE(p_hwfn, |
1746 | "Failed to configure VLAN [%04x] to VF [%04x]\n" , |
1747 | filter.vlan, p_vf->relative_vf_id); |
1748 | break; |
1749 | } |
1750 | } |
1751 | |
1752 | return rc; |
1753 | } |
1754 | |
1755 | static int |
1756 | qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, |
1757 | struct qed_vf_info *p_vf, u64 events) |
1758 | { |
1759 | int rc = 0; |
1760 | |
1761 | if ((events & BIT(VLAN_ADDR_FORCED)) && |
!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED)))
1763 | rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); |
1764 | |
1765 | return rc; |
1766 | } |
1767 | |
1768 | static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, |
1769 | struct qed_vf_info *p_vf, u64 events) |
1770 | { |
1771 | int rc = 0; |
1772 | struct qed_filter_ucast filter; |
1773 | |
1774 | if (!p_vf->vport_instance) |
1775 | return -EINVAL; |
1776 | |
1777 | if ((events & BIT(MAC_ADDR_FORCED)) || |
1778 | p_vf->p_vf_info.is_trusted_configured) { |
1779 | /* Since there's no way [currently] of removing the MAC, |
1780 | * we can always assume this means we need to force it. |
1781 | */ |
1782 | memset(&filter, 0, sizeof(filter)); |
1783 | filter.type = QED_FILTER_MAC; |
1784 | filter.opcode = QED_FILTER_REPLACE; |
1785 | filter.is_rx_filter = 1; |
1786 | filter.is_tx_filter = 1; |
1787 | filter.vport_to_add_to = p_vf->vport_id; |
ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
&filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure MAC for VF\n");
1795 | return rc; |
1796 | } |
1797 | if (p_vf->p_vf_info.is_trusted_configured) |
1798 | p_vf->configured_features |= |
1799 | BIT(VFPF_BULLETIN_MAC_ADDR); |
1800 | else |
1801 | p_vf->configured_features |= |
1802 | BIT(MAC_ADDR_FORCED); |
1803 | } |
1804 | |
1805 | if (events & BIT(VLAN_ADDR_FORCED)) { |
1806 | struct qed_sp_vport_update_params vport_update; |
1807 | u8 removal; |
1808 | int i; |
1809 | |
1810 | memset(&filter, 0, sizeof(filter)); |
1811 | filter.type = QED_FILTER_VLAN; |
1812 | filter.is_rx_filter = 1; |
1813 | filter.is_tx_filter = 1; |
1814 | filter.vport_to_add_to = p_vf->vport_id; |
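/* A pvid of zero clears the forced VLAN, so flush the VF's VLAN
 * filters; any other value replaces the current filter.
 */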
1815 | filter.vlan = p_vf->bulletin.p_virt->pvid; |
1816 | filter.opcode = filter.vlan ? QED_FILTER_REPLACE : |
1817 | QED_FILTER_FLUSH; |
1818 | |
1819 | /* Send the ramrod */ |
rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
&filter, QED_SPQ_MODE_CB, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure VLAN for VF\n");
1825 | return rc; |
1826 | } |
1827 | |
1828 | /* Update the default-vlan & silent vlan stripping */ |
1829 | memset(&vport_update, 0, sizeof(vport_update)); |
1830 | vport_update.opaque_fid = p_vf->opaque_fid; |
1831 | vport_update.vport_id = p_vf->vport_id; |
1832 | vport_update.update_default_vlan_enable_flg = 1; |
1833 | vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; |
1834 | vport_update.update_default_vlan_flg = 1; |
1835 | vport_update.default_vlan = filter.vlan; |
1836 | |
1837 | vport_update.update_inner_vlan_removal_flg = 1; |
1838 | removal = filter.vlan ? 1 |
1839 | : p_vf->shadow_config.inner_vlan_removal; |
1840 | vport_update.inner_vlan_removal_flg = removal; |
1841 | vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; |
rc = qed_sp_vport_update(p_hwfn,
&vport_update,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"PF failed to configure VF vport for vlan\n");
1848 | return rc; |
1849 | } |
1850 | |
1851 | /* Update all the Rx queues */ |
1852 | for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { |
1853 | struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; |
1854 | struct qed_queue_cid *p_cid = NULL; |
1855 | |
1856 | /* There can be at most 1 Rx queue on qzone. Find it */ |
1857 | p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); |
1858 | if (!p_cid) |
1859 | continue; |
1860 | |
rc = qed_sp_eth_rx_queues_update(p_hwfn,
(void **)&p_cid,
1, 0, 1,
QED_SPQ_MODE_EBLOCK,
NULL);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send Rx update for queue[0x%04x]\n",
1869 | p_cid->rel.queue_id); |
1870 | return rc; |
1871 | } |
1872 | } |
1873 | |
1874 | if (filter.vlan) |
p_vf->configured_features |= BIT(VLAN_ADDR_FORCED);
1876 | else |
1877 | p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); |
1878 | } |
1879 | |
1880 | /* If forced features are terminated, we need to configure the shadow |
1881 | * configuration back again. |
1882 | */ |
1883 | if (events) |
1884 | qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); |
1885 | |
1886 | return rc; |
1887 | } |
1888 | |
1889 | static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, |
1890 | struct qed_ptt *p_ptt, |
1891 | struct qed_vf_info *vf) |
1892 | { |
1893 | struct qed_sp_vport_start_params params = { 0 }; |
1894 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
1895 | struct vfpf_vport_start_tlv *start; |
1896 | u8 status = PFVF_STATUS_SUCCESS; |
1897 | struct qed_vf_info *vf_info; |
1898 | u64 *p_bitmap; |
1899 | int sb_id; |
1900 | int rc; |
1901 | |
vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
1906 | vf->relative_vf_id); |
1907 | return; |
1908 | } |
1909 | |
1910 | vf->state = VF_ENABLED; |
1911 | start = &mbx->req_virt->start_vport; |
1912 | |
1913 | qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); |
1914 | |
1915 | /* Initialize Status block in CAU */ |
1916 | for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { |
1917 | if (!start->sb_addr[sb_id]) { |
1918 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1919 | "VF[%d] did not fill the address of SB %d\n" , |
1920 | vf->relative_vf_id, sb_id); |
1921 | break; |
1922 | } |
1923 | |
qed_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1927 | } |
1928 | |
1929 | vf->mtu = start->mtu; |
1930 | vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; |
1931 | |
1932 | /* Take into consideration configuration forced by hypervisor; |
1933 | * If none is configured, use the supplied VF values [for old |
1934 | * vfs that would still be fine, since they passed '0' as padding]. |
1935 | */ |
1936 | p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; |
1937 | if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { |
1938 | u8 vf_req = start->only_untagged; |
1939 | |
1940 | vf_info->bulletin.p_virt->default_only_untagged = vf_req; |
*p_bitmap |= BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT);
1942 | } |
1943 | |
1944 | params.tpa_mode = start->tpa_mode; |
1945 | params.remove_inner_vlan = start->inner_vlan_removal; |
1946 | params.tx_switching = true; |
1947 | |
1948 | params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; |
1949 | params.drop_ttl0 = false; |
1950 | params.concrete_fid = vf->concrete_fid; |
1951 | params.opaque_fid = vf->opaque_fid; |
1952 | params.vport_id = vf->vport_id; |
1953 | params.max_buffers_per_cqe = start->max_buffers_per_cqe; |
1954 | params.mtu = vf->mtu; |
1955 | |
1956 | /* Non trusted VFs should enable control frame filtering */ |
1957 | params.check_mac = !vf->p_vf_info.is_trusted_configured; |
1958 | |
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
DP_ERR(p_hwfn,
"%s returned error %d\n", __func__, rc);
1963 | status = PFVF_STATUS_FAILURE; |
1964 | } else { |
1965 | vf->vport_instance++; |
1966 | |
1967 | /* Force configuration if needed on the newly opened vport */ |
qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
}
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
sizeof(struct pfvf_def_resp_tlv), status);
1974 | } |
1975 | |
1976 | static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, |
1977 | struct qed_ptt *p_ptt, |
1978 | struct qed_vf_info *vf) |
1979 | { |
1980 | u8 status = PFVF_STATUS_SUCCESS; |
1981 | int rc; |
1982 | |
1983 | vf->vport_instance--; |
1984 | vf->spoof_chk = false; |
1985 | |
if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
(qed_iov_validate_active_txq(p_hwfn, vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn,
"VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
1991 | vf->abs_vf_id); |
1992 | status = PFVF_STATUS_MALICIOUS; |
1993 | goto out; |
1994 | } |
1995 | |
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
DP_ERR(p_hwfn, "%s returned error %d\n",
__func__, rc);
2000 | status = PFVF_STATUS_FAILURE; |
2001 | } |
2002 | |
2003 | /* Forget the configuration on the vport */ |
2004 | vf->configured_features = 0; |
2005 | memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); |
2006 | |
2007 | out: |
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
2010 | } |
2011 | |
2012 | static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, |
2013 | struct qed_ptt *p_ptt, |
2014 | struct qed_vf_info *vf, |
2015 | u8 status, bool b_legacy) |
2016 | { |
2017 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2018 | struct pfvf_start_queue_resp_tlv *p_tlv; |
2019 | struct vfpf_start_rxq_tlv *req; |
2020 | u16 length; |
2021 | |
2022 | mbx->offset = (u8 *)mbx->reply_virt; |
2023 | |
2024 | /* Taking a bigger struct instead of adding a TLV to list was a |
2025 | * mistake, but one which we're now stuck with, as some older |
2026 | * clients assume the size of the previous response. |
2027 | */ |
2028 | if (!b_legacy) |
2029 | length = sizeof(*p_tlv); |
2030 | else |
2031 | length = sizeof(struct pfvf_def_resp_tlv); |
2032 | |
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
2037 | |
2038 | /* Update the TLV with the response */ |
2039 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { |
2040 | req = &mbx->req_virt->start_rxq; |
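/* Point the VF at its Rx producer: the producers live in the
 * Mstorm VF zone in BAR0, one eth_rx_prod_data entry per Rx queue.
 */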
2041 | p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + |
2042 | offsetof(struct mstorm_vf_zone, |
2043 | non_trigger.eth_rx_queue_producers) + |
2044 | sizeof(struct eth_rx_prod_data) * req->rx_qid; |
2045 | } |
2046 | |
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2048 | } |
2049 | |
2050 | static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, |
2051 | struct qed_vf_info *p_vf, bool b_is_tx) |
2052 | { |
2053 | struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; |
2054 | struct vfpf_qid_tlv *p_qid_tlv; |
2055 | |
/* Search for the qid if the VF indicated it's going to provide it */
2057 | if (!(p_vf->acquire.vfdev_info.capabilities & |
2058 | VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { |
2059 | if (b_is_tx) |
2060 | return QED_IOV_LEGACY_QID_TX; |
2061 | else |
2062 | return QED_IOV_LEGACY_QID_RX; |
2063 | } |
2064 | |
2065 | p_qid_tlv = (struct vfpf_qid_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
CHANNEL_TLV_QID);
2068 | if (!p_qid_tlv) { |
2069 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2070 | "VF[%2x]: Failed to provide qid\n" , |
2071 | p_vf->relative_vf_id); |
2072 | |
2073 | return QED_IOV_QID_INVALID; |
2074 | } |
2075 | |
2076 | if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { |
2077 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2078 | "VF[%02x]: Provided qid out-of-bounds %02x\n" , |
2079 | p_vf->relative_vf_id, p_qid_tlv->qid); |
2080 | return QED_IOV_QID_INVALID; |
2081 | } |
2082 | |
2083 | return p_qid_tlv->qid; |
2084 | } |
2085 | |
2086 | static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, |
2087 | struct qed_ptt *p_ptt, |
2088 | struct qed_vf_info *vf) |
2089 | { |
2090 | struct qed_queue_start_common_params params; |
2091 | struct qed_queue_cid_vf_params vf_params; |
2092 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2093 | u8 status = PFVF_STATUS_NO_RESOURCE; |
2094 | u8 qid_usage_idx, vf_legacy = 0; |
2095 | struct vfpf_start_rxq_tlv *req; |
2096 | struct qed_vf_queue *p_queue; |
2097 | struct qed_queue_cid *p_cid; |
2098 | struct qed_sb_info sb_dummy; |
2099 | int rc; |
2100 | |
2101 | req = &mbx->req_virt->start_rxq; |
2102 | |
if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2106 | goto out; |
2107 | |
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2109 | if (qid_usage_idx == QED_IOV_QID_INVALID) |
2110 | goto out; |
2111 | |
2112 | p_queue = &vf->vf_queues[req->rx_qid]; |
2113 | if (p_queue->cids[qid_usage_idx].p_cid) |
2114 | goto out; |
2115 | |
vf_legacy = qed_vf_calculate_legacy(vf);
2117 | |
2118 | /* Acquire a new queue-cid */ |
2119 | memset(¶ms, 0, sizeof(params)); |
2120 | params.queue_id = p_queue->fw_rx_qid; |
2121 | params.vport_id = vf->vport_id; |
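/* VF stats indices presumably follow the PF ones, hence the 0x10
 * offset (assumption).
 */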
2122 | params.stats_id = vf->abs_vf_id + 0x10; |
2123 | /* Since IGU index is passed via sb_info, construct a dummy one */ |
2124 | memset(&sb_dummy, 0, sizeof(sb_dummy)); |
2125 | sb_dummy.igu_sb_id = req->hw_sb; |
2126 | params.p_sb = &sb_dummy; |
2127 | params.sb_idx = req->sb_index; |
2128 | |
2129 | memset(&vf_params, 0, sizeof(vf_params)); |
2130 | vf_params.vfid = vf->relative_vf_id; |
2131 | vf_params.vf_qid = (u8)req->rx_qid; |
2132 | vf_params.vf_legacy = vf_legacy; |
2133 | vf_params.qid_usage_idx = qid_usage_idx; |
p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, true, &vf_params);
2136 | if (!p_cid) |
2137 | goto out; |
2138 | |
2139 | /* Legacy VFs have their Producers in a different location, which they |
2140 | * calculate on their own and clean the producer prior to this. |
2141 | */ |
2142 | if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) |
qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY +
SEM_FAST_REG_INT_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
req->rx_qid), 0);
2147 | |
rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr, req->cqe_pbl_size);
2152 | if (rc) { |
2153 | status = PFVF_STATUS_FAILURE; |
2154 | qed_eth_queue_cid_release(p_hwfn, p_cid); |
2155 | } else { |
2156 | p_queue->cids[qid_usage_idx].p_cid = p_cid; |
2157 | p_queue->cids[qid_usage_idx].b_is_tx = false; |
2158 | status = PFVF_STATUS_SUCCESS; |
2159 | vf->num_active_rxqs++; |
2160 | } |
2161 | |
2162 | out: |
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
!!(vf_legacy &
QED_QCID_LEGACY_VF_RX_PROD));
2166 | } |
2167 | |
2168 | static void |
2169 | qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, |
2170 | struct qed_tunnel_info *p_tun, |
2171 | u16 tunn_feature_mask) |
2172 | { |
2173 | p_resp->tunn_feature_mask = tunn_feature_mask; |
2174 | p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; |
2175 | p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; |
2176 | p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; |
2177 | p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; |
p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2179 | p_resp->vxlan_clss = p_tun->vxlan.tun_cls; |
2180 | p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; |
2181 | p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; |
2182 | p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; |
2183 | p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; |
2184 | p_resp->geneve_udp_port = p_tun->geneve_port.port; |
2185 | p_resp->vxlan_udp_port = p_tun->vxlan_port.port; |
2186 | } |
2187 | |
2188 | static void |
2189 | __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, |
2190 | struct qed_tunn_update_type *p_tun, |
2191 | enum qed_tunn_mode mask, u8 tun_cls) |
2192 | { |
2193 | if (p_req->tun_mode_update_mask & BIT(mask)) { |
2194 | p_tun->b_update_mode = true; |
2195 | |
2196 | if (p_req->tunn_mode & BIT(mask)) |
2197 | p_tun->b_mode_enabled = true; |
2198 | } |
2199 | |
2200 | p_tun->tun_cls = tun_cls; |
2201 | } |
2202 | |
2203 | static void |
2204 | qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, |
2205 | struct qed_tunn_update_type *p_tun, |
2206 | struct qed_tunn_update_udp_port *p_port, |
2207 | enum qed_tunn_mode mask, |
2208 | u8 tun_cls, u8 update_port, u16 port) |
2209 | { |
2210 | if (update_port) { |
2211 | p_port->b_update_port = true; |
2212 | p_port->port = port; |
2213 | } |
2214 | |
2215 | __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); |
2216 | } |
2217 | |
2218 | static bool |
2219 | qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) |
2220 | { |
2221 | bool b_update_requested = false; |
2222 | |
2223 | if (p_req->tun_mode_update_mask || p_req->update_tun_cls || |
2224 | p_req->update_geneve_port || p_req->update_vxlan_port) |
2225 | b_update_requested = true; |
2226 | |
2227 | return b_update_requested; |
2228 | } |
2229 | |
2230 | static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) |
2231 | { |
2232 | if (tun->b_update_mode && !tun->b_mode_enabled) { |
2233 | tun->b_update_mode = false; |
2234 | *rc = -EINVAL; |
2235 | } |
2236 | } |
2237 | |
2238 | static int |
2239 | qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, |
2240 | u16 *tun_features, bool *update, |
2241 | struct qed_tunnel_info *tun_src) |
2242 | { |
2243 | struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; |
2244 | struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; |
2245 | u16 bultn_vxlan_port, bultn_geneve_port; |
2246 | void *cookie = p_hwfn->cdev->ops_cookie; |
2247 | int i, rc = 0; |
2248 | |
2249 | *tun_features = p_hwfn->cdev->tunn_feature_mask; |
2250 | bultn_vxlan_port = tun->vxlan_port.port; |
2251 | bultn_geneve_port = tun->geneve_port.port; |
qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2257 | |
2258 | if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && |
2259 | (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || |
2260 | tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || |
2261 | tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || |
2262 | tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || |
2263 | tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { |
2264 | tun_src->b_update_rx_cls = false; |
2265 | tun_src->b_update_tx_cls = false; |
2266 | rc = -EINVAL; |
2267 | } |
2268 | |
2269 | if (tun_src->vxlan_port.b_update_port) { |
2270 | if (tun_src->vxlan_port.port == tun->vxlan_port.port) { |
2271 | tun_src->vxlan_port.b_update_port = false; |
2272 | } else { |
2273 | *update = true; |
2274 | bultn_vxlan_port = tun_src->vxlan_port.port; |
2275 | } |
2276 | } |
2277 | |
2278 | if (tun_src->geneve_port.b_update_port) { |
2279 | if (tun_src->geneve_port.port == tun->geneve_port.port) { |
2280 | tun_src->geneve_port.b_update_port = false; |
2281 | } else { |
2282 | *update = true; |
2283 | bultn_geneve_port = tun_src->geneve_port.port; |
2284 | } |
2285 | } |
2286 | |
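/* Propagate the UDP ports that may have changed to every VF's
 * bulletin board.
 */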
2287 | qed_for_each_vf(p_hwfn, i) { |
qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
bultn_geneve_port);
2290 | } |
2291 | |
qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2293 | ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); |
2294 | |
2295 | return rc; |
2296 | } |
2297 | |
2298 | static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, |
2299 | struct qed_ptt *p_ptt, |
2300 | struct qed_vf_info *p_vf) |
2301 | { |
2302 | struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; |
2303 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; |
2304 | struct pfvf_update_tunn_param_tlv *p_resp; |
2305 | struct vfpf_update_tunn_param_tlv *p_req; |
2306 | u8 status = PFVF_STATUS_SUCCESS; |
2307 | bool b_update_required = false; |
2308 | struct qed_tunnel_info tunn; |
2309 | u16 tunn_feature_mask = 0; |
2310 | int i, rc = 0; |
2311 | |
2312 | mbx->offset = (u8 *)mbx->reply_virt; |
2313 | |
2314 | memset(&tunn, 0, sizeof(tunn)); |
2315 | p_req = &mbx->req_virt->tunn_param_update; |
2316 | |
2317 | if (!qed_iov_pf_validate_tunn_param(p_req)) { |
2318 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2319 | "No tunnel update requested by VF\n" ); |
2320 | status = PFVF_STATUS_FAILURE; |
2321 | goto send_resp; |
2322 | } |
2323 | |
2324 | tunn.b_update_rx_cls = p_req->update_tun_cls; |
2325 | tunn.b_update_tx_cls = p_req->update_tun_cls; |
2326 | |
qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
p_req->update_vxlan_port,
p_req->vxlan_port);
qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
QED_MODE_L2GENEVE_TUNN,
p_req->l2geneve_clss,
p_req->update_geneve_port,
p_req->geneve_port);
__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
QED_MODE_IPGENEVE_TUNN,
p_req->ipgeneve_clss);
__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2343 | |
/* Even if the PF modifies the VF's request, it must still return
 * an error for a partial or otherwise modified configuration, as
 * opposed to the requested one.
 */
rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
&b_update_required, &tunn);
2350 | |
2351 | if (rc) |
2352 | status = PFVF_STATUS_FAILURE; |
2353 | |
/* Check whether the QED client is willing to update anything */
2355 | if (b_update_required) { |
2356 | u16 geneve_port; |
2357 | |
rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
QED_SPQ_MODE_EBLOCK, NULL);
2360 | if (rc) |
2361 | status = PFVF_STATUS_FAILURE; |
2362 | |
2363 | geneve_port = p_tun->geneve_port.port; |
2364 | qed_for_each_vf(p_hwfn, i) { |
qed_iov_bulletin_set_udp_ports(p_hwfn, i,
p_tun->vxlan_port.port,
geneve_port);
2368 | } |
2369 | } |
2370 | |
2371 | send_resp: |
p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2374 | |
2375 | qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); |
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
2378 | |
qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2380 | } |
2381 | |
2382 | static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, |
2383 | struct qed_ptt *p_ptt, |
2384 | struct qed_vf_info *p_vf, |
2385 | u32 cid, u8 status) |
2386 | { |
2387 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; |
2388 | struct pfvf_start_queue_resp_tlv *p_tlv; |
2389 | bool b_legacy = false; |
2390 | u16 length; |
2391 | |
2392 | mbx->offset = (u8 *)mbx->reply_virt; |
2393 | |
2394 | /* Taking a bigger struct instead of adding a TLV to list was a |
2395 | * mistake, but one which we're now stuck with, as some older |
2396 | * clients assume the size of the previous response. |
2397 | */ |
2398 | if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == |
2399 | ETH_HSI_VER_NO_PKT_LEN_TUNN) |
2400 | b_legacy = true; |
2401 | |
2402 | if (!b_legacy) |
2403 | length = sizeof(*p_tlv); |
2404 | else |
2405 | length = sizeof(struct pfvf_def_resp_tlv); |
2406 | |
p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
length);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
2411 | |
2412 | /* Update the TLV with the response */ |
2413 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) |
2414 | p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); |
2415 | |
2416 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); |
2417 | } |
2418 | |
2419 | static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, |
2420 | struct qed_ptt *p_ptt, |
2421 | struct qed_vf_info *vf) |
2422 | { |
2423 | struct qed_queue_start_common_params params; |
2424 | struct qed_queue_cid_vf_params vf_params; |
2425 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2426 | u8 status = PFVF_STATUS_NO_RESOURCE; |
2427 | struct vfpf_start_txq_tlv *req; |
2428 | struct qed_vf_queue *p_queue; |
2429 | struct qed_queue_cid *p_cid; |
2430 | struct qed_sb_info sb_dummy; |
2431 | u8 qid_usage_idx, vf_legacy; |
2432 | u32 cid = 0; |
2433 | int rc; |
2434 | u16 pq; |
2435 | |
2436 | memset(¶ms, 0, sizeof(params)); |
2437 | req = &mbx->req_virt->start_txq; |
2438 | |
if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
QED_IOV_VALIDATE_Q_NA) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2442 | goto out; |
2443 | |
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2445 | if (qid_usage_idx == QED_IOV_QID_INVALID) |
2446 | goto out; |
2447 | |
2448 | p_queue = &vf->vf_queues[req->tx_qid]; |
2449 | if (p_queue->cids[qid_usage_idx].p_cid) |
2450 | goto out; |
2451 | |
vf_legacy = qed_vf_calculate_legacy(vf);
2453 | |
2454 | /* Acquire a new queue-cid */ |
2455 | params.queue_id = p_queue->fw_tx_qid; |
2456 | params.vport_id = vf->vport_id; |
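/* Same stats index convention as on the Rx path: VF indices are
 * presumably offset past the PF ones (assumption).
 */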
2457 | params.stats_id = vf->abs_vf_id + 0x10; |
2458 | |
2459 | /* Since IGU index is passed via sb_info, construct a dummy one */ |
2460 | memset(&sb_dummy, 0, sizeof(sb_dummy)); |
2461 | sb_dummy.igu_sb_id = req->hw_sb; |
2462 | params.p_sb = &sb_dummy; |
2463 | params.sb_idx = req->sb_index; |
2464 | |
2465 | memset(&vf_params, 0, sizeof(vf_params)); |
2466 | vf_params.vfid = vf->relative_vf_id; |
2467 | vf_params.vf_qid = (u8)req->tx_qid; |
2468 | vf_params.vf_legacy = vf_legacy; |
2469 | vf_params.qid_usage_idx = qid_usage_idx; |
2470 | |
p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, false, &vf_params);
2473 | if (!p_cid) |
2474 | goto out; |
2475 | |
pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
req->pbl_addr, req->pbl_size, pq);
2479 | if (rc) { |
2480 | status = PFVF_STATUS_FAILURE; |
2481 | qed_eth_queue_cid_release(p_hwfn, p_cid); |
2482 | } else { |
2483 | status = PFVF_STATUS_SUCCESS; |
2484 | p_queue->cids[qid_usage_idx].p_cid = p_cid; |
2485 | p_queue->cids[qid_usage_idx].b_is_tx = true; |
2486 | cid = p_cid->cid; |
2487 | } |
2488 | |
2489 | out: |
qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2491 | } |
2492 | |
2493 | static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, |
2494 | struct qed_vf_info *vf, |
2495 | u16 rxq_id, |
2496 | u8 qid_usage_idx, bool cqe_completion) |
2497 | { |
2498 | struct qed_vf_queue *p_queue; |
2499 | int rc = 0; |
2500 | |
if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2505 | vf->relative_vf_id, rxq_id, qid_usage_idx); |
2506 | return -EINVAL; |
2507 | } |
2508 | |
2509 | p_queue = &vf->vf_queues[rxq_id]; |
2510 | |
2511 | /* We've validated the index and the existence of the active RXQ - |
2512 | * now we need to make sure that it's using the correct qid. |
2513 | */ |
2514 | if (!p_queue->cids[qid_usage_idx].p_cid || |
2515 | p_queue->cids[qid_usage_idx].b_is_tx) { |
2516 | struct qed_queue_cid *p_cid; |
2517 | |
2518 | p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); |
2519 | DP_VERBOSE(p_hwfn, |
2520 | QED_MSG_IOV, |
2521 | "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n" , |
2522 | vf->relative_vf_id, |
2523 | rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); |
2524 | return -EINVAL; |
2525 | } |
2526 | |
2527 | /* Now that we know we have a valid Rx-queue - close it */ |
rc = qed_eth_rx_queue_stop(p_hwfn,
p_queue->cids[qid_usage_idx].p_cid,
false, cqe_completion);
2531 | if (rc) |
2532 | return rc; |
2533 | |
2534 | p_queue->cids[qid_usage_idx].p_cid = NULL; |
2535 | vf->num_active_rxqs--; |
2536 | |
2537 | return 0; |
2538 | } |
2539 | |
2540 | static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, |
2541 | struct qed_vf_info *vf, |
2542 | u16 txq_id, u8 qid_usage_idx) |
2543 | { |
2544 | struct qed_vf_queue *p_queue; |
2545 | int rc = 0; |
2546 | |
if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2548 | return -EINVAL; |
2549 | |
2550 | p_queue = &vf->vf_queues[txq_id]; |
2551 | if (!p_queue->cids[qid_usage_idx].p_cid || |
2552 | !p_queue->cids[qid_usage_idx].b_is_tx) |
2553 | return -EINVAL; |
2554 | |
rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2556 | if (rc) |
2557 | return rc; |
2558 | |
2559 | p_queue->cids[qid_usage_idx].p_cid = NULL; |
2560 | return 0; |
2561 | } |
2562 | |
2563 | static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, |
2564 | struct qed_ptt *p_ptt, |
2565 | struct qed_vf_info *vf) |
2566 | { |
2567 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
2568 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2569 | u8 status = PFVF_STATUS_FAILURE; |
2570 | struct vfpf_stop_rxqs_tlv *req; |
2571 | u8 qid_usage_idx; |
2572 | int rc; |
2573 | |
2574 | /* There has never been an official driver that used this interface |
2575 | * for stopping multiple queues, and it is now considered deprecated. |
2576 | * Validate this isn't used here. |
2577 | */ |
2578 | req = &mbx->req_virt->stop_rxqs; |
2579 | if (req->num_rxqs != 1) { |
2580 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2581 | "Odd; VF[%d] tried stopping multiple Rx queues\n" , |
2582 | vf->relative_vf_id); |
2583 | status = PFVF_STATUS_NOT_SUPPORTED; |
2584 | goto out; |
2585 | } |
2586 | |
2587 | /* Find which qid-index is associated with the queue */ |
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2589 | if (qid_usage_idx == QED_IOV_QID_INVALID) |
2590 | goto out; |
2591 | |
rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
qid_usage_idx, req->cqe_completion);
2594 | if (!rc) |
2595 | status = PFVF_STATUS_SUCCESS; |
2596 | out: |
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2598 | length, status); |
2599 | } |
2600 | |
2601 | static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, |
2602 | struct qed_ptt *p_ptt, |
2603 | struct qed_vf_info *vf) |
2604 | { |
2605 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
2606 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2607 | u8 status = PFVF_STATUS_FAILURE; |
2608 | struct vfpf_stop_txqs_tlv *req; |
2609 | u8 qid_usage_idx; |
2610 | int rc; |
2611 | |
2612 | /* There has never been an official driver that used this interface |
2613 | * for stopping multiple queues, and it is now considered deprecated. |
2614 | * Validate this isn't used here. |
2615 | */ |
2616 | req = &mbx->req_virt->stop_txqs; |
2617 | if (req->num_txqs != 1) { |
2618 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2619 | "Odd; VF[%d] tried stopping multiple Tx queues\n" , |
2620 | vf->relative_vf_id); |
2621 | status = PFVF_STATUS_NOT_SUPPORTED; |
2622 | goto out; |
2623 | } |
2624 | |
2625 | /* Find which qid-index is associated with the queue */ |
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2627 | if (qid_usage_idx == QED_IOV_QID_INVALID) |
2628 | goto out; |
2629 | |
rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2631 | if (!rc) |
2632 | status = PFVF_STATUS_SUCCESS; |
2633 | |
2634 | out: |
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2636 | length, status); |
2637 | } |
2638 | |
2639 | static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, |
2640 | struct qed_ptt *p_ptt, |
2641 | struct qed_vf_info *vf) |
2642 | { |
2643 | struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; |
2644 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
2645 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
2646 | struct vfpf_update_rxq_tlv *req; |
2647 | u8 status = PFVF_STATUS_FAILURE; |
2648 | u8 complete_event_flg; |
2649 | u8 complete_cqe_flg; |
2650 | u8 qid_usage_idx; |
2651 | int rc; |
2652 | u8 i; |
2653 | |
2654 | req = &mbx->req_virt->update_rxq; |
2655 | complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); |
2656 | complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); |
2657 | |
qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2659 | if (qid_usage_idx == QED_IOV_QID_INVALID) |
2660 | goto out; |
2661 | |
2662 | /* There shouldn't exist a VF that uses queue-qids yet uses this |
2663 | * API with multiple Rx queues. Validate this. |
2664 | */ |
2665 | if ((vf->acquire.vfdev_info.capabilities & |
2666 | VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { |
2667 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2668 | "VF[%d] supports QIDs but sends multiple queues\n" , |
2669 | vf->relative_vf_id); |
2670 | goto out; |
2671 | } |
2672 | |
2673 | /* Validate inputs - for the legacy case this is still true since |
2674 | * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. |
2675 | */ |
2676 | for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { |
if (!qed_iov_validate_rxq(p_hwfn, vf, i,
QED_IOV_VALIDATE_Q_NA) ||
!vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2683 | vf->relative_vf_id, req->rx_qid, |
2684 | req->num_rxqs); |
2685 | goto out; |
2686 | } |
2687 | } |
2688 | |
2689 | /* Prepare the handlers */ |
2690 | for (i = 0; i < req->num_rxqs; i++) { |
2691 | u16 qid = req->rx_qid + i; |
2692 | |
2693 | handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; |
2694 | } |
2695 | |
rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
req->num_rxqs,
complete_cqe_flg,
complete_event_flg,
QED_SPQ_MODE_EBLOCK, NULL);
2701 | if (rc) |
2702 | goto out; |
2703 | |
2704 | status = PFVF_STATUS_SUCCESS; |
2705 | out: |
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2707 | length, status); |
2708 | } |
2709 | |
2710 | void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, |
2711 | void *p_tlvs_list, u16 req_type) |
2712 | { |
2713 | struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; |
2714 | int len = 0; |
2715 | |
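/* Walk the TLV chain; each TLV's length field covers the entire
 * TLV, and a well-formed chain ends with CHANNEL_TLV_LIST_END.
 */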
2716 | do { |
2717 | if (!p_tlv->length) { |
2718 | DP_NOTICE(p_hwfn, "Zero length TLV found\n" ); |
2719 | return NULL; |
2720 | } |
2721 | |
2722 | if (p_tlv->type == req_type) { |
2723 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
2724 | "Extended tlv type %d, length %d found\n" , |
2725 | p_tlv->type, p_tlv->length); |
2726 | return p_tlv; |
2727 | } |
2728 | |
2729 | len += p_tlv->length; |
2730 | p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); |
2731 | |
2732 | if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { |
2733 | DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n" ); |
2734 | return NULL; |
2735 | } |
2736 | } while (p_tlv->type != CHANNEL_TLV_LIST_END); |
2737 | |
2738 | return NULL; |
2739 | } |
2740 | |
2741 | static void |
2742 | qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, |
2743 | struct qed_sp_vport_update_params *p_data, |
2744 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2745 | { |
2746 | struct vfpf_vport_update_activate_tlv *p_act_tlv; |
2747 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; |
2748 | |
2749 | p_act_tlv = (struct vfpf_vport_update_activate_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2751 | if (!p_act_tlv) |
2752 | return; |
2753 | |
2754 | p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; |
2755 | p_data->vport_active_rx_flg = p_act_tlv->active_rx; |
2756 | p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; |
2757 | p_data->vport_active_tx_flg = p_act_tlv->active_tx; |
2758 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; |
2759 | } |
2760 | |
2761 | static void |
2762 | qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, |
2763 | struct qed_sp_vport_update_params *p_data, |
2764 | struct qed_vf_info *p_vf, |
2765 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2766 | { |
2767 | struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; |
2768 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; |
2769 | |
2770 | p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2772 | if (!p_vlan_tlv) |
2773 | return; |
2774 | |
2775 | p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; |
2776 | |
2777 | /* Ignore the VF request if we're forcing a vlan */ |
2778 | if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { |
2779 | p_data->update_inner_vlan_removal_flg = 1; |
2780 | p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; |
2781 | } |
2782 | |
2783 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; |
2784 | } |
2785 | |
2786 | static void |
2787 | qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, |
2788 | struct qed_sp_vport_update_params *p_data, |
2789 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2790 | { |
2791 | struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; |
2792 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; |
2793 | |
2794 | p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
tlv);
2797 | if (!p_tx_switch_tlv) |
2798 | return; |
2799 | |
2800 | p_data->update_tx_switching_flg = 1; |
2801 | p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; |
2802 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; |
2803 | } |
2804 | |
2805 | static void |
2806 | qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, |
2807 | struct qed_sp_vport_update_params *p_data, |
2808 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2809 | { |
2810 | struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; |
2811 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; |
2812 | |
2813 | p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2815 | if (!p_mcast_tlv) |
2816 | return; |
2817 | |
2818 | p_data->update_approx_mcast_flg = 1; |
2819 | memcpy(p_data->bins, p_mcast_tlv->bins, |
2820 | sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); |
2821 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; |
2822 | } |
2823 | |
2824 | static void |
2825 | qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, |
2826 | struct qed_sp_vport_update_params *p_data, |
2827 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2828 | { |
2829 | struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; |
2830 | struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; |
2831 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; |
2832 | |
2833 | p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2835 | if (!p_accept_tlv) |
2836 | return; |
2837 | |
2838 | p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; |
2839 | p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; |
2840 | p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; |
2841 | p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; |
2842 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; |
2843 | } |
2844 | |
2845 | static void |
2846 | qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, |
2847 | struct qed_sp_vport_update_params *p_data, |
2848 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2849 | { |
2850 | struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; |
2851 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; |
2852 | |
2853 | p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
tlv);
2856 | if (!p_accept_any_vlan) |
2857 | return; |
2858 | |
2859 | p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; |
2860 | p_data->update_accept_any_vlan_flg = |
2861 | p_accept_any_vlan->update_accept_any_vlan_flg; |
2862 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; |
2863 | } |
2864 | |
2865 | static void |
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
struct qed_sp_vport_update_params *p_data,
struct qed_rss_params *p_rss,
struct qed_iov_vf_mbx *p_mbx,
u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2874 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; |
2875 | bool b_reject = false; |
2876 | u16 table_size; |
2877 | u16 i, q_idx; |
2878 | |
2879 | p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2881 | if (!p_rss_tlv) { |
2882 | p_data->rss_params = NULL; |
2883 | return; |
2884 | } |
2885 | |
2886 | memset(p_rss, 0, sizeof(struct qed_rss_params)); |
2887 | |
2888 | p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & |
2889 | VFPF_UPDATE_RSS_CONFIG_FLAG); |
2890 | p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & |
2891 | VFPF_UPDATE_RSS_CAPS_FLAG); |
2892 | p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & |
2893 | VFPF_UPDATE_RSS_IND_TABLE_FLAG); |
2894 | p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & |
2895 | VFPF_UPDATE_RSS_KEY_FLAG); |
2896 | |
2897 | p_rss->rss_enable = p_rss_tlv->rss_enable; |
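/* Each VF gets its own RSS engine; index 0 is presumably reserved
 * for the PF, hence the +1 (assumption).
 */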
2898 | p_rss->rss_eng_id = vf->relative_vf_id + 1; |
2899 | p_rss->rss_caps = p_rss_tlv->rss_caps; |
2900 | p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; |
2901 | memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); |
2902 | |
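/* Clamp the VF-provided indirection table to the size the PF-side
 * table can actually hold.
 */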
2903 | table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), |
2904 | (1 << p_rss_tlv->rss_table_size_log)); |
2905 | |
2906 | for (i = 0; i < table_size; i++) { |
2907 | struct qed_queue_cid *p_cid; |
2908 | |
2909 | q_idx = p_rss_tlv->rss_ind_table[i]; |
if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Omitting RSS due to wrong queue %04x\n",
2915 | vf->relative_vf_id, q_idx); |
2916 | b_reject = true; |
2917 | goto out; |
2918 | } |
2919 | |
p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2921 | p_rss->rss_ind_table[i] = p_cid; |
2922 | } |
2923 | |
2924 | p_data->rss_params = p_rss; |
2925 | out: |
2926 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; |
2927 | if (!b_reject) |
2928 | *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; |
2929 | } |
2930 | |
2931 | static void |
2932 | qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, |
2933 | struct qed_vf_info *vf, |
2934 | struct qed_sp_vport_update_params *p_data, |
2935 | struct qed_sge_tpa_params *p_sge_tpa, |
2936 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) |
2937 | { |
2938 | struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; |
2939 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; |
2940 | |
2941 | p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) |
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2943 | |
2944 | if (!p_sge_tpa_tlv) { |
2945 | p_data->sge_tpa_params = NULL; |
2946 | return; |
2947 | } |
2948 | |
2949 | memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); |
2950 | |
2951 | p_sge_tpa->update_tpa_en_flg = |
2952 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); |
2953 | p_sge_tpa->update_tpa_param_flg = |
2954 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & |
2955 | VFPF_UPDATE_TPA_PARAM_FLAG); |
2956 | |
2957 | p_sge_tpa->tpa_ipv4_en_flg = |
2958 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); |
2959 | p_sge_tpa->tpa_ipv6_en_flg = |
2960 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); |
2961 | p_sge_tpa->tpa_pkt_split_flg = |
2962 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); |
2963 | p_sge_tpa->tpa_hdr_data_split_flg = |
2964 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); |
2965 | p_sge_tpa->tpa_gro_consistent_flg = |
2966 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); |
2967 | |
2968 | p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; |
2969 | p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; |
2970 | p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; |
2971 | p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; |
2972 | p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; |
2973 | |
2974 | p_data->sge_tpa_params = p_sge_tpa; |
2975 | |
2976 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; |
2977 | } |
2978 | |
2979 | static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, |
2980 | u8 vfid, |
2981 | struct qed_sp_vport_update_params *params, |
2982 | u16 *tlvs) |
2983 | { |
2984 | u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; |
2985 | struct qed_filter_accept_flags *flags = ¶ms->accept_flags; |
2986 | struct qed_public_vf_info *vf_info; |
2987 | u16 tlv_mask; |
2988 | |
2989 | tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) | |
2990 | BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN); |
2991 | |
2992 | /* Untrusted VFs can't even be trusted to know that fact. |
2993 | * Simply indicate everything is configured fine, and trace |
2994 | * configuration 'behind their back'. |
2995 | */ |
2996 | if (!(*tlvs & tlv_mask)) |
2997 | return 0; |
2998 | |
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
3000 | |
3001 | if (flags->update_rx_mode_config) { |
3002 | vf_info->rx_accept_mode = flags->rx_accept_filter; |
3003 | if (!vf_info->is_trusted_configured) |
3004 | flags->rx_accept_filter &= ~mask; |
3005 | } |
3006 | |
3007 | if (flags->update_tx_mode_config) { |
3008 | vf_info->tx_accept_mode = flags->tx_accept_filter; |
3009 | if (!vf_info->is_trusted_configured) |
3010 | flags->tx_accept_filter &= ~mask; |
3011 | } |
3012 | |
3013 | if (params->update_accept_any_vlan_flg) { |
3014 | vf_info->accept_any_vlan = params->accept_any_vlan; |
3015 | |
3016 | if (vf_info->forced_vlan && !vf_info->is_trusted_configured) |
3017 | params->accept_any_vlan = false; |
3018 | } |
3019 | |
3020 | return 0; |
3021 | } |
3022 | |
3023 | static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, |
3024 | struct qed_ptt *p_ptt, |
3025 | struct qed_vf_info *vf) |
3026 | { |
struct qed_rss_params *p_rss_params = NULL;
3028 | struct qed_sp_vport_update_params params; |
3029 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
3030 | struct qed_sge_tpa_params sge_tpa_params; |
3031 | u16 tlvs_mask = 0, tlvs_accepted = 0; |
3032 | u8 status = PFVF_STATUS_SUCCESS; |
3033 | u16 length; |
3034 | int rc; |
3035 | |
/* Validate PF can send such a request */
3037 | if (!vf->vport_instance) { |
3038 | DP_VERBOSE(p_hwfn, |
3039 | QED_MSG_IOV, |
3040 | "No VPORT instance available for VF[%d], failing vport update\n" , |
3041 | vf->abs_vf_id); |
3042 | status = PFVF_STATUS_FAILURE; |
3043 | goto out; |
3044 | } |
p_rss_params = vzalloc(sizeof(*p_rss_params));
3046 | if (!p_rss_params) { |
3047 | status = PFVF_STATUS_FAILURE; |
3048 | goto out; |
3049 | } |
3050 | |
3051 | memset(¶ms, 0, sizeof(params)); |
3052 | params.opaque_fid = vf->opaque_fid; |
3053 | params.vport_id = vf->vport_id; |
3054 | params.rss_params = NULL; |
3055 | |
3056 | /* Search for extended tlvs list and update values |
3057 | * from VF in struct qed_sp_vport_update_params. |
3058 | */ |
qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
3067 | |
3068 | tlvs_accepted = tlvs_mask; |
3069 | |
3070 | /* Some of the extended TLVs need to be validated first; In that case, |
3071 | * they can update the mask without updating the accepted [so that |
3072 | * PF could communicate to VF it has rejected request]. |
3073 | */ |
qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
mbx, &tlvs_mask, &tlvs_accepted);
3076 | |
if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted)) {
3079 | tlvs_accepted = 0; |
3080 | status = PFVF_STATUS_NOT_SUPPORTED; |
3081 | goto out; |
3082 | } |
3083 | |
3084 | if (!tlvs_accepted) { |
3085 | if (tlvs_mask) |
3086 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3087 | "Upper-layer prevents VF vport configuration\n" ); |
3088 | else |
3089 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3090 | "No feature tlvs found for vport update\n" ); |
3091 | status = PFVF_STATUS_NOT_SUPPORTED; |
3092 | goto out; |
3093 | } |
3094 | |
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3096 | |
3097 | if (rc) |
3098 | status = PFVF_STATUS_FAILURE; |
3099 | |
3100 | out: |
vfree(p_rss_params);
length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3103 | tlvs_mask, tlvs_accepted); |
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3105 | } |
3106 | |
3107 | static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, |
3108 | struct qed_vf_info *p_vf, |
3109 | struct qed_filter_ucast *p_params) |
3110 | { |
3111 | int i; |
3112 | |
3113 | /* First remove entries and then add new ones */ |
3114 | if (p_params->opcode == QED_FILTER_REMOVE) { |
3115 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) |
3116 | if (p_vf->shadow_config.vlans[i].used && |
3117 | p_vf->shadow_config.vlans[i].vid == |
3118 | p_params->vlan) { |
3119 | p_vf->shadow_config.vlans[i].used = false; |
3120 | break; |
3121 | } |
3122 | if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { |
3123 | DP_VERBOSE(p_hwfn, |
QED_MSG_IOV,
"VF [%d] - Tried to remove a non-existent vlan\n" ,
3126 | p_vf->relative_vf_id); |
3127 | return -EINVAL; |
3128 | } |
3129 | } else if (p_params->opcode == QED_FILTER_REPLACE || |
3130 | p_params->opcode == QED_FILTER_FLUSH) { |
3131 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) |
3132 | p_vf->shadow_config.vlans[i].used = false; |
3133 | } |
3134 | |
3135 | /* In forced mode, we're willing to remove entries - but we don't add |
3136 | * new ones. |
3137 | */ |
3138 | if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) |
3139 | return 0; |
3140 | |
3141 | if (p_params->opcode == QED_FILTER_ADD || |
3142 | p_params->opcode == QED_FILTER_REPLACE) { |
3143 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { |
3144 | if (p_vf->shadow_config.vlans[i].used) |
3145 | continue; |
3146 | |
3147 | p_vf->shadow_config.vlans[i].used = true; |
3148 | p_vf->shadow_config.vlans[i].vid = p_params->vlan; |
3149 | break; |
3150 | } |
3151 | |
3152 | if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { |
3153 | DP_VERBOSE(p_hwfn, |
QED_MSG_IOV,
"VF [%d] - Tried to configure more than %d vlan filters\n" ,
3156 | p_vf->relative_vf_id, |
3157 | QED_ETH_VF_NUM_VLAN_FILTERS + 1); |
3158 | return -EINVAL; |
3159 | } |
3160 | } |
3161 | |
3162 | return 0; |
3163 | } |
3164 | |
3165 | static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, |
3166 | struct qed_vf_info *p_vf, |
3167 | struct qed_filter_ucast *p_params) |
3168 | { |
3169 | int i; |
3170 | |
3171 | /* If we're in forced-mode, we don't allow any change */ |
3172 | if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) |
3173 | return 0; |
3174 | |
3175 | /* Don't keep track of shadow copy since we don't intend to restore. */ |
3176 | if (p_vf->p_vf_info.is_trusted_configured) |
3177 | return 0; |
3178 | |
3179 | /* First remove entries and then add new ones */ |
3180 | if (p_params->opcode == QED_FILTER_REMOVE) { |
3181 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { |
3182 | if (ether_addr_equal(addr1: p_vf->shadow_config.macs[i], |
3183 | addr2: p_params->mac)) { |
3184 | eth_zero_addr(addr: p_vf->shadow_config.macs[i]); |
3185 | break; |
3186 | } |
3187 | } |
3188 | |
3189 | if (i == QED_ETH_VF_NUM_MAC_FILTERS) { |
3190 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3191 | "MAC isn't configured\n" ); |
3192 | return -EINVAL; |
3193 | } |
3194 | } else if (p_params->opcode == QED_FILTER_REPLACE || |
3195 | p_params->opcode == QED_FILTER_FLUSH) { |
3196 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) |
3197 | eth_zero_addr(addr: p_vf->shadow_config.macs[i]); |
3198 | } |
3199 | |
/* Add the new MAC address to the shadow list */
3201 | if (p_params->opcode != QED_FILTER_ADD && |
3202 | p_params->opcode != QED_FILTER_REPLACE) |
3203 | return 0; |
3204 | |
3205 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { |
3206 | if (is_zero_ether_addr(addr: p_vf->shadow_config.macs[i])) { |
3207 | ether_addr_copy(dst: p_vf->shadow_config.macs[i], |
3208 | src: p_params->mac); |
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Added MAC at entry %d in shadow\n" , i);
3211 | break; |
3212 | } |
3213 | } |
3214 | |
3215 | if (i == QED_ETH_VF_NUM_MAC_FILTERS) { |
3216 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n" ); |
3217 | return -EINVAL; |
3218 | } |
3219 | |
3220 | return 0; |
3221 | } |
3222 | |
3223 | static int |
3224 | qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, |
3225 | struct qed_vf_info *p_vf, |
3226 | struct qed_filter_ucast *p_params) |
3227 | { |
3228 | int rc = 0; |
3229 | |
3230 | if (p_params->type == QED_FILTER_MAC) { |
3231 | rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); |
3232 | if (rc) |
3233 | return rc; |
3234 | } |
3235 | |
3236 | if (p_params->type == QED_FILTER_VLAN) |
3237 | rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); |
3238 | |
3239 | return rc; |
3240 | } |
3241 | |
3242 | static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, |
3243 | int vfid, struct qed_filter_ucast *params) |
3244 | { |
3245 | struct qed_public_vf_info *vf; |
3246 | |
3247 | vf = qed_iov_get_public_vf_info(p_hwfn: hwfn, relative_vf_id: vfid, b_enabled_only: true); |
3248 | if (!vf) |
3249 | return -EINVAL; |
3250 | |
3251 | /* No real decision to make; Store the configured MAC */ |
3252 | if (params->type == QED_FILTER_MAC || |
3253 | params->type == QED_FILTER_MAC_VLAN) { |
3254 | ether_addr_copy(dst: vf->mac, src: params->mac); |
3255 | |
3256 | if (vf->is_trusted_configured) { |
3257 | qed_iov_bulletin_set_mac(p_hwfn: hwfn, mac: vf->mac, vfid); |
3258 | |
/* Update and post the bulletin again */
3260 | qed_schedule_iov(hwfn, flag: QED_IOV_WQ_BULLETIN_UPDATE_FLAG); |
3261 | } |
3262 | } |
3263 | |
3264 | return 0; |
3265 | } |
3266 | |
3267 | static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, |
3268 | struct qed_ptt *p_ptt, |
3269 | struct qed_vf_info *vf) |
3270 | { |
3271 | struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; |
3272 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
3273 | struct vfpf_ucast_filter_tlv *req; |
3274 | u8 status = PFVF_STATUS_SUCCESS; |
3275 | struct qed_filter_ucast params; |
3276 | int rc; |
3277 | |
3278 | /* Prepare the unicast filter params */ |
memset(&params, 0, sizeof(struct qed_filter_ucast));
3280 | req = &mbx->req_virt->ucast_filter; |
3281 | params.opcode = (enum qed_filter_opcode)req->opcode; |
3282 | params.type = (enum qed_filter_ucast_type)req->type; |
3283 | |
3284 | params.is_rx_filter = 1; |
3285 | params.is_tx_filter = 1; |
3286 | params.vport_to_remove_from = vf->vport_id; |
3287 | params.vport_to_add_to = vf->vport_id; |
3288 | memcpy(params.mac, req->mac, ETH_ALEN); |
3289 | params.vlan = req->vlan; |
3290 | |
3291 | DP_VERBOSE(p_hwfn, |
3292 | QED_MSG_IOV, |
3293 | "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n" , |
3294 | vf->abs_vf_id, params.opcode, params.type, |
3295 | params.is_rx_filter ? "RX" : "" , |
3296 | params.is_tx_filter ? "TX" : "" , |
3297 | params.vport_to_add_to, |
3298 | params.mac, params.vlan); |
3299 | |
3300 | if (!vf->vport_instance) { |
3301 | DP_VERBOSE(p_hwfn, |
3302 | QED_MSG_IOV, |
3303 | "No VPORT instance available for VF[%d], failing ucast MAC configuration\n" , |
3304 | vf->abs_vf_id); |
3305 | status = PFVF_STATUS_FAILURE; |
3306 | goto out; |
3307 | } |
3308 | |
3309 | /* Update shadow copy of the VF configuration */ |
if (qed_iov_vf_update_unicast_shadow(p_hwfn, p_vf: vf, p_params: &params)) {
3311 | status = PFVF_STATUS_FAILURE; |
3312 | goto out; |
3313 | } |
3314 | |
/* Determine if the unicast filtering is acceptable to the PF */
3316 | if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && |
3317 | (params.type == QED_FILTER_VLAN || |
3318 | params.type == QED_FILTER_MAC_VLAN)) { |
3319 | /* Once VLAN is forced or PVID is set, do not allow |
3320 | * to add/replace any further VLANs. |
3321 | */ |
3322 | if (params.opcode == QED_FILTER_ADD || |
3323 | params.opcode == QED_FILTER_REPLACE) |
3324 | status = PFVF_STATUS_FORCED; |
3325 | goto out; |
3326 | } |
3327 | |
3328 | if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && |
3329 | (params.type == QED_FILTER_MAC || |
3330 | params.type == QED_FILTER_MAC_VLAN)) { |
3331 | if (!ether_addr_equal(addr1: p_bulletin->mac, addr2: params.mac) || |
3332 | (params.opcode != QED_FILTER_ADD && |
3333 | params.opcode != QED_FILTER_REPLACE)) |
3334 | status = PFVF_STATUS_FORCED; |
3335 | goto out; |
3336 | } |
3337 | |
rc = qed_iov_chk_ucast(hwfn: p_hwfn, vfid: vf->relative_vf_id, params: &params);
3339 | if (rc) { |
3340 | status = PFVF_STATUS_FAILURE; |
3341 | goto out; |
3342 | } |
3343 | |
rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid: vf->opaque_fid, p_filter_cmd: &params,
3345 | comp_mode: QED_SPQ_MODE_CB, NULL); |
3346 | if (rc) |
3347 | status = PFVF_STATUS_FAILURE; |
3348 | |
3349 | out: |
3350 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: vf, type: CHANNEL_TLV_UCAST_FILTER, |
3351 | length: sizeof(struct pfvf_def_resp_tlv), status); |
3352 | } |
3353 | |
3354 | static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, |
3355 | struct qed_ptt *p_ptt, |
3356 | struct qed_vf_info *vf) |
3357 | { |
3358 | int i; |
3359 | |
3360 | /* Reset the SBs */ |
3361 | for (i = 0; i < vf->num_sbs; i++) |
3362 | qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, |
3363 | igu_sb_id: vf->igu_sbs[i], |
3364 | opaque: vf->opaque_fid, b_set: false); |
3365 | |
3366 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: vf, type: CHANNEL_TLV_INT_CLEANUP, |
3367 | length: sizeof(struct pfvf_def_resp_tlv), |
3368 | status: PFVF_STATUS_SUCCESS); |
3369 | } |
3370 | |
3371 | static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, |
3372 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) |
3373 | { |
3374 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
3375 | u8 status = PFVF_STATUS_SUCCESS; |
3376 | |
3377 | /* Disable Interrupts for VF */ |
3378 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, enable: 0); |
3379 | |
3380 | /* Reset Permission table */ |
3381 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, enable: 0); |
3382 | |
3383 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: vf, type: CHANNEL_TLV_CLOSE, |
3384 | length, status); |
3385 | } |
3386 | |
3387 | static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, |
3388 | struct qed_ptt *p_ptt, |
3389 | struct qed_vf_info *p_vf) |
3390 | { |
3391 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
3392 | u8 status = PFVF_STATUS_SUCCESS; |
3393 | int rc = 0; |
3394 | |
3395 | qed_iov_vf_cleanup(p_hwfn, p_vf); |
3396 | |
3397 | if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { |
3398 | /* Stopping the VF */ |
3399 | rc = qed_sp_vf_stop(p_hwfn, concrete_vfid: p_vf->concrete_fid, |
3400 | opaque_vfid: p_vf->opaque_fid); |
3401 | |
3402 | if (rc) { |
3403 | DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n" , |
3404 | rc); |
3405 | status = PFVF_STATUS_FAILURE; |
3406 | } |
3407 | |
3408 | p_vf->state = VF_STOPPED; |
3409 | } |
3410 | |
3411 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: p_vf, type: CHANNEL_TLV_RELEASE, |
3412 | length, status); |
3413 | } |
3414 | |
3415 | static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, |
3416 | struct qed_ptt *p_ptt, |
3417 | struct qed_vf_info *p_vf) |
3418 | { |
3419 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; |
3420 | struct pfvf_read_coal_resp_tlv *p_resp; |
3421 | struct vfpf_read_coal_req_tlv *req; |
3422 | u8 status = PFVF_STATUS_FAILURE; |
3423 | struct qed_vf_queue *p_queue; |
3424 | struct qed_queue_cid *p_cid; |
3425 | u16 coal = 0, qid, i; |
3426 | bool b_is_rx; |
3427 | int rc = 0; |
3428 | |
3429 | mbx->offset = (u8 *)mbx->reply_virt; |
3430 | req = &mbx->req_virt->read_coal_req; |
3431 | |
3432 | qid = req->qid; |
3433 | b_is_rx = req->is_rx ? true : false; |
3434 | |
3435 | if (b_is_rx) { |
3436 | if (!qed_iov_validate_rxq(p_hwfn, p_vf, rx_qid: qid, |
3437 | mode: QED_IOV_VALIDATE_Q_ENABLE)) { |
3438 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3439 | "VF[%d]: Invalid Rx queue_id = %d\n" , |
3440 | p_vf->abs_vf_id, qid); |
3441 | goto send_resp; |
3442 | } |
3443 | |
3444 | p_cid = qed_iov_get_vf_rx_queue_cid(p_queue: &p_vf->vf_queues[qid]); |
3445 | rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_hw_coal: &coal); |
3446 | if (rc) |
3447 | goto send_resp; |
3448 | } else { |
3449 | if (!qed_iov_validate_txq(p_hwfn, p_vf, tx_qid: qid, |
3450 | mode: QED_IOV_VALIDATE_Q_ENABLE)) { |
3451 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3452 | "VF[%d]: Invalid Tx queue_id = %d\n" , |
3453 | p_vf->abs_vf_id, qid); |
3454 | goto send_resp; |
3455 | } |
3456 | for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { |
3457 | p_queue = &p_vf->vf_queues[qid]; |
3458 | if ((!p_queue->cids[i].p_cid) || |
3459 | (!p_queue->cids[i].b_is_tx)) |
3460 | continue; |
3461 | |
3462 | p_cid = p_queue->cids[i].p_cid; |
3463 | |
3464 | rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_hw_coal: &coal); |
3465 | if (rc) |
3466 | goto send_resp; |
3467 | break; |
3468 | } |
3469 | } |
3470 | |
3471 | status = PFVF_STATUS_SUCCESS; |
3472 | |
3473 | send_resp: |
3474 | p_resp = qed_add_tlv(p_hwfn, offset: &mbx->offset, type: CHANNEL_TLV_COALESCE_READ, |
3475 | length: sizeof(*p_resp)); |
3476 | p_resp->coal = coal; |
3477 | |
3478 | qed_add_tlv(p_hwfn, offset: &mbx->offset, type: CHANNEL_TLV_LIST_END, |
3479 | length: sizeof(struct channel_list_end_tlv)); |
3480 | |
3481 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, length: sizeof(*p_resp), status); |
3482 | } |
3483 | |
3484 | static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, |
3485 | struct qed_ptt *p_ptt, |
3486 | struct qed_vf_info *vf) |
3487 | { |
3488 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
3489 | struct vfpf_update_coalesce *req; |
3490 | u8 status = PFVF_STATUS_FAILURE; |
3491 | struct qed_queue_cid *p_cid; |
3492 | u16 rx_coal, tx_coal; |
3493 | int rc = 0, i; |
3494 | u16 qid; |
3495 | |
3496 | req = &mbx->req_virt->update_coalesce; |
3497 | |
3498 | rx_coal = req->rx_coal; |
3499 | tx_coal = req->tx_coal; |
3500 | qid = req->qid; |
3501 | |
3502 | if (!qed_iov_validate_rxq(p_hwfn, p_vf: vf, rx_qid: qid, |
3503 | mode: QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { |
3504 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3505 | "VF[%d]: Invalid Rx queue_id = %d\n" , |
3506 | vf->abs_vf_id, qid); |
3507 | goto out; |
3508 | } |
3509 | |
3510 | if (!qed_iov_validate_txq(p_hwfn, p_vf: vf, tx_qid: qid, |
3511 | mode: QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { |
3512 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3513 | "VF[%d]: Invalid Tx queue_id = %d\n" , |
3514 | vf->abs_vf_id, qid); |
3515 | goto out; |
3516 | } |
3517 | |
3518 | DP_VERBOSE(p_hwfn, |
3519 | QED_MSG_IOV, |
3520 | "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n" , |
3521 | vf->abs_vf_id, rx_coal, tx_coal, qid); |
3522 | |
3523 | if (rx_coal) { |
3524 | p_cid = qed_iov_get_vf_rx_queue_cid(p_queue: &vf->vf_queues[qid]); |
3525 | |
3526 | rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, coalesce: rx_coal, p_cid); |
3527 | if (rc) { |
3528 | DP_VERBOSE(p_hwfn, |
3529 | QED_MSG_IOV, |
3530 | "VF[%d]: Unable to set rx queue = %d coalesce\n" , |
3531 | vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); |
3532 | goto out; |
3533 | } |
3534 | vf->rx_coal = rx_coal; |
3535 | } |
3536 | |
3537 | if (tx_coal) { |
3538 | struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; |
3539 | |
3540 | for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { |
3541 | if (!p_queue->cids[i].p_cid) |
3542 | continue; |
3543 | |
3544 | if (!p_queue->cids[i].b_is_tx) |
3545 | continue; |
3546 | |
3547 | rc = qed_set_txq_coalesce(p_hwfn, p_ptt, coalesce: tx_coal, |
3548 | p_cid: p_queue->cids[i].p_cid); |
3549 | |
3550 | if (rc) { |
3551 | DP_VERBOSE(p_hwfn, |
3552 | QED_MSG_IOV, |
3553 | "VF[%d]: Unable to set tx queue coalesce\n" , |
3554 | vf->abs_vf_id); |
3555 | goto out; |
3556 | } |
3557 | } |
3558 | vf->tx_coal = tx_coal; |
3559 | } |
3560 | |
3561 | status = PFVF_STATUS_SUCCESS; |
3562 | out: |
3563 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: vf, type: CHANNEL_TLV_COALESCE_UPDATE, |
3564 | length: sizeof(struct pfvf_def_resp_tlv), status); |
3565 | } |
3566 | |
3567 | static int |
3568 | qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, |
3569 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) |
3570 | { |
3571 | int cnt; |
3572 | u32 val; |
3573 | |
3574 | qed_fid_pretend(p_hwfn, p_ptt, fid: (u16)p_vf->concrete_fid); |
3575 | |
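/* Poll the per-VF DORQ usage counter for up to ~1 second
 * (50 iterations x 20 ms) until it drops to zero.
 */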
3576 | for (cnt = 0; cnt < 50; cnt++) { |
3577 | val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); |
3578 | if (!val) |
3579 | break; |
3580 | msleep(msecs: 20); |
3581 | } |
3582 | qed_fid_pretend(p_hwfn, p_ptt, fid: (u16)p_hwfn->hw_info.concrete_fid); |
3583 | |
3584 | if (cnt == 50) { |
DP_ERR(p_hwfn,
"VF[%d] - DORQ failed to clean up [usage 0x%08x]\n" ,
3587 | p_vf->abs_vf_id, val); |
3588 | return -EBUSY; |
3589 | } |
3590 | |
3591 | return 0; |
3592 | } |
3593 | |
3594 | #define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) |
3595 | |
3596 | static int |
3597 | qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, |
3598 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) |
3599 | { |
3600 | u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; |
3601 | u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; |
3602 | u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; |
3603 | u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; |
3604 | u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; |
3605 | u8 port_id, tc, tc_id = 0, voq = 0; |
3606 | int cnt; |
3607 | |
3608 | memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); |
3609 | memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); |
3610 | |
3611 | /* Read initial consumers & producers */ |
3612 | for (port_id = 0; port_id < max_ports_per_engine; port_id++) { |
3613 | /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ |
3614 | for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { |
3615 | tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; |
3616 | voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); |
3617 | cons[voq] = qed_rd(p_hwfn, p_ptt, |
3618 | hw_addr: cons_voq0_addr + voq * 0x40); |
3619 | prod = qed_rd(p_hwfn, p_ptt, |
3620 | hw_addr: prod_voq0_addr + voq * 0x40); |
3621 | distance[voq] = prod - cons[voq]; |
3622 | } |
3623 | } |
3624 | |
3625 | /* Wait for consumers to pass the producers */ |
3626 | port_id = 0; |
3627 | tc = 0; |
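/* port_id and tc deliberately live outside the retry loop: after
 * each sleep the scan resumes from the first VOQ that had not yet
 * drained, rather than rechecking VOQs that already passed.
 */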
3628 | for (cnt = 0; cnt < 50; cnt++) { |
3629 | for (; port_id < max_ports_per_engine; port_id++) { |
3630 | /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ |
3631 | for (; tc < max_phys_tcs_per_port + 1; tc++) { |
3632 | tc_id = (tc < max_phys_tcs_per_port) ? |
3633 | tc : PURE_LB_TC; |
3634 | voq = VOQ(port_id, |
3635 | tc_id, max_phys_tcs_per_port); |
3636 | tmp = qed_rd(p_hwfn, p_ptt, |
3637 | hw_addr: cons_voq0_addr + voq * 0x40); |
3638 | if (distance[voq] > tmp - cons[voq]) |
3639 | break; |
3640 | } |
3641 | |
3642 | if (tc == max_phys_tcs_per_port + 1) |
3643 | tc = 0; |
3644 | else |
3645 | break; |
3646 | } |
3647 | |
3648 | if (port_id == max_ports_per_engine) |
3649 | break; |
3650 | |
3651 | msleep(msecs: 20); |
3652 | } |
3653 | |
3654 | if (cnt == 50) { |
3655 | DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n" , |
3656 | p_vf->abs_vf_id, (int)voq); |
3657 | |
DP_ERR(p_hwfn, "VOQ %d has port_id %d and tc_id %d\n" ,
3659 | (int)voq, (int)port_id, (int)tc_id); |
3660 | |
3661 | return -EBUSY; |
3662 | } |
3663 | |
3664 | return 0; |
3665 | } |
3666 | |
3667 | static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, |
3668 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) |
3669 | { |
3670 | int rc; |
3671 | |
3672 | rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); |
3673 | if (rc) |
3674 | return rc; |
3675 | |
3676 | rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); |
3677 | if (rc) |
3678 | return rc; |
3679 | |
3680 | return 0; |
3681 | } |
3682 | |
3683 | static int |
3684 | qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, |
3685 | struct qed_ptt *p_ptt, |
3686 | u16 rel_vf_id, u32 *ack_vfs) |
3687 | { |
3688 | struct qed_vf_info *p_vf; |
3689 | int rc = 0; |
3690 | |
3691 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: rel_vf_id, b_enabled_only: false); |
3692 | if (!p_vf) |
3693 | return 0; |
3694 | |
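/* pending_flr is an array of u64 bitmaps, 64 VFs per word: e.g.
 * rel_vf_id 70 maps to word 70 / 64 = 1, bit 70 % 64 = 6.
 */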
3695 | if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & |
3696 | (1ULL << (rel_vf_id % 64))) { |
3697 | u16 vfid = p_vf->abs_vf_id; |
3698 | |
3699 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3700 | "VF[%d] - Handling FLR\n" , vfid); |
3701 | |
3702 | qed_iov_vf_cleanup(p_hwfn, p_vf); |
3703 | |
3704 | /* If VF isn't active, no need for anything but SW */ |
3705 | if (!p_vf->b_init) |
3706 | goto cleanup; |
3707 | |
3708 | rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); |
3709 | if (rc) |
3710 | goto cleanup; |
3711 | |
3712 | rc = qed_final_cleanup(p_hwfn, p_ptt, id: vfid, is_vf: true); |
3713 | if (rc) { |
DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n" , vfid);
3715 | return rc; |
3716 | } |
3717 | |
3718 | /* Workaround to make VF-PF channel ready, as FW |
3719 | * doesn't do that as a part of FLR. |
3720 | */ |
3721 | REG_WR(p_hwfn, |
3722 | GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, |
3723 | USTORM_VF_PF_CHANNEL_READY, vfid), 1); |
3724 | |
3725 | /* VF_STOPPED has to be set only after final cleanup |
3726 | * but prior to re-enabling the VF. |
3727 | */ |
3728 | p_vf->state = VF_STOPPED; |
3729 | |
3730 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf: p_vf); |
3731 | if (rc) { |
DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n" ,
3733 | vfid); |
3734 | return rc; |
3735 | } |
3736 | cleanup: |
3737 | /* Mark VF for ack and clean pending state */ |
3738 | if (p_vf->state == VF_RESET) |
3739 | p_vf->state = VF_STOPPED; |
3740 | ack_vfs[vfid / 32] |= BIT((vfid % 32)); |
3741 | p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= |
3742 | ~(1ULL << (rel_vf_id % 64)); |
3743 | p_vf->vf_mbx.b_pending_msg = false; |
3744 | } |
3745 | |
3746 | return rc; |
3747 | } |
3748 | |
3749 | static int |
3750 | qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
3751 | { |
3752 | u32 ack_vfs[VF_MAX_STATIC / 32]; |
3753 | int rc = 0; |
3754 | u16 i; |
3755 | |
3756 | memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); |
3757 | |
3758 | /* Since BRB <-> PRS interface can't be tested as part of the flr |
3759 | * polling due to HW limitations, simply sleep a bit. And since |
3760 | * there's no need to wait per-vf, do it before looping. |
3761 | */ |
3762 | msleep(msecs: 100); |
3763 | |
3764 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) |
3765 | qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id: i, ack_vfs); |
3766 | |
3767 | rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, vfs_to_ack: ack_vfs); |
3768 | return rc; |
3769 | } |
3770 | |
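/**
 * qed_iov_mark_vf_flr - mark VFs the MFW indicated as FLR-ed
 * @p_hwfn: hardware function pointer
 * @p_disabled_vfs: bitmap of disabled VFs, 32 VFs per element
 *
 * Return: true if at least one VF was marked as pending FLR cleanup.
 */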
3771 | bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) |
3772 | { |
3773 | bool found = false; |
3774 | u16 i; |
3775 | |
3776 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n" ); |
3777 | for (i = 0; i < (VF_MAX_STATIC / 32); i++) |
3778 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3779 | "[%08x,...,%08x]: %08x\n" , |
3780 | i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); |
3781 | |
3782 | if (!p_hwfn->cdev->p_iov_info) { |
3783 | DP_NOTICE(p_hwfn, "VF flr but no IOV\n" ); |
3784 | return false; |
3785 | } |
3786 | |
3787 | /* Mark VFs */ |
3788 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { |
3789 | struct qed_vf_info *p_vf; |
3790 | u8 vfid; |
3791 | |
3792 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: i, b_enabled_only: false); |
3793 | if (!p_vf) |
3794 | continue; |
3795 | |
3796 | vfid = p_vf->abs_vf_id; |
3797 | if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { |
3798 | u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; |
3799 | u16 rel_vf_id = p_vf->relative_vf_id; |
3800 | |
3801 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3802 | "VF[%d] [rel %d] got FLR-ed\n" , |
3803 | vfid, rel_vf_id); |
3804 | |
3805 | p_vf->state = VF_RESET; |
3806 | |
/* No need to lock here, since pending_flr only changes here
 * and before ACKing the MFW. Since the MFW will not trigger
 * an additional attention for VF FLR until the previous one
 * is ACKed, we're safe.
 */
3812 | p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); |
3813 | found = true; |
3814 | } |
3815 | } |
3816 | |
3817 | return found; |
3818 | } |
3819 | |
3820 | static int qed_iov_get_link(struct qed_hwfn *p_hwfn, |
3821 | u16 vfid, |
3822 | struct qed_mcp_link_params *p_params, |
3823 | struct qed_mcp_link_state *p_link, |
3824 | struct qed_mcp_link_capabilities *p_caps) |
3825 | { |
3826 | struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, |
3827 | relative_vf_id: vfid, |
3828 | b_enabled_only: false); |
3829 | struct qed_bulletin_content *p_bulletin; |
3830 | |
3831 | if (!p_vf) |
3832 | return -EINVAL; |
3833 | |
3834 | p_bulletin = p_vf->bulletin.p_virt; |
3835 | |
3836 | if (p_params) |
3837 | __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); |
3838 | if (p_link) |
3839 | __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); |
3840 | if (p_caps) |
3841 | __qed_vf_get_link_caps(p_hwfn, p_link_caps: p_caps, p_bulletin); |
3842 | return 0; |
3843 | } |
3844 | |
3845 | static int |
3846 | qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, |
3847 | struct qed_ptt *p_ptt, |
3848 | struct qed_vf_info *p_vf) |
3849 | { |
3850 | struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; |
3851 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; |
3852 | struct vfpf_bulletin_update_mac_tlv *p_req; |
3853 | u8 status = PFVF_STATUS_SUCCESS; |
3854 | int rc = 0; |
3855 | |
3856 | if (!p_vf->p_vf_info.is_trusted_configured) { |
3857 | DP_VERBOSE(p_hwfn, |
3858 | QED_MSG_IOV, |
3859 | "Blocking bulletin update request from untrusted VF[%d]\n" , |
3860 | p_vf->abs_vf_id); |
3861 | status = PFVF_STATUS_NOT_SUPPORTED; |
3862 | rc = -EINVAL; |
3863 | goto send_status; |
3864 | } |
3865 | |
3866 | p_req = &mbx->req_virt->bulletin_update_mac; |
3867 | ether_addr_copy(dst: p_bulletin->mac, src: p_req->mac); |
3868 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3869 | "Updated bulletin of VF[%d] with requested MAC[%pM]\n" , |
3870 | p_vf->abs_vf_id, p_req->mac); |
3871 | |
3872 | send_status: |
3873 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: p_vf, |
3874 | type: CHANNEL_TLV_BULLETIN_UPDATE_MAC, |
3875 | length: sizeof(struct pfvf_def_resp_tlv), status); |
3876 | return rc; |
3877 | } |
3878 | |
3879 | static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, |
3880 | struct qed_ptt *p_ptt, int vfid) |
3881 | { |
3882 | struct qed_iov_vf_mbx *mbx; |
3883 | struct qed_vf_info *p_vf; |
3884 | |
3885 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
3886 | if (!p_vf) |
3887 | return; |
3888 | |
3889 | mbx = &p_vf->vf_mbx; |
3890 | |
3892 | if (!mbx->b_pending_msg) { |
3893 | DP_NOTICE(p_hwfn, |
3894 | "VF[%02x]: Trying to process mailbox message when none is pending\n" , |
3895 | p_vf->abs_vf_id); |
3896 | return; |
3897 | } |
3898 | mbx->b_pending_msg = false; |
3899 | |
3900 | mbx->first_tlv = mbx->req_virt->first_tlv; |
3901 | |
3902 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3903 | "VF[%02x]: Processing mailbox message [type %04x]\n" , |
3904 | p_vf->abs_vf_id, mbx->first_tlv.tl.type); |
3905 | |
3906 | /* check if tlv type is known */ |
3907 | if (qed_iov_tlv_supported(tlvtype: mbx->first_tlv.tl.type) && |
3908 | !p_vf->b_malicious) { |
3909 | switch (mbx->first_tlv.tl.type) { |
3910 | case CHANNEL_TLV_ACQUIRE: |
3911 | qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, vf: p_vf); |
3912 | break; |
3913 | case CHANNEL_TLV_VPORT_START: |
3914 | qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, vf: p_vf); |
3915 | break; |
3916 | case CHANNEL_TLV_VPORT_TEARDOWN: |
3917 | qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, vf: p_vf); |
3918 | break; |
3919 | case CHANNEL_TLV_START_RXQ: |
3920 | qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, vf: p_vf); |
3921 | break; |
3922 | case CHANNEL_TLV_START_TXQ: |
3923 | qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, vf: p_vf); |
3924 | break; |
3925 | case CHANNEL_TLV_STOP_RXQS: |
3926 | qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, vf: p_vf); |
3927 | break; |
3928 | case CHANNEL_TLV_STOP_TXQS: |
3929 | qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, vf: p_vf); |
3930 | break; |
3931 | case CHANNEL_TLV_UPDATE_RXQ: |
3932 | qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, vf: p_vf); |
3933 | break; |
3934 | case CHANNEL_TLV_VPORT_UPDATE: |
3935 | qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, vf: p_vf); |
3936 | break; |
3937 | case CHANNEL_TLV_UCAST_FILTER: |
3938 | qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, vf: p_vf); |
3939 | break; |
3940 | case CHANNEL_TLV_CLOSE: |
3941 | qed_iov_vf_mbx_close(p_hwfn, p_ptt, vf: p_vf); |
3942 | break; |
3943 | case CHANNEL_TLV_INT_CLEANUP: |
3944 | qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, vf: p_vf); |
3945 | break; |
3946 | case CHANNEL_TLV_RELEASE: |
3947 | qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); |
3948 | break; |
3949 | case CHANNEL_TLV_UPDATE_TUNN_PARAM: |
3950 | qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); |
3951 | break; |
3952 | case CHANNEL_TLV_COALESCE_UPDATE: |
3953 | qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, vf: p_vf); |
3954 | break; |
3955 | case CHANNEL_TLV_COALESCE_READ: |
3956 | qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); |
3957 | break; |
3958 | case CHANNEL_TLV_BULLETIN_UPDATE_MAC: |
3959 | qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); |
3960 | break; |
3961 | } |
3962 | } else if (qed_iov_tlv_supported(tlvtype: mbx->first_tlv.tl.type)) { |
3963 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3964 | "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n" , |
3965 | p_vf->abs_vf_id, mbx->first_tlv.tl.type); |
3966 | |
3967 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: p_vf, |
3968 | type: mbx->first_tlv.tl.type, |
3969 | length: sizeof(struct pfvf_def_resp_tlv), |
3970 | status: PFVF_STATUS_MALICIOUS); |
3971 | } else { |
/* Unknown TLV - this may belong to a VF driver from the future
 * - a version written after this PF driver, which supports
 * features not yet known to us. Too bad, since we don't support
 * them. Or it may be that a buggy VF driver is sending garbage
 * over the channel.
 */
3978 | DP_NOTICE(p_hwfn, |
3979 | "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n" , |
3980 | p_vf->abs_vf_id, |
3981 | mbx->first_tlv.tl.type, |
3982 | mbx->first_tlv.tl.length, |
3983 | mbx->first_tlv.padding, mbx->first_tlv.reply_address); |
3984 | |
3985 | /* Try replying in case reply address matches the acquisition's |
3986 | * posted address. |
3987 | */ |
3988 | if (p_vf->acquire.first_tlv.reply_address && |
3989 | (mbx->first_tlv.reply_address == |
3990 | p_vf->acquire.first_tlv.reply_address)) { |
3991 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf_info: p_vf, |
3992 | type: mbx->first_tlv.tl.type, |
3993 | length: sizeof(struct pfvf_def_resp_tlv), |
3994 | status: PFVF_STATUS_NOT_SUPPORTED); |
3995 | } else { |
3996 | DP_VERBOSE(p_hwfn, |
3997 | QED_MSG_IOV, |
3998 | "VF[%02x]: Can't respond to TLV - no valid reply address\n" , |
3999 | p_vf->abs_vf_id); |
4000 | } |
4001 | } |
4002 | } |
4003 | |
4004 | static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) |
4005 | { |
4006 | int i; |
4007 | |
4008 | memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); |
4009 | |
4010 | qed_for_each_vf(p_hwfn, i) { |
4011 | struct qed_vf_info *p_vf; |
4012 | |
4013 | p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; |
4014 | if (p_vf->vf_mbx.b_pending_msg) |
4015 | events[i / 64] |= 1ULL << (i % 64); |
4016 | } |
4017 | } |
4018 | |
4019 | static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, |
4020 | u16 abs_vfid) |
4021 | { |
4022 | u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; |
4023 | |
4024 | if (!_qed_iov_pf_sanity_check(p_hwfn, vfid: (int)abs_vfid - min, b_fail_malicious: false)) { |
4025 | DP_VERBOSE(p_hwfn, |
4026 | QED_MSG_IOV, |
4027 | "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n" , |
4028 | abs_vfid); |
4029 | return NULL; |
4030 | } |
4031 | |
4032 | return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; |
4033 | } |
4034 | |
4035 | static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, |
4036 | u16 abs_vfid, struct regpair *vf_msg) |
4037 | { |
4038 | struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, |
4039 | abs_vfid); |
4040 | |
4041 | if (!p_vf) |
4042 | return 0; |
4043 | |
/* Record the physical address of the request so that the
 * handler can later copy the message from it.
 */
4047 | p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); |
4048 | |
4049 | /* Mark the event and schedule the workqueue */ |
4050 | p_vf->vf_mbx.b_pending_msg = true; |
4051 | qed_schedule_iov(hwfn: p_hwfn, flag: QED_IOV_WQ_MSG_FLAG); |
4052 | |
4053 | return 0; |
4054 | } |
4055 | |
4056 | void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, |
4057 | struct fw_err_data *p_data) |
4058 | { |
4059 | struct qed_vf_info *p_vf; |
4060 | |
4061 | p_vf = qed_sriov_get_vf_from_absid(p_hwfn, abs_vfid: qed_vf_from_entity_id |
4062 | (entity_id: p_data->entity_id)); |
4063 | if (!p_vf) |
4064 | return; |
4065 | |
4066 | if (!p_vf->b_malicious) { |
4067 | DP_NOTICE(p_hwfn, |
4068 | "VF [%d] - Malicious behavior [%02x]\n" , |
4069 | p_vf->abs_vf_id, p_data->err_id); |
4070 | |
4071 | p_vf->b_malicious = true; |
4072 | } else { |
4073 | DP_INFO(p_hwfn, |
4074 | "VF [%d] - Malicious behavior [%02x]\n" , |
4075 | p_vf->abs_vf_id, p_data->err_id); |
4076 | } |
4077 | } |
4078 | |
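/**
 * qed_sriov_eqe_event - handle SR-IOV events from the event ring
 * @p_hwfn: hardware function pointer
 * @opcode: event ring opcode
 * @echo: echo field of the event
 * @data: event ring data
 * @fw_return_code: FW return code
 *
 * Return: 0 on success, -EINVAL for an unknown opcode.
 */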
4079 | int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, |
4080 | union event_ring_data *data, u8 fw_return_code) |
4081 | { |
4082 | switch (opcode) { |
4083 | case COMMON_EVENT_VF_PF_CHANNEL: |
4084 | return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), |
4085 | vf_msg: &data->vf_pf_channel.msg_addr); |
4086 | default: |
4087 | DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n" , |
4088 | opcode); |
4089 | return -EINVAL; |
4090 | } |
4091 | } |
4092 | |
4093 | u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) |
4094 | { |
4095 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; |
4096 | u16 i; |
4097 | |
4098 | if (!p_iov) |
4099 | goto out; |
4100 | |
4101 | for (i = rel_vf_id; i < p_iov->total_vfs; i++) |
if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id: i, b_enabled_only: true, b_non_malicious: false))
4103 | return i; |
4104 | |
4105 | out: |
4106 | return MAX_NUM_VFS; |
4107 | } |
4108 | |
4109 | static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, |
4110 | int vfid) |
4111 | { |
4112 | struct qed_dmae_params params; |
4113 | struct qed_vf_info *vf_info; |
4114 | |
4115 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4116 | if (!vf_info) |
4117 | return -EINVAL; |
4118 | |
memset(&params, 0, sizeof(params));
4120 | SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); |
4121 | SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); |
4122 | params.src_vfid = vf_info->abs_vf_id; |
4123 | |
4124 | if (qed_dmae_host2host(p_hwfn, p_ptt: ptt, |
4125 | source_addr: vf_info->vf_mbx.pending_req, |
4126 | dest_addr: vf_info->vf_mbx.req_phys, |
size_in_dwords: sizeof(union vfpf_tlvs) / 4, p_params: &params)) {
4128 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
4129 | "Failed to copy message from VF 0x%02x\n" , vfid); |
4130 | |
4131 | return -EIO; |
4132 | } |
4133 | |
4134 | return 0; |
4135 | } |
4136 | |
4137 | static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, |
4138 | u8 *mac, int vfid) |
4139 | { |
4140 | struct qed_vf_info *vf_info; |
4141 | u64 feature; |
4142 | |
4143 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4144 | if (!vf_info) { |
4145 | DP_NOTICE(p_hwfn->cdev, |
4146 | "Can not set forced MAC, invalid vfid [%d]\n" , vfid); |
4147 | return; |
4148 | } |
4149 | |
4150 | if (vf_info->b_malicious) { |
4151 | DP_NOTICE(p_hwfn->cdev, |
4152 | "Can't set forced MAC to malicious VF [%d]\n" , vfid); |
4153 | return; |
4154 | } |
4155 | |
4156 | if (vf_info->p_vf_info.is_trusted_configured) { |
4157 | feature = BIT(VFPF_BULLETIN_MAC_ADDR); |
4158 | /* Trust mode will disable Forced MAC */ |
4159 | vf_info->bulletin.p_virt->valid_bitmap &= |
4160 | ~BIT(MAC_ADDR_FORCED); |
4161 | } else { |
4162 | feature = BIT(MAC_ADDR_FORCED); |
4163 | /* Forced MAC will disable MAC_ADDR */ |
4164 | vf_info->bulletin.p_virt->valid_bitmap &= |
4165 | ~BIT(VFPF_BULLETIN_MAC_ADDR); |
4166 | } |
4167 | |
4168 | memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); |
4169 | |
4170 | vf_info->bulletin.p_virt->valid_bitmap |= feature; |
4171 | |
4172 | qed_iov_configure_vport_forced(p_hwfn, p_vf: vf_info, events: feature); |
4173 | } |
4174 | |
4175 | static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) |
4176 | { |
4177 | struct qed_vf_info *vf_info; |
4178 | u64 feature; |
4179 | |
4180 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4181 | if (!vf_info) { |
4182 | DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n" , |
4183 | vfid); |
4184 | return -EINVAL; |
4185 | } |
4186 | |
4187 | if (vf_info->b_malicious) { |
4188 | DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n" , |
4189 | vfid); |
4190 | return -EINVAL; |
4191 | } |
4192 | |
4193 | if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { |
4194 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
4195 | "Can not set MAC, Forced MAC is configured\n" ); |
4196 | return -EINVAL; |
4197 | } |
4198 | |
4199 | feature = BIT(VFPF_BULLETIN_MAC_ADDR); |
4200 | ether_addr_copy(dst: vf_info->bulletin.p_virt->mac, src: mac); |
4201 | |
4202 | vf_info->bulletin.p_virt->valid_bitmap |= feature; |
4203 | |
4204 | if (vf_info->p_vf_info.is_trusted_configured) |
4205 | qed_iov_configure_vport_forced(p_hwfn, p_vf: vf_info, events: feature); |
4206 | |
4207 | return 0; |
4208 | } |
4209 | |
4210 | static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, |
4211 | u16 pvid, int vfid) |
4212 | { |
4213 | struct qed_vf_info *vf_info; |
4214 | u64 feature; |
4215 | |
4216 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4217 | if (!vf_info) { |
DP_NOTICE(p_hwfn->cdev,
"Can not set forced vlan, invalid vfid [%d]\n" , vfid);
4220 | return; |
4221 | } |
4222 | |
4223 | if (vf_info->b_malicious) { |
4224 | DP_NOTICE(p_hwfn->cdev, |
4225 | "Can't set forced vlan to malicious VF [%d]\n" , vfid); |
4226 | return; |
4227 | } |
4228 | |
4229 | feature = 1 << VLAN_ADDR_FORCED; |
4230 | vf_info->bulletin.p_virt->pvid = pvid; |
4231 | if (pvid) |
4232 | vf_info->bulletin.p_virt->valid_bitmap |= feature; |
4233 | else |
4234 | vf_info->bulletin.p_virt->valid_bitmap &= ~feature; |
4235 | |
4236 | qed_iov_configure_vport_forced(p_hwfn, p_vf: vf_info, events: feature); |
4237 | } |
4238 | |
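/**
 * qed_iov_bulletin_set_udp_ports - publish tunnel UDP ports in a VF bulletin
 * @p_hwfn: hardware function pointer
 * @vfid: relative VF index
 * @vxlan_port: VXLAN UDP port
 * @geneve_port: GENEVE UDP port
 */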
4239 | void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, |
4240 | int vfid, u16 vxlan_port, u16 geneve_port) |
4241 | { |
4242 | struct qed_vf_info *vf_info; |
4243 | |
4244 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4245 | if (!vf_info) { |
4246 | DP_NOTICE(p_hwfn->cdev, |
4247 | "Can not set udp ports, invalid vfid [%d]\n" , vfid); |
4248 | return; |
4249 | } |
4250 | |
4251 | if (vf_info->b_malicious) { |
4252 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
4253 | "Can not set udp ports to malicious VF [%d]\n" , |
4254 | vfid); |
4255 | return; |
4256 | } |
4257 | |
4258 | vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; |
4259 | vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; |
4260 | } |
4261 | |
4262 | static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) |
4263 | { |
4264 | struct qed_vf_info *p_vf_info; |
4265 | |
4266 | p_vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4267 | if (!p_vf_info) |
4268 | return false; |
4269 | |
4270 | return !!p_vf_info->vport_instance; |
4271 | } |
4272 | |
4273 | static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) |
4274 | { |
4275 | struct qed_vf_info *p_vf_info; |
4276 | |
4277 | p_vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4278 | if (!p_vf_info) |
4279 | return true; |
4280 | |
4281 | return p_vf_info->state == VF_STOPPED; |
4282 | } |
4283 | |
4284 | static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) |
4285 | { |
4286 | struct qed_vf_info *vf_info; |
4287 | |
4288 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4289 | if (!vf_info) |
4290 | return false; |
4291 | |
4292 | return vf_info->spoof_chk; |
4293 | } |
4294 | |
4295 | static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) |
4296 | { |
4297 | struct qed_vf_info *vf; |
4298 | int rc = -EINVAL; |
4299 | |
4300 | if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { |
4301 | DP_NOTICE(p_hwfn, |
4302 | "SR-IOV sanity check failed, can't set spoofchk\n" ); |
4303 | goto out; |
4304 | } |
4305 | |
4306 | vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4307 | if (!vf) |
4308 | goto out; |
4309 | |
4310 | if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { |
4311 | /* After VF VPORT start PF will configure spoof check */ |
4312 | vf->req_spoofchk_val = val; |
4313 | rc = 0; |
4314 | goto out; |
4315 | } |
4316 | |
4317 | rc = __qed_iov_spoofchk_set(p_hwfn, p_vf: vf, val); |
4318 | |
4319 | out: |
4320 | return rc; |
4321 | } |
4322 | |
4323 | static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) |
4324 | { |
4325 | struct qed_vf_info *p_vf; |
4326 | |
4327 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: rel_vf_id, b_enabled_only: true); |
4328 | if (!p_vf || !p_vf->bulletin.p_virt) |
4329 | return NULL; |
4330 | |
4331 | if (!(p_vf->bulletin.p_virt->valid_bitmap & |
4332 | BIT(VFPF_BULLETIN_MAC_ADDR))) |
4333 | return NULL; |
4334 | |
4335 | return p_vf->bulletin.p_virt->mac; |
4336 | } |
4337 | |
4338 | static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, |
4339 | u16 rel_vf_id) |
4340 | { |
4341 | struct qed_vf_info *p_vf; |
4342 | |
4343 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: rel_vf_id, b_enabled_only: true); |
4344 | if (!p_vf || !p_vf->bulletin.p_virt) |
4345 | return NULL; |
4346 | |
4347 | if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) |
4348 | return NULL; |
4349 | |
4350 | return p_vf->bulletin.p_virt->mac; |
4351 | } |
4352 | |
4353 | static u16 |
4354 | qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) |
4355 | { |
4356 | struct qed_vf_info *p_vf; |
4357 | |
4358 | p_vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: rel_vf_id, b_enabled_only: true); |
4359 | if (!p_vf || !p_vf->bulletin.p_virt) |
4360 | return 0; |
4361 | |
4362 | if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) |
4363 | return 0; |
4364 | |
4365 | return p_vf->bulletin.p_virt->pvid; |
4366 | } |
4367 | |
4368 | static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, |
4369 | struct qed_ptt *p_ptt, int vfid, int val) |
4370 | { |
4371 | struct qed_vf_info *vf; |
4372 | u8 abs_vp_id = 0; |
4373 | u16 rl_id; |
4374 | int rc; |
4375 | |
4376 | vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4377 | if (!vf) |
4378 | return -EINVAL; |
4379 | |
4380 | rc = qed_fw_vport(p_hwfn, src_id: vf->vport_id, dst_id: &abs_vp_id); |
4381 | if (rc) |
4382 | return rc; |
4383 | |
4384 | rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ |
4385 | return qed_init_global_rl(p_hwfn, p_ptt, rl_id, rate_limit: (u32)val, |
4386 | vport_rl_type: QM_RL_TYPE_NORMAL); |
4387 | } |
4388 | |
4389 | static int |
4390 | qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) |
4391 | { |
4392 | struct qed_vf_info *vf; |
4393 | u8 vport_id; |
4394 | int i; |
4395 | |
4396 | for_each_hwfn(cdev, i) { |
4397 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
4398 | |
4399 | if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { |
4400 | DP_NOTICE(p_hwfn, |
4401 | "SR-IOV sanity check failed, can't set min rate\n" ); |
4402 | return -EINVAL; |
4403 | } |
4404 | } |
4405 | |
4406 | vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), relative_vf_id: (u16)vfid, b_enabled_only: true); |
4407 | if (!vf) |
4408 | return -EINVAL; |
4409 | |
4410 | vport_id = vf->vport_id; |
4411 | |
4412 | return qed_configure_vport_wfq(cdev, vp_id: vport_id, rate); |
4413 | } |
4414 | |
4415 | static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) |
4416 | { |
4417 | struct qed_wfq_data *vf_vp_wfq; |
4418 | struct qed_vf_info *vf_info; |
4419 | |
4420 | vf_info = qed_iov_get_vf_info(p_hwfn, relative_vf_id: (u16)vfid, b_enabled_only: true); |
4421 | if (!vf_info) |
4422 | return 0; |
4423 | |
4424 | vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; |
4425 | |
4426 | if (vf_vp_wfq->configured) |
4427 | return vf_vp_wfq->min_speed; |
4428 | else |
4429 | return 0; |
4430 | } |
4431 | |
4432 | /** |
4433 | * qed_schedule_iov - schedules IOV task for VF and PF |
4434 | * @hwfn: hardware function pointer |
4435 | * @flag: IOV flag for VF/PF |
4436 | */ |
4437 | void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) |
4438 | { |
4439 | /* Memory barrier for setting atomic bit */ |
4440 | smp_mb__before_atomic(); |
4441 | set_bit(nr: flag, addr: &hwfn->iov_task_flags); |
4442 | /* Memory barrier after setting atomic bit */ |
4443 | smp_mb__after_atomic(); |
4444 | DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n" , flag); |
4445 | queue_delayed_work(wq: hwfn->iov_wq, dwork: &hwfn->iov_task, delay: 0); |
4446 | } |
4447 | |
4448 | void qed_vf_start_iov_wq(struct qed_dev *cdev) |
4449 | { |
4450 | int i; |
4451 | |
4452 | for_each_hwfn(cdev, i) |
4453 | queue_delayed_work(wq: cdev->hwfns[i].iov_wq, |
4454 | dwork: &cdev->hwfns[i].iov_task, delay: 0); |
4455 | } |
4456 | |
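/**
 * qed_sriov_disable - disable SR-IOV and release per-VF resources
 * @cdev: qed device
 * @pci_enabled: true if pci_enable_sriov() was previously called and
 *               PCI SR-IOV should be disabled as well
 *
 * Return: 0 on success, -EBUSY if a PTT could not be acquired.
 */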
4457 | int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) |
4458 | { |
4459 | int i, j; |
4460 | |
4461 | for_each_hwfn(cdev, i) |
4462 | if (cdev->hwfns[i].iov_wq) |
4463 | flush_workqueue(cdev->hwfns[i].iov_wq); |
4464 | |
4465 | /* Mark VFs for disablement */ |
4466 | qed_iov_set_vfs_to_disable(cdev, to_disable: true); |
4467 | |
4468 | if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) |
4469 | pci_disable_sriov(dev: cdev->pdev); |
4470 | |
4471 | if (cdev->recov_in_prog) { |
4472 | DP_VERBOSE(cdev, |
4473 | QED_MSG_IOV, |
4474 | "Skip SRIOV disable operations in the device since a recovery is in progress\n" ); |
4475 | goto out; |
4476 | } |
4477 | |
4478 | for_each_hwfn(cdev, i) { |
4479 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; |
4480 | struct qed_ptt *ptt = qed_ptt_acquire(p_hwfn: hwfn); |
4481 | |
4482 | /* Failure to acquire the ptt in 100g creates an odd error |
* where the first engine has already released IOV.
4484 | */ |
4485 | if (!ptt) { |
4486 | DP_ERR(hwfn, "Failed to acquire ptt\n" ); |
4487 | return -EBUSY; |
4488 | } |
4489 | |
4490 | /* Clean WFQ db and configure equal weight for all vports */ |
4491 | qed_clean_wfq_db(p_hwfn: hwfn, p_ptt: ptt); |
4492 | |
4493 | qed_for_each_vf(hwfn, j) { |
4494 | int k; |
4495 | |
4496 | if (!qed_iov_is_valid_vfid(p_hwfn: hwfn, rel_vf_id: j, b_enabled_only: true, b_non_malicious: false)) |
4497 | continue; |
4498 | |
4499 | /* Wait until VF is disabled before releasing */ |
4500 | for (k = 0; k < 100; k++) { |
4501 | if (!qed_iov_is_vf_stopped(p_hwfn: hwfn, vfid: j)) |
4502 | msleep(msecs: 20); |
4503 | else |
4504 | break; |
4505 | } |
4506 | |
4507 | if (k < 100) |
4508 | qed_iov_release_hw_for_vf(p_hwfn: &cdev->hwfns[i], |
4509 | p_ptt: ptt, rel_vf_id: j); |
4510 | else |
4511 | DP_ERR(hwfn, |
4512 | "Timeout waiting for VF's FLR to end\n" ); |
4513 | } |
4514 | |
4515 | qed_ptt_release(p_hwfn: hwfn, p_ptt: ptt); |
4516 | } |
4517 | out: |
4518 | qed_iov_set_vfs_to_disable(cdev, to_disable: false); |
4519 | |
4520 | return 0; |
4521 | } |
4522 | |
4523 | static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, |
4524 | u16 vfid, |
4525 | struct qed_iov_vf_init_params *params) |
4526 | { |
4527 | u16 base, i; |
4528 | |
/* Since resources are distributed equally per VF, and the PF is
 * assumed to have acquired the first QED_PF_L2_QUE queues, VF
 * queues are assigned sequentially from there.
 */
4533 | base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; |
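/* A worked example with assumed feature sizes: if the PF owns the
 * first 16 L2 queues and each VF was granted 4 queues, VF 2 gets
 * queue IDs 16 + 2 * 4 = 24..27 for both Rx and Tx.
 */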
4534 | |
4535 | params->rel_vf_id = vfid; |
4536 | for (i = 0; i < params->num_queues; i++) { |
4537 | params->req_rx_queue[i] = base + i; |
4538 | params->req_tx_queue[i] = base + i; |
4539 | } |
4540 | } |
4541 | |
4542 | static int qed_sriov_enable(struct qed_dev *cdev, int num) |
4543 | { |
4544 | struct qed_iov_vf_init_params params; |
4545 | struct qed_hwfn *hwfn; |
4546 | struct qed_ptt *ptt; |
4547 | int i, j, rc; |
4548 | |
4549 | if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { |
4550 | DP_NOTICE(cdev, "Can start at most %d VFs\n" , |
4551 | RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); |
4552 | return -EINVAL; |
4553 | } |
4554 | |
memset(&params, 0, sizeof(params));
4556 | |
4557 | /* Initialize HW for VF access */ |
4558 | for_each_hwfn(cdev, j) { |
4559 | hwfn = &cdev->hwfns[j]; |
4560 | ptt = qed_ptt_acquire(p_hwfn: hwfn); |
4561 | |
4562 | /* Make sure not to use more than 16 queues per VF */ |
4563 | params.num_queues = min_t(int, |
4564 | FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, |
4565 | 16); |
4566 | |
4567 | if (!ptt) { |
4568 | DP_ERR(hwfn, "Failed to acquire ptt\n" ); |
4569 | rc = -EBUSY; |
4570 | goto err; |
4571 | } |
4572 | |
4573 | for (i = 0; i < num; i++) { |
4574 | if (!qed_iov_is_valid_vfid(p_hwfn: hwfn, rel_vf_id: i, b_enabled_only: false, b_non_malicious: true)) |
4575 | continue; |
4576 | |
qed_sriov_enable_qid_config(hwfn, vfid: i, params: &params);
rc = qed_iov_init_hw_for_vf(p_hwfn: hwfn, p_ptt: ptt, p_params: &params);
4579 | if (rc) { |
4580 | DP_ERR(cdev, "Failed to enable VF[%d]\n" , i); |
4581 | qed_ptt_release(p_hwfn: hwfn, p_ptt: ptt); |
4582 | goto err; |
4583 | } |
4584 | } |
4585 | |
4586 | qed_ptt_release(p_hwfn: hwfn, p_ptt: ptt); |
4587 | } |
4588 | |
4589 | /* Enable SRIOV PCIe functions */ |
4590 | rc = pci_enable_sriov(dev: cdev->pdev, nr_virtfn: num); |
4591 | if (rc) { |
4592 | DP_ERR(cdev, "Failed to enable sriov [%d]\n" , rc); |
4593 | goto err; |
4594 | } |
4595 | |
4596 | hwfn = QED_LEADING_HWFN(cdev); |
4597 | ptt = qed_ptt_acquire(p_hwfn: hwfn); |
4598 | if (!ptt) { |
4599 | DP_ERR(hwfn, "Failed to acquire ptt\n" ); |
4600 | rc = -EBUSY; |
4601 | goto err; |
4602 | } |
4603 | |
4604 | rc = qed_mcp_ov_update_eswitch(p_hwfn: hwfn, p_ptt: ptt, eswitch: QED_OV_ESWITCH_VEB); |
4605 | if (rc) |
4606 | DP_INFO(cdev, "Failed to update eswitch mode\n" ); |
4607 | qed_ptt_release(p_hwfn: hwfn, p_ptt: ptt); |
4608 | |
4609 | return num; |
4610 | |
4611 | err: |
4612 | qed_sriov_disable(cdev, pci_enabled: false); |
4613 | return rc; |
4614 | } |
4615 | |
4616 | static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) |
4617 | { |
4618 | if (!IS_QED_SRIOV(cdev)) { |
4619 | DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n" ); |
4620 | return -EOPNOTSUPP; |
4621 | } |
4622 | |
4623 | if (num_vfs_param) |
4624 | return qed_sriov_enable(cdev, num: num_vfs_param); |
4625 | else |
4626 | return qed_sriov_disable(cdev, pci_enabled: true); |
4627 | } |
4628 | |
4629 | static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) |
4630 | { |
4631 | int i; |
4632 | |
4633 | if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { |
4634 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4635 | "Cannot set a VF MAC; Sriov is not enabled\n" ); |
4636 | return -EINVAL; |
4637 | } |
4638 | |
4639 | if (!qed_iov_is_valid_vfid(p_hwfn: &cdev->hwfns[0], rel_vf_id: vfid, b_enabled_only: true, b_non_malicious: true)) { |
4640 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4641 | "Cannot set VF[%d] MAC (VF is not active)\n" , vfid); |
4642 | return -EINVAL; |
4643 | } |
4644 | |
4645 | for_each_hwfn(cdev, i) { |
4646 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; |
4647 | struct qed_public_vf_info *vf_info; |
4648 | |
4649 | vf_info = qed_iov_get_public_vf_info(p_hwfn: hwfn, relative_vf_id: vfid, b_enabled_only: true); |
4650 | if (!vf_info) |
4651 | continue; |
4652 | |
4653 | /* Set the MAC, and schedule the IOV task */ |
4654 | if (vf_info->is_trusted_configured) |
4655 | ether_addr_copy(dst: vf_info->mac, src: mac); |
4656 | else |
4657 | ether_addr_copy(dst: vf_info->forced_mac, src: mac); |
4658 | |
4659 | qed_schedule_iov(hwfn, flag: QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); |
4660 | } |
4661 | |
4662 | return 0; |
4663 | } |
4664 | |
4665 | static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) |
4666 | { |
4667 | int i; |
4668 | |
4669 | if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { |
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set a VF VLAN; Sriov is not enabled\n" );
4672 | return -EINVAL; |
4673 | } |
4674 | |
4675 | if (!qed_iov_is_valid_vfid(p_hwfn: &cdev->hwfns[0], rel_vf_id: vfid, b_enabled_only: true, b_non_malicious: true)) { |
DP_VERBOSE(cdev, QED_MSG_IOV,
"Cannot set VF[%d] VLAN (VF is not active)\n" , vfid);
4678 | return -EINVAL; |
4679 | } |
4680 | |
4681 | for_each_hwfn(cdev, i) { |
4682 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; |
4683 | struct qed_public_vf_info *vf_info; |
4684 | |
4685 | vf_info = qed_iov_get_public_vf_info(p_hwfn: hwfn, relative_vf_id: vfid, b_enabled_only: true); |
4686 | if (!vf_info) |
4687 | continue; |
4688 | |
4689 | /* Set the forced vlan, and schedule the IOV task */ |
4690 | vf_info->forced_vlan = vid; |
4691 | qed_schedule_iov(hwfn, flag: QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); |
4692 | } |
4693 | |
4694 | return 0; |
4695 | } |
4696 | |
4697 | static int qed_get_vf_config(struct qed_dev *cdev, |
4698 | int vf_id, struct ifla_vf_info *ivi) |
4699 | { |
4700 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); |
4701 | struct qed_public_vf_info *vf_info; |
4702 | struct qed_mcp_link_state link; |
4703 | u32 tx_rate; |
4704 | int ret; |
4705 | |
4706 | /* Sanitize request */ |
4707 | if (IS_VF(cdev)) |
4708 | return -EINVAL; |
4709 | |
4710 | if (!qed_iov_is_valid_vfid(p_hwfn: &cdev->hwfns[0], rel_vf_id: vf_id, b_enabled_only: true, b_non_malicious: false)) { |
4711 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4712 | "VF index [%d] isn't active\n" , vf_id); |
4713 | return -EINVAL; |
4714 | } |
4715 | |
4716 | vf_info = qed_iov_get_public_vf_info(p_hwfn: hwfn, relative_vf_id: vf_id, b_enabled_only: true); |
4717 | |
4718 | ret = qed_iov_get_link(p_hwfn: hwfn, vfid: vf_id, NULL, p_link: &link, NULL); |
4719 | if (ret) |
4720 | return ret; |
4721 | |
4722 | /* Fill information about VF */ |
4723 | ivi->vf = vf_id; |
4724 | |
4725 | if (is_valid_ether_addr(addr: vf_info->forced_mac)) |
4726 | ether_addr_copy(dst: ivi->mac, src: vf_info->forced_mac); |
4727 | else |
4728 | ether_addr_copy(dst: ivi->mac, src: vf_info->mac); |
4729 | |
4730 | ivi->vlan = vf_info->forced_vlan; |
4731 | ivi->spoofchk = qed_iov_spoofchk_get(p_hwfn: hwfn, vfid: vf_id); |
4732 | ivi->linkstate = vf_info->link_state; |
4733 | tx_rate = vf_info->tx_rate; |
4734 | ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; |
4735 | ivi->min_tx_rate = qed_iov_get_vf_min_rate(p_hwfn: hwfn, vfid: vf_id); |
4736 | ivi->trusted = vf_info->is_trusted_request; |
4737 | |
4738 | return 0; |
4739 | } |
4740 | |
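/**
 * qed_inform_vf_link_state - reflect the PF link state in all VF bulletins
 * @hwfn: hardware function pointer
 */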
4741 | void qed_inform_vf_link_state(struct qed_hwfn *hwfn) |
4742 | { |
4743 | struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); |
4744 | struct qed_mcp_link_capabilities caps; |
4745 | struct qed_mcp_link_params params; |
4746 | struct qed_mcp_link_state link; |
4747 | int i; |
4748 | |
4749 | if (!hwfn->pf_iov_info) |
4750 | return; |
4751 | |
4752 | /* Update bulletin of all future possible VFs with link configuration */ |
4753 | for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { |
4754 | struct qed_public_vf_info *vf_info; |
4755 | |
4756 | vf_info = qed_iov_get_public_vf_info(p_hwfn: hwfn, relative_vf_id: i, b_enabled_only: false); |
4757 | if (!vf_info) |
4758 | continue; |
4759 | |
/* Only hwfn0 receives an MFW indication of link, so the link
 * configuration must be taken from it - otherwise things like
 * rate limiting for hwfn1 VFs would not work.
 */
memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4766 | sizeof(params)); |
4767 | memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); |
4768 | memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), |
4769 | sizeof(caps)); |
4770 | |
4771 | /* Modify link according to the VF's configured link state */ |
4772 | switch (vf_info->link_state) { |
4773 | case IFLA_VF_LINK_STATE_DISABLE: |
4774 | link.link_up = false; |
4775 | break; |
4776 | case IFLA_VF_LINK_STATE_ENABLE: |
4777 | link.link_up = true; |
/* Set speed according to the maximum supported by HW,
 * that is 40G for regular devices and 100G for CMT
 * mode devices.
 */
4782 | link.speed = (hwfn->cdev->num_hwfns > 1) ? |
4783 | 100000 : 40000; |
4784 | break; |
4785 | default: |
4786 | /* In auto mode pass PF link image to VF */ |
4787 | break; |
4788 | } |
4789 | |
4790 | if (link.link_up && vf_info->tx_rate) { |
4791 | struct qed_ptt *ptt; |
4792 | int rate; |
4793 | |
4794 | rate = min_t(int, vf_info->tx_rate, link.speed); |
4795 | |
4796 | ptt = qed_ptt_acquire(p_hwfn: hwfn); |
4797 | if (!ptt) { |
4798 | DP_NOTICE(hwfn, "Failed to acquire PTT\n" ); |
4799 | return; |
4800 | } |
4801 | |
4802 | if (!qed_iov_configure_tx_rate(p_hwfn: hwfn, p_ptt: ptt, vfid: i, val: rate)) { |
4803 | vf_info->tx_rate = rate; |
4804 | link.speed = rate; |
4805 | } |
4806 | |
4807 | qed_ptt_release(p_hwfn: hwfn, p_ptt: ptt); |
4808 | } |
4809 | |
4810 | qed_iov_set_link(p_hwfn: hwfn, vfid: i, params: ¶ms, link: &link, p_caps: &caps); |
4811 | } |
4812 | |
4813 | qed_schedule_iov(hwfn, flag: QED_IOV_WQ_BULLETIN_UPDATE_FLAG); |
4814 | } |
4815 | |
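/* Set a VF's administrative link state (auto/enable/disable) on every
 * hwfn, then re-publish the resulting link through the bulletin board.
 */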
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

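/* Enable or disable anti-spoofing for a VF on every hwfn of the device. */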
static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

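/* Store the requested maximum tx rate for a VF and let
 * qed_inform_vf_link_state() apply it as part of the link re-publish.
 */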
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

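/* Configure min/max tx rates for a VF; a zero rate leaves that bound
 * unchanged.
 */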
static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

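/* Record a trust-mode request for a VF and kick the IOV worker; the
 * actual reconfiguration happens in qed_iov_handle_trust_change().
 */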
static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}

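/* Deferred handler for VF->PF mailbox messages: collect the pending-events
 * bitmask, then copy and process each waiting VF request.
 */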
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

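/* Check whether the PF-configured MAC for the VF (regular MAC in trusted
 * mode, forced MAC otherwise) is valid and differs from @mac, the address
 * currently on the bulletin; returns true if the bulletin needs an update.
 */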
static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
				       u8 *mac,
				       struct qed_public_vf_info *info)
{
	if (info->is_trusted_configured) {
		if (is_valid_ether_addr(info->mac) &&
		    (!mac || !ether_addr_equal(mac, info->mac)))
			return true;
	} else {
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac)))
			return true;
	}

	return false;
}

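/* Publish the PF-configured MAC on the VF's bulletin board - as a plain
 * MAC when the VF is trusted, as a forced MAC otherwise.
 */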
static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
				 struct qed_public_vf_info *info,
				 int vfid)
{
	if (info->is_trusted_configured)
		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
	else
		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
}

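/* Deferred handler syncing PF-requested unicast configuration (MAC and
 * forced vlan) into each VF's bulletin board, scheduling a bulletin post
 * whenever something changed.
 */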
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		if (info->is_trusted_configured)
			mac = qed_iov_bulletin_get_mac(hwfn, i);
		else
			mac = qed_iov_bulletin_get_forced_mac(hwfn, i);

		if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with MAC */
			qed_set_bulletin_mac(hwfn, info, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

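/* Deferred handler posting the updated bulletin boards to all VFs. */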
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

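/* Rework the MAC bookkeeping for a VF whose trust mode just changed: when
 * trust is turned on, a forced MAC becomes a regular one; when it is
 * turned off, the VF's MAC is re-inserted into the shadow config and the
 * bulletin copy is cleared.
 */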
static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
{
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 *force_mac;
	int i;

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
	vf = qed_iov_get_vf_info(hwfn, vf_id, true);

	if (!vf_info || !vf)
		return;

	/* Convert the forced MAC into a generic MAC when VF trust is
	 * turned on.
	 */
	if (vf_info->is_trusted_configured &&
	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
		force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);

		if (force_mac) {
			/* Clear existing shadow copy of MAC to have a clean
			 * slate.
			 */
			for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
				if (ether_addr_equal(vf->shadow_config.macs[i],
						     vf_info->mac)) {
					eth_zero_addr(vf->shadow_config.macs[i]);
					DP_VERBOSE(hwfn, QED_MSG_IOV,
						   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
						   vf_info->mac, vf_id);
					break;
				}
			}

			ether_addr_copy(vf_info->mac, force_mac);
			eth_zero_addr(vf_info->forced_mac);
			vf->bulletin.p_virt->valid_bitmap &=
			    ~BIT(MAC_ADDR_FORCED);
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
	}

	/* Update shadow copy with VF MAC when trust mode is turned off */
	if (!vf_info->is_trusted_configured) {
		u8 empty_mac[ETH_ALEN];

		eth_zero_addr(empty_mac);
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     empty_mac)) {
				ether_addr_copy(vf->shadow_config.macs[i],
						vf_info->mac);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
					   vf_info->mac, vf_id);
				break;
			}
		}
		/* Clear bulletin when trust mode is turned off,
		 * to have a clean slate for next (normal) operations.
		 */
		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

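/* Deferred handler applying a requested trust-mode change: update the MAC
 * bookkeeping and, for VFs with an active vport, reconfigure rx/tx accept
 * flags, control-frame checks and accept-any-vlan accordingly.
 */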
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Make sure the requested configuration hasn't flipped back
		 * in the meantime, so that we don't apply a configuration
		 * that is no longer needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Handle forced MAC mode */
		qed_update_mac_for_vf_trust_change(hwfn, i);

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf || !vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;
		params.update_accept_any_vlan_flg = 0;

		if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
			params.update_accept_any_vlan_flg = 1;
			params.accept_any_vlan = vf_info->accept_any_vlan;
		}

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* For an untrusted VF, strip the unmatched-accept bits again;
		 * otherwise the mask bits set above would remain in effect.
		 */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
			params.accept_any_vlan = false;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check ||
		    params.update_accept_any_vlan_flg) {
			DP_VERBOSE(hwfn, QED_MSG_IOV,
				   "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
				   vf_info->is_trusted_configured ? "trusted" : "untrusted",
				   vf->abs_vf_id, vf->relative_vf_id);
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
		}
	}
}

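/* PF IOV worker - runs every deferred task flagged via qed_schedule_iov():
 * FLR cleanup, VF mailbox processing, unicast and bulletin updates, and
 * trust-mode reconfiguration.
 */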
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

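/* Tear down the per-hwfn IOV workqueues. When @schedule_first is set, a
 * STOP-flagged run is scheduled and the delayed work is cancelled
 * synchronously before each workqueue is destroyed.
 */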
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

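/* Allocate the per-hwfn IOV workqueue and bind it to the PF or VF flavour
 * of the IOV task.
 */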
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* A PF needs a dedicated workqueue only if it supports IOV;
		 * a VF always requires one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

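/* Callback table through which the upper-layer driver drives PF SR-IOV
 * configuration (see qed_iov_if.h).
 */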
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,
};