// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_ptp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
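/* 0x1edc6f41 is the CRC-32C (Castagnoli) polynomial; qed_calc_crc32c()
 * below uses it to hash multicast MAC addresses into approximate-match
 * filter bins.
 */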
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        struct mutex lock;
};

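/* One bitmap is allocated per queue-zone, with a bit for every queue that
 * can live inside the zone; MAX_QUEUES_PER_QZONE / 8 converts that bit
 * count into bytes, assuming the constant is a multiple of BITS_PER_BYTE.
 */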
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return 0;

        p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
        if (!p_l2_info)
                return -ENOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->cdev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                qed_vf_get_num_rxqs(p_hwfn, &rx);
                qed_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = max_t(u8, rx, tx);
        }

        pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
                          GFP_KERNEL);
        if (!pp_qids)
                return -ENOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
                if (!pp_qids[i])
                        return -ENOMEM;
        }

        return 0;
}

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
        if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return;

        mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
        u32 i;

        if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (!p_hwfn->p_l2_info)
                return;

        if (!p_hwfn->p_l2_info->pp_qid_usage)
                goto out_l2_info;

        /* Free until hit first uninitialized entry */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (!p_hwfn->p_l2_info->pp_qid_usage[i])
                        break;
                kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
        }

        kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
        kfree(p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = NULL;
}

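/* Claim the first free per-qzone index for a new queue-cid. The reverse
 * operation is qed_eth_queue_qid_usage_del(); both run under
 * p_l2_info->lock so concurrent queue starts can't grab the same index.
 */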
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
                                        struct qed_queue_cid *p_cid)
{
        struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        mutex_lock(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
                                        MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        mutex_unlock(&p_l2_info->lock);
        return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
                                        struct qed_queue_cid *p_cid)
{
        mutex_lock(&p_hwfn->p_l2_info->lock);

        clear_bit(p_cid->qid_usage_idx,
                  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
                               struct qed_queue_cid *p_cid)
{
        bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

        if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
                _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

        /* For PF's VFs we maintain the index inside queue-zone in IOV */
        if (p_cid->vfid == QED_QUEUE_CID_SELF)
                qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

        vfree(p_cid);
}

/* This internal variant is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
                      u16 opaque_fid,
                      u32 cid,
                      struct qed_queue_start_common_params *p_params,
                      bool b_is_rx,
                      struct qed_queue_cid_vf_params *p_vf_params)
{
        struct qed_queue_cid *p_cid;
        int rc;

        p_cid = vzalloc(sizeof(*p_cid));
        if (!p_cid)
                return NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->p_owner = p_hwfn;

        /* Fill in parameters */
        p_cid->rel.vport_id = p_params->vport_id;
        p_cid->rel.queue_id = p_params->queue_id;
        p_cid->rel.stats_id = p_params->stats_id;
        p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
        p_cid->b_is_rx = b_is_rx;
        p_cid->sb_idx = p_params->sb_idx;

        /* Fill-in bits related to VFs' queues if information was provided */
        if (p_vf_params) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->vf_legacy = p_vf_params->vf_legacy;
        } else {
                p_cid->vfid = QED_QUEUE_CID_SELF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->cdev)) {
                p_cid->abs = p_cid->rel;
                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc)
                goto fail;

        rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
        if (rc)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == QED_QUEUE_CID_SELF) {
                rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                  &p_cid->abs.stats_id);
                if (rc)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

out:
        /* VF-images have provided the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid,
                   p_cid->cid,
                   p_cid->rel.vport_id,
                   p_cid->abs.vport_id,
                   p_cid->rel.queue_id,
                   p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id,
                   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

        return p_cid;

fail:
        vfree(p_cid);
        return NULL;
}

struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
                     u16 opaque_fid,
                     struct qed_queue_start_common_params *p_params,
                     bool b_is_rx,
                     struct qed_queue_cid_vf_params *p_vf_params)
{
        struct qed_queue_cid *p_cid;
        u8 vfid = QED_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by PF.
         */
        if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
                if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                         &cid, vfid)) {
                        DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
                        return NULL;
                }
        }

        p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                      p_params, b_is_rx, p_vf_params);
        if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
                _qed_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        bool b_is_rx,
                        struct qed_queue_start_common_params *p_params)
{
        return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
                                    NULL);
}

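/* Illustrative PF-side usage sketch (the field values below are made up
 * for the example, not recommended defaults):
 *
 *	struct qed_sp_vport_start_params start = {
 *		.opaque_fid = p_hwfn->hw_info.opaque_fid,
 *		.concrete_fid = p_hwfn->hw_info.concrete_fid,
 *		.vport_id = 0,
 *		.mtu = 1500,
 *		.max_buffers_per_cqe = 1,
 *	};
 *
 *	rc = qed_sp_vport_start(p_hwfn, &start);
 *
 * Note that the start ramrod below deliberately sets UCAST/MCAST_DROP_ALL,
 * so Rx traffic only flows once an accept-mode update opens the vport.
 */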
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = NULL;
        struct eth_vport_tpa_param *tpa_param;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u16 min_size, rx_mode = 0;
        u8 abs_vport_id = 0;
        int rc;

        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = cpu_to_le16(p_params->mtu);
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

        /* TPA related fields */
        tpa_param = &p_ramrod->tpa_param;
        memset(tpa_param, 0, sizeof(*tpa_param));

        tpa_param->max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case QED_TPA_MODE_GRO:
                min_size = p_params->mtu / 2;

                tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                tpa_param->tpa_max_size = cpu_to_le16(U16_MAX);
                tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size);
                tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size);
                tpa_param->tpa_ipv4_en_flg = 1;
                tpa_param->tpa_ipv6_en_flg = 1;
                tpa_param->tpa_pkt_split_flg = 1;
                tpa_param->tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
                                                  p_params->concrete_fid);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
                              struct qed_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->cdev)) {
                return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                             p_params->mtu,
                                             p_params->remove_inner_vlan,
                                             p_params->tpa_mode,
                                             p_params->max_buffers_per_cqe,
                                             p_params->only_untagged);
        }

        return qed_sp_eth_vport_start(p_hwfn, p_params);
}

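/* Firmware takes the RSS table size as a log2 (tbl_size), so only
 * min(QED_RSS_IND_TABLE_SIZE, 1 << tbl_size) indirection entries are
 * meaningful; each entry is an engine-absolute queue id. The hash key is
 * passed as ten little-endian 32-bit words, i.e. the usual 40-byte RSS key.
 */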
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        u16 capabilities = 0;
        int i, table_size;
        int rc = 0;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
                             ETH_VPORT_RSS_MODE_REGULAR :
                             ETH_VPORT_RSS_MODE_DISABLED;

        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;

        p_config->capabilities = cpu_to_le16(capabilities);

        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
                           1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return -EINVAL;

                p_config->indirection_table[i] =
                    cpu_to_le16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           le16_to_cpu(p_config->indirection_table[i]),
                           le16_to_cpu(p_config->indirection_table[i + 1]),
                           le16_to_cpu(p_config->indirection_table[i + 2]),
                           le16_to_cpu(p_config->indirection_table[i + 3]),
                           le16_to_cpu(p_config->indirection_table[i + 4]),
                           le16_to_cpu(p_config->indirection_table[i + 5]),
                           le16_to_cpu(p_config->indirection_table[i + 6]),
                           le16_to_cpu(p_config->indirection_table[i + 7]),
                           le16_to_cpu(p_config->indirection_table[i + 8]),
                           le16_to_cpu(p_config->indirection_table[i + 9]),
                           le16_to_cpu(p_config->indirection_table[i + 10]),
                           le16_to_cpu(p_config->indirection_table[i + 11]),
                           le16_to_cpu(p_config->indirection_table[i + 12]),
                           le16_to_cpu(p_config->indirection_table[i + 13]),
                           le16_to_cpu(p_config->indirection_table[i + 14]),
                           le16_to_cpu(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

        return rc;
}

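/* Translation of QED_ACCEPT_* flags into the firmware rx/tx mode bitfield:
 * a DROP_ALL bit is set only when neither the matched nor the unmatched
 * variant of that traffic class is accepted, while an ACCEPT_ALL bit
 * requires both. E.g. QED_ACCEPT_MCAST_MATCHED alone clears MCAST_DROP_ALL
 * without setting MCAST_ACCEPT_ALL.
 */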
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct qed_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
            accept_flags.update_rx_mode_config;

        p_ramrod->common.update_tx_mode_flg =
            accept_flags.update_tx_mode_config;

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
                            !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & QED_ACCEPT_BCAST));

                SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
                          !!(accept_filter & QED_ACCEPT_ANY_VNI));

                p_ramrod->rx_mode.state = cpu_to_le16(state);
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "p_ramrod->rx_mode.state = 0x%x\n", state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
                          (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & QED_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = cpu_to_le16(state);
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "p_ramrod->tx_mode.state = 0x%x\n", state);
        }
}

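/* TPA parameters come in two independently-updatable groups: the
 * per-protocol enable bits (gated by update_tpa_en_flg) and the sizing /
 * behaviour fields (gated by update_tpa_param_flg), so a caller can change
 * one group without touching the other.
 */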
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            const struct qed_sge_tpa_params *param)
{
        struct eth_vport_tpa_param *tpa;

        if (!param) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg;
        tpa = &p_ramrod->tpa_param;
        tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg;
        tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg;
        tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg;
        tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg;
        tpa->max_buff_num = param->max_buffers_per_cqe;
        tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg;
        tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg;
        tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg;
        tpa->tpa_max_aggs_num = param->tpa_max_aggs_num;
        tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size);
        tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start);
        tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont);
}

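/* The approximate-multicast filter is a bin vector spread over
 * ETH_MULTICAST_MAC_BINS_IN_REGS 32-bit words; qed_mcast_bin_from_mac()
 * further below hashes a MAC address into one of those bins.
 */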
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_sp_vport_update_params *p_params)
{
        int i;

        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = p_params->bins;

                p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
}

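/* Single entry point for vport-update ramrods. A VF forwards the request
 * to its PF over the vf-pf channel, while a PF translates the relative
 * vport id and builds the ramrod itself. Each update_* flag in p_params
 * gates whether the corresponding value is applied, so fields left at zero
 * are simply not updated.
 */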
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
                        struct qed_sp_vport_update_params *p_params,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct qed_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        u8 abs_vport_id = 0, val;
        int rc = -EINVAL;

        if (IS_VF(p_hwfn->cdev)) {
                rc = qed_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;
        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }

        if (p_params->update_ctl_frame_check) {
                p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
                p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

        qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_vport_stop(p_hwfn);

        rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
                       struct qed_filter_accept_flags *p_accept_flags)
{
        struct qed_sp_vport_update_params s_params;

        memset(&s_params, 0, sizeof(s_params));
        memcpy(&s_params.accept_flags, p_accept_flags,
               sizeof(struct qed_filter_accept_flags));

        return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
                                 u8 vport,
                                 struct qed_filter_accept_flags accept_flags,
                                 u8 update_accept_any_vlan,
                                 u8 accept_any_vlan,
                                 enum spq_mode comp_mode,
                                 struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_sp_vport_update_params vport_update_params;
        int i, rc;

        /* Prepare and send the vport rx_mode change */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(cdev)) {
                        rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc)
                                return rc;
                        continue;
                }

                rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
                                         comp_mode, p_comp_data);
                if (rc) {
                        DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);
                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return 0;
}

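/* Starting an Rx queue is a two-step dance: the producer in internal RAM
 * is zeroed first (see qed_eth_pf_rx_queue_start() below) and only then is
 * the start ramrod posted, so the firmware doesn't pick up a stale
 * producer value.
 */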
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
                             struct qed_queue_cid *p_cid,
                             u16 bd_max_bytes,
                             dma_addr_t bd_chain_phys_addr,
                             dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_RX_QUEUE_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != QED_QUEUE_CID_SELF) {
                bool b_legacy_vf = !!(p_cid->vf_legacy &
                                      QED_QCID_LEGACY_VF_RX_PROD);

                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
                          struct qed_queue_cid *p_cid,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
                          u16 cqe_pbl_size, void __iomem **pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 __iomem *)
            p_hwfn->regview +
            GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
                             MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                        bd_max_bytes,
                                        bd_chain_phys_addr,
                                        cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
                       u16 opaque_fid,
                       struct qed_queue_start_common_params *p_params,
                       u16 bd_max_bytes,
                       dma_addr_t bd_chain_phys_addr,
                       dma_addr_t cqe_pbl_addr,
                       u16 cqe_pbl_size,
                       struct qed_rxq_start_ret_params *p_ret_params)
{
        struct qed_queue_cid *p_cid;
        int rc;

        /* Allocate a CID for the queue */
        p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
        if (!p_cid)
                return -ENOMEM;

        if (IS_PF(p_hwfn->cdev)) {
                rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                               bd_max_bytes,
                                               bd_chain_phys_addr,
                                               cqe_pbl_addr, cqe_pbl_size,
                                               &p_ret_params->p_prod);
        } else {
                rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
                                         bd_max_bytes,
                                         bd_chain_phys_addr,
                                         cqe_pbl_addr,
                                         cqe_pbl_size, &p_ret_params->p_prod);
        }

        /* Provide the caller with the queue-cid as an opaque handle */
        if (rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                                void **pp_rxq_handles,
                                u8 num_rxqs,
                                u8 complete_cqe_flg,
                                u8 complete_event_flg,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        struct qed_queue_cid *p_cid;
        int rc = -EINVAL;
        u8 i;

        memset(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = qed_sp_init_request(p_hwfn, &p_ent,
                                         ETH_RAMROD_RX_QUEUE_UPDATE,
                                         PROTOCOLID_ETH, &init_data);
                if (rc)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = qed_spq_post(p_hwfn, p_ent, NULL);
                if (rc)
                        return rc;
        }

        return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         bool b_eq_completion_only, bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_RX_QUEUE_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come as eqe to PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
                                       b_eq_completion_only;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
                          void *p_rxq,
                          bool eq_completion_only, bool cqe_completion)
{
        struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
        int rc = -EINVAL;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                              eq_completion_only,
                                              cqe_completion);
        else
                rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (!rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_TX_QUEUE_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
                          struct qed_queue_cid *p_cid,
                          u8 tc,
                          dma_addr_t pbl_addr,
                          u16 pbl_size, void __iomem **pp_doorbell)
{
        int rc;

        rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
                                      pbl_addr, pbl_size,
                                      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
        if (rc)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = p_hwfn->doorbells +
                       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

        return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
                       u16 opaque_fid,
                       struct qed_queue_start_common_params *p_params,
                       u8 tc,
                       dma_addr_t pbl_addr,
                       u16 pbl_size,
                       struct qed_txq_start_ret_params *p_ret_params)
{
        struct qed_queue_cid *p_cid;
        int rc;

        p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
        if (!p_cid)
                return -EINVAL;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                               pbl_addr, pbl_size,
                                               &p_ret_params->p_doorbell);
        else
                rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
                                         pbl_addr, pbl_size,
                                         &p_ret_params->p_doorbell);

        if (rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_TX_QUEUE_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
        struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
        int rc;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

        if (!rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case QED_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case QED_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case QED_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_ucast *p_filter_cmd,
                        struct vport_filter_update_ramrod_data **pp_ramrod,
                        struct qed_spq_entry **pp_ent,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct qed_sp_init_data init_data;
        enum eth_filter_action action;
        int rc;

        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                          &vport_to_remove_from);
        if (rc)
                return rc;

        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                          &vport_to_add_to);
        if (rc)
                return rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, pp_ent,
                                 ETH_RAMROD_FILTERS_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

        switch (p_filter_cmd->opcode) {
        case QED_FILTER_REPLACE:
        case QED_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
        }

        p_first_filter = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        switch (p_filter_cmd->type) {
        case QED_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
        case QED_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
        case QED_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
        case QED_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
        case QED_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
        case QED_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
        case QED_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case QED_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
        case QED_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
                qed_set_fw_mac_addr(&p_first_filter->mac_msb,
                                    &p_first_filter->mac_mid,
                                    &p_first_filter->mac_lsb,
                                    (u8 *)p_filter_cmd->mac);
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
                p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

        if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_VNI))
                p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

        if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
                p_second_filter->type = p_first_filter->type;
                p_second_filter->mac_msb = p_first_filter->mac_msb;
                p_second_filter->mac_mid = p_first_filter->mac_mid;
                p_second_filter->mac_lsb = p_first_filter->mac_lsb;
                p_second_filter->vlan_id = p_first_filter->vlan_id;
                p_second_filter->vni = p_first_filter->vni;

                p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

                p_first_filter->vport_id = vport_to_remove_from;

                p_second_filter->action = ETH_FILTER_ACTION_ADD;
                p_second_filter->vport_id = vport_to_add_to;
        } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
                p_first_filter->vport_id = vport_to_add_to;
                memcpy(p_second_filter, p_first_filter,
                       sizeof(*p_second_filter));
                p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
                p_second_filter->action = ETH_FILTER_ACTION_ADD;
        } else {
                action = qed_filter_action(p_filter_cmd->opcode);

                if (action == MAX_ETH_FILTER_ACTION) {
                        DP_NOTICE(p_hwfn,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
                        qed_sp_destroy_request(p_hwfn, *pp_ent);
                        return -EINVAL;
                }

                p_first_filter->action = action;
                p_first_filter->vport_id = (p_filter_cmd->opcode ==
                                            QED_FILTER_REMOVE) ?
                                           vport_to_remove_from :
                                           vport_to_add_to;
        }

        return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
                            u16 opaque_fid,
                            struct qed_filter_ucast *p_filter_cmd,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data)
{
        struct vport_filter_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct eth_filter_cmd_header *p_header;
        int rc;

        rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
                                     &p_ramrod, &p_ent,
                                     comp_mode, p_comp_data);
        if (rc) {
                DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
                return rc;
        }
        p_header = &p_ramrod->filter_cmd_hdr;
        p_header->assert_on_error = p_filter_cmd->assert_on_error;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc) {
                DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
                   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
                   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
                    "REMOVE" :
                    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
                     "MOVE" : "REPLACE")),
                   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
                   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
                    "VLAN" : "MAC & VLAN"),
                   p_ramrod->filter_cmd_hdr.cmd_cnt,
                   p_filter_cmd->is_rx_filter,
                   p_filter_cmd->is_tx_filter);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
                   p_filter_cmd->vport_to_add_to,
                   p_filter_cmd->vport_to_remove_from,
                   p_filter_cmd->mac[0],
                   p_filter_cmd->mac[1],
                   p_filter_cmd->mac[2],
                   p_filter_cmd->mac[3],
                   p_filter_cmd->mac[4],
                   p_filter_cmd->mac[5],
                   p_filter_cmd->vlan);

        return 0;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 *	The computed CRC; on invalid input the seed is returned unchanged.
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
                           u32 crc32_length, u32 crc32_seed, u8 complement)
{
        u32 byte = 0, bit = 0, crc32_result = crc32_seed;
        u8 msb = 0, current_byte = 0;

        if ((!crc32_packet) ||
            (crc32_length == 0) ||
            ((crc32_length % 8) != 0))
                return crc32_result;
        for (byte = 0; byte < crc32_length; byte++) {
                current_byte = crc32_packet[byte];
                for (bit = 0; bit < 8; bit++) {
                        msb = (u8)(crc32_result >> 31);
                        crc32_result = crc32_result << 1;
                        if (msb != (0x1 & (current_byte >> bit))) {
                                crc32_result = crc32_result ^ CRC32_POLY;
                                crc32_result |= 1; /*crc32_result[0] = 1;*/
                        }
                }
        }
        return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
        u32 packet_buf[2] = { 0 };

        memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
        return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

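/* The multicast bin is simply the low byte of the CRC: e.g. a CRC of
 * 0x1234ab5c places the MAC in bin 0x5c, giving 256 possible bins.
 */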
u8 qed_mcast_bin_from_mac(u8 *mac)
{
        u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
                                mac, ETH_ALEN);

        return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_mcast *p_filter_cmd,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        struct vport_update_ramrod_data *p_ramrod = NULL;
        u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 abs_vport_id = 0;
        int rc, i;

        if (p_filter_cmd->opcode == QED_FILTER_ADD)
                rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                                  &abs_vport_id);
        else
                rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                                  &abs_vport_id);
        if (rc)
                return rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc) {
                DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
                return rc;
        }

        p_ramrod = &p_ent->ramrod.vport_update;
        p_ramrod->common.update_approx_mcast_flg = 1;

        /* explicitly clear out the entire vector */
        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));
        memset(bins, 0, sizeof(bins));
        /* filter ADD op is explicit set op and it removes
         * any existing filters for the vport
         */
        if (p_filter_cmd->opcode == QED_FILTER_ADD) {
                for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
                        u32 bit, nbits;

                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
                        nbits = sizeof(u32) * BITS_PER_BYTE;
                        bins[bit / nbits] |= 1 << (bit % nbits);
                }

                /* Convert to correct endianness */
                for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                        struct vport_update_ramrod_mcast *p_ramrod_bins;

                        p_ramrod_bins = &p_ramrod->approx_mcast;
                        p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
                }
        }

        p_ramrod->common.vport_id = abs_vport_id;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
                                struct qed_filter_mcast *p_filter_cmd,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
        int rc = 0;
        int i;

        /* only ADD and REMOVE operations are supported for multi-cast */
        if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
             (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
            (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
                return -EINVAL;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                u16 opaque_fid;

                if (IS_VF(cdev)) {
                        qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
                        continue;
                }

                opaque_fid = p_hwfn->hw_info.opaque_fid;

                rc = qed_sp_eth_filter_mcast(p_hwfn,
                                             opaque_fid,
                                             p_filter_cmd,
                                             comp_mode, p_comp_data);
        }
        return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
                                struct qed_filter_ucast *p_filter_cmd,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
        int rc = 0;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                u16 opaque_fid;

                if (IS_VF(cdev)) {
                        rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
                        continue;
                }

                opaque_fid = p_hwfn->hw_info.opaque_fid;

                rc = qed_sp_eth_filter_ucast(p_hwfn,
                                             opaque_fid,
                                             p_filter_cmd,
                                             comp_mode, p_comp_data);
                if (rc)
                        break;
        }

        return rc;
}

/* Statistics related code */
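/* Statistics are scattered across the storm RAMs: PSTORM holds the
 * per-queue Tx counters, TSTORM the per-port discard counters and USTORM
 * the per-queue Rx counters. A PF reads them directly out of BAR0 RAM,
 * while a VF uses the addresses/lengths its PF published in the acquire
 * response TLV.
 */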
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
                                           u32 *p_addr,
                                           u32 *p_len, u16 statistics_bin)
{
        if (IS_PF(p_hwfn->cdev)) {
                *p_addr = BAR0_MAP_REG_PSDM_RAM +
                    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
                *p_len = sizeof(struct eth_pstorm_per_queue_stat);
        } else {
                struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
                struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

                *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
                *p_len = p_resp->pfdev_info.stats_info.pstats.len;
        }
}

1614 | static noinline_for_stack void |
1615 | __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
1616 | struct qed_eth_stats *p_stats, u16 statistics_bin) |
1617 | { |
1618 | struct eth_pstorm_per_queue_stat pstats; |
1619 | u32 pstats_addr = 0, pstats_len = 0; |
1620 | |
1621 | __qed_get_vport_pstats_addrlen(p_hwfn, p_addr: &pstats_addr, p_len: &pstats_len, |
1622 | statistics_bin); |
1623 | |
1624 | memset(&pstats, 0, sizeof(pstats)); |
1625 | qed_memcpy_from(p_hwfn, p_ptt, dest: &pstats, hw_addr: pstats_addr, n: pstats_len); |
1626 | |
1627 | p_stats->common.tx_ucast_bytes += |
1628 | HILO_64_REGPAIR(pstats.sent_ucast_bytes); |
1629 | p_stats->common.tx_mcast_bytes += |
1630 | HILO_64_REGPAIR(pstats.sent_mcast_bytes); |
1631 | p_stats->common.tx_bcast_bytes += |
1632 | HILO_64_REGPAIR(pstats.sent_bcast_bytes); |
1633 | p_stats->common.tx_ucast_pkts += |
1634 | HILO_64_REGPAIR(pstats.sent_ucast_pkts); |
1635 | p_stats->common.tx_mcast_pkts += |
1636 | HILO_64_REGPAIR(pstats.sent_mcast_pkts); |
1637 | p_stats->common.tx_bcast_pkts += |
1638 | HILO_64_REGPAIR(pstats.sent_bcast_pkts); |
1639 | p_stats->common.tx_err_drop_pkts += |
1640 | HILO_64_REGPAIR(pstats.error_drop_pkts); |
1641 | } |
1642 | |
1643 | static noinline_for_stack void |
1644 | __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
1645 | struct qed_eth_stats *p_stats, u16 statistics_bin) |
1646 | { |
1647 | struct tstorm_per_port_stat tstats; |
1648 | u32 tstats_addr, tstats_len; |
1649 | |
1650 | if (IS_PF(p_hwfn->cdev)) { |
1651 | tstats_addr = BAR0_MAP_REG_TSDM_RAM + |
1652 | TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); |
1653 | tstats_len = sizeof(struct tstorm_per_port_stat); |
1654 | } else { |
1655 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; |
1656 | struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; |
1657 | |
1658 | tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; |
1659 | tstats_len = p_resp->pfdev_info.stats_info.tstats.len; |
1660 | } |
1661 | |
1662 | memset(&tstats, 0, sizeof(tstats)); |
1663 | qed_memcpy_from(p_hwfn, p_ptt, dest: &tstats, hw_addr: tstats_addr, n: tstats_len); |
1664 | |
1665 | p_stats->common.mftag_filter_discards += |
1666 | HILO_64_REGPAIR(tstats.mftag_filter_discard); |
1667 | p_stats->common.mac_filter_discards += |
1668 | HILO_64_REGPAIR(tstats.eth_mac_filter_discard); |
1669 | p_stats->common.gft_filter_drop += |
1670 | HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); |
1671 | } |
1672 | |
1673 | static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, |
1674 | u32 *p_addr, |
1675 | u32 *p_len, u16 statistics_bin) |
1676 | { |
1677 | if (IS_PF(p_hwfn->cdev)) { |
1678 | *p_addr = BAR0_MAP_REG_USDM_RAM + |
1679 | USTORM_QUEUE_STAT_OFFSET(statistics_bin); |
1680 | *p_len = sizeof(struct eth_ustorm_per_queue_stat); |
1681 | } else { |
1682 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; |
1683 | struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; |
1684 | |
1685 | *p_addr = p_resp->pfdev_info.stats_info.ustats.address; |
1686 | *p_len = p_resp->pfdev_info.stats_info.ustats.len; |
1687 | } |
1688 | } |
1689 | |
1690 | static noinline_for_stack |
1691 | void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
1692 | struct qed_eth_stats *p_stats, u16 statistics_bin) |
1693 | { |
1694 | struct eth_ustorm_per_queue_stat ustats; |
1695 | u32 ustats_addr = 0, ustats_len = 0; |
1696 | |
1697 | __qed_get_vport_ustats_addrlen(p_hwfn, p_addr: &ustats_addr, p_len: &ustats_len, |
1698 | statistics_bin); |
1699 | |
1700 | memset(&ustats, 0, sizeof(ustats)); |
1701 | qed_memcpy_from(p_hwfn, p_ptt, dest: &ustats, hw_addr: ustats_addr, n: ustats_len); |
1702 | |
1703 | p_stats->common.rx_ucast_bytes += |
1704 | HILO_64_REGPAIR(ustats.rcv_ucast_bytes); |
1705 | p_stats->common.rx_mcast_bytes += |
1706 | HILO_64_REGPAIR(ustats.rcv_mcast_bytes); |
1707 | p_stats->common.rx_bcast_bytes += |
1708 | HILO_64_REGPAIR(ustats.rcv_bcast_bytes); |
1709 | p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); |
1710 | p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); |
1711 | p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); |
1712 | } |
1713 | |
1714 | static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, |
1715 | u32 *p_addr, |
1716 | u32 *p_len, u16 statistics_bin) |
1717 | { |
1718 | if (IS_PF(p_hwfn->cdev)) { |
1719 | *p_addr = BAR0_MAP_REG_MSDM_RAM + |
1720 | MSTORM_QUEUE_STAT_OFFSET(statistics_bin); |
1721 | *p_len = sizeof(struct eth_mstorm_per_queue_stat); |
1722 | } else { |
1723 | struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; |
1724 | struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; |
1725 | |
1726 | *p_addr = p_resp->pfdev_info.stats_info.mstats.address; |
1727 | *p_len = p_resp->pfdev_info.stats_info.mstats.len; |
1728 | } |
1729 | } |
1730 | |
1731 | static noinline_for_stack void |
1732 | __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
1733 | struct qed_eth_stats *p_stats, u16 statistics_bin) |
1734 | { |
1735 | struct eth_mstorm_per_queue_stat mstats; |
1736 | u32 mstats_addr = 0, mstats_len = 0; |
1737 | |
1738 | __qed_get_vport_mstats_addrlen(p_hwfn, p_addr: &mstats_addr, p_len: &mstats_len, |
1739 | statistics_bin); |
1740 | |
1741 | memset(&mstats, 0, sizeof(mstats)); |
1742 | qed_memcpy_from(p_hwfn, p_ptt, dest: &mstats, hw_addr: mstats_addr, n: mstats_len); |
1743 | |
1744 | p_stats->common.no_buff_discards += |
1745 | HILO_64_REGPAIR(mstats.no_buff_discard); |
1746 | p_stats->common.packet_too_big_discard += |
1747 | HILO_64_REGPAIR(mstats.packet_too_big_discard); |
1748 | p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); |
1749 | p_stats->common.tpa_coalesced_pkts += |
1750 | HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); |
1751 | p_stats->common.tpa_coalesced_events += |
1752 | HILO_64_REGPAIR(mstats.tpa_coalesced_events); |
1753 | p_stats->common.tpa_aborts_num += |
1754 | HILO_64_REGPAIR(mstats.tpa_aborts_num); |
1755 | p_stats->common.tpa_coalesced_bytes += |
1756 | HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); |
1757 | } |
1758 | |
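/* Port-level MAC/BRB statistics are kept by the MFW in its shared memory
 * (struct public_port). They are per-port rather than per-vport, so they
 * are gathered only once, on the PF's lead hwfn.
 */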
static noinline_for_stack void
__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

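	/* The remaining frame-size buckets live in unions whose layout
	 * differs between BB and AH adapters.
	 */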
	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets =
		    port_stats.eth.u1.ah1.t1519_to_max;
	}

	p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
					     p_hwfn->mcp_info->port_addr +
					     offsetof(struct public_port,
						      link_change_count));
}

static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats,
				 bool is_atomic)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		bool b_get_port_stats;

		p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
				    : NULL;
		if (IS_PF(cdev)) {
			/* Translate the main vport (relative index 0) to
			 * its absolute index.
			 */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn);
		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      b_get_port_stats);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	qed_get_vport_stats_context(cdev, stats, false);
}

void qed_get_vport_stats_context(struct qed_dev *cdev,
				 struct qed_eth_stats *stats,
				 bool is_atomic)
{
	u32 i;

	if (!cdev || cdev->recov_in_prog) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats, is_atomic);

	if (!cdev->reset_stats)
		return;

	/* Subtract the reset-time baseline; struct qed_eth_stats is a
	 * flat array of u64 counters, so walk it as one.
	 */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to read
	 * and create a baseline for future statistics. The link change
	 * stat is maintained by the MFW; return its value as is.
	 */
	if (!cdev->reset_stats) {
		DP_INFO(cdev, "Reset stats not allocated\n");
	} else {
		_qed_get_vport_stats(cdev, cdev->reset_stats, false);
		cdev->reset_stats->common.link_change_count = 0;
	}
}

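/* Map the driver's aRFS filtering mode onto the firmware's GFT profile
 * types; note that the driver's 5-tuple mode corresponds to the HSI
 * "4-tuple" profile.
 */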
static enum gft_profile_type
qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
		return GFT_PROFILE_TYPE_4_TUPLE;
	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
		return GFT_PROFILE_TYPE_IP_DST_ADDR;
	if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
		return GFT_PROFILE_TYPE_IP_SRC_ADDR;
	return GFT_PROFILE_TYPE_L4_DST_PORT;
}

void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct qed_arfs_config_params *p_cfg_params)
{
	if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
		return;

	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_cfg_params->tcp,
			       p_cfg_params->udp,
			       p_cfg_params->ipv4,
			       p_cfg_params->ipv6,
			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable",
			   (u32)p_cfg_params->mode);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}
}

int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
				struct qed_spq_comp_cb *p_cb,
				struct qed_ntuple_filter_params *p_params)
{
	struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_UPDATE_GFT_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;

	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);

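	/* Drop filters are steered to the dedicated trashcan vport; all
	 * other filters get the absolute vport and, unless left to RSS,
	 * an absolute Rx queue ID.
	 */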
	if (p_params->b_is_drop) {
		p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
	} else {
		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
		if (rc)
			goto err;

		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
					     &abs_rx_q_id);
			if (rc)
				goto err;

			p_ramrod->rx_qid_valid = 1;
			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
		}

		p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
	}

	p_ramrod->flow_id_valid = 0;
	p_ramrod->flow_id = 0;
	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
	    : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   p_params->b_is_add ? "Adding" : "Removing",
		   (u64)p_params->addr, p_params->length);

	return qed_spq_post(p_hwfn, p_ent, NULL);

err:
	qed_sp_destroy_request(p_hwfn, p_ent);
	return rc;
}

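/* Reading back a queue's coalescing setting combines two pieces of state:
 * the CAU timer resolution taken from the status block entry (RES0 for
 * Rx, RES1 for Tx) and the timeset stored in the queue zone. The
 * effective value is the timeset shifted left by the timer resolution.
 */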
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
			      CAU_SB_ENTRY_TIMER_RES0);

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_rx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
			      CAU_SB_ENTRY_TIMER_RES1);

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_tx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
		if (rc)
			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

		return rc;
	}

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	if (p_cid->b_is_rx) {
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	} else {
		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc)
			goto out;
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	memset(info, 0, sizeof(*info));

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		info->num_tc = p_hwfn->hw_info.num_hw_tc;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, xdp, tcs] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

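				/* Each queue consumes one Rx CID, one XDP
				 * Tx CID and one Tx CID per TC.
				 */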
				cids = hwfn->pf_params.eth_pf_params.num_cons;
				cids /= (2 + info->num_tc);
				num_queues += min_t(u16, l2_queues, cids);
			}

			/* Queues might theoretically exceed 256, but the
			 * interrupts' upper limit guarantees the value
			 * fits in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		info->num_tc = 1;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP if the PF guarantees sufficient connections */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For a VF, start the bulletin-board reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
				 QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* accept only untagged traffic */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In the regular scenario we'd simply take the input handlers.
	 * But in CMT we have to split the handlers according to the
	 * engine they were configured on, and then determine whether RSS
	 * is really required, since two queues on CMT don't need RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the engine-agnostic information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* The queues were distributed round-robin between the engines,
	 * so split the indirection table accordingly.
	 */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}
		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}

static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

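	/* In CMT, queues are interleaved across the engines; pick the
	 * hwfn by the global index and convert the queue ID to a
	 * per-engine one.
	 */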
	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, p_params->tc,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;
		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

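		/* Propagate the updated UDP ports to the VFs through
		 * their bulletin boards.
		 */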
		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

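	/* Promiscuous mode additionally accepts unmatched unicast and
	 * multicast frames; multicast-promiscuous mode accepts only the
	 * unmatched multicast ones.
	 */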
	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev,
				       enum qed_filter_config_mode mode)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.mode = mode;
	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie,
			     union event_ring_data *data, u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int
qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
			      void *cookie,
			      struct qed_ntuple_filter_params *params)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc = -EINVAL;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

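	/* A VF's filter must be steered to the VF's own vport; relative
	 * vport 0 belongs to the PF, so VF N uses vport N + 1. Queue
	 * selection is left to RSS.
	 */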
	if (params->b_is_vf) {
		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
					   false)) {
			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
				params->vf_id);
			return rc;
		}

		params->vport_id = params->vf_id + 1;
		params->qid = QED_RFS_NTUPLE_QID_RSS;
	}

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_hwfn *p_hwfn;
	int rc;

	p_hwfn = p_cid->p_owner;
	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
	if (rc)
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "Unable to read queue coalescing\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
{
	int i, ret;

	if (IS_PF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config_rx_mode = &qed_configure_filter_rx_mode,
	.filter_config_ucast = &qed_configure_filter_ucast,
	.filter_config_mcast = &qed_configure_filter_mcast,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
	.get_coalesce = &qed_get_coalesce,
	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);