/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

#define DRV_MODULE_SYM qede

struct qede_stats_common {
        u64 no_buff_discards;
        u64 packet_too_big_discard;
        u64 ttl0_discard;
        u64 rx_ucast_bytes;
        u64 rx_mcast_bytes;
        u64 rx_bcast_bytes;
        u64 rx_ucast_pkts;
        u64 rx_mcast_pkts;
        u64 rx_bcast_pkts;
        u64 mftag_filter_discards;
        u64 mac_filter_discards;
        u64 gft_filter_drop;
        u64 tx_ucast_bytes;
        u64 tx_mcast_bytes;
        u64 tx_bcast_bytes;
        u64 tx_ucast_pkts;
        u64 tx_mcast_pkts;
        u64 tx_bcast_pkts;
        u64 tx_err_drop_pkts;
        u64 coalesced_pkts;
        u64 coalesced_events;
        u64 coalesced_aborts_num;
        u64 non_coalesced_pkts;
        u64 coalesced_bytes;
        u64 link_change_count;
        u64 ptp_skip_txts;

        /* port */
        u64 rx_64_byte_packets;
        u64 rx_65_to_127_byte_packets;
        u64 rx_128_to_255_byte_packets;
        u64 rx_256_to_511_byte_packets;
        u64 rx_512_to_1023_byte_packets;
        u64 rx_1024_to_1518_byte_packets;
        u64 rx_crc_errors;
        u64 rx_mac_crtl_frames;
        u64 rx_pause_frames;
        u64 rx_pfc_frames;
        u64 rx_align_errors;
        u64 rx_carrier_errors;
        u64 rx_oversize_packets;
        u64 rx_jabbers;
        u64 rx_undersize_packets;
        u64 rx_fragments;
        u64 tx_64_byte_packets;
        u64 tx_65_to_127_byte_packets;
        u64 tx_128_to_255_byte_packets;
        u64 tx_256_to_511_byte_packets;
        u64 tx_512_to_1023_byte_packets;
        u64 tx_1024_to_1518_byte_packets;
        u64 tx_pause_frames;
        u64 tx_pfc_frames;
        u64 brb_truncates;
        u64 brb_discards;
        u64 tx_mac_ctrl_frames;
};

struct qede_stats_bb {
        u64 rx_1519_to_1522_byte_packets;
        u64 rx_1519_to_2047_byte_packets;
        u64 rx_2048_to_4095_byte_packets;
        u64 rx_4096_to_9216_byte_packets;
        u64 rx_9217_to_16383_byte_packets;
        u64 tx_1519_to_2047_byte_packets;
        u64 tx_2048_to_4095_byte_packets;
        u64 tx_4096_to_9216_byte_packets;
        u64 tx_9217_to_16383_byte_packets;
        u64 tx_lpi_entry_count;
        u64 tx_total_collisions;
};

struct qede_stats_ah {
        u64 rx_1519_to_max_byte_packets;
        u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
        struct qede_stats_common common;

        union {
                struct qede_stats_bb bb;
                struct qede_stats_ah ah;
        };
};

struct qede_vlan {
        struct list_head list;
        u16 vid;
        bool configured;
};

struct qede_rdma_dev {
        struct qedr_dev *qedr_dev;
        struct list_head entry;
        struct list_head rdma_event_list;
        struct workqueue_struct *rdma_wq;
        struct kref refcnt;
        struct completion event_comp;
        bool exp_recovery;
};

struct qede_ptp;

#define QEDE_RFS_MAX_FLTR 256

enum qede_flags_bit {
        QEDE_FLAGS_IS_VF = 0,
        QEDE_FLAGS_LINK_REQUESTED,
        QEDE_FLAGS_PTP_TX_IN_PRORGESS,
        QEDE_FLAGS_TX_TIMESTAMPING_EN
};

#define QEDE_DUMP_MAX_ARGS 4
enum qede_dump_cmd {
        QEDE_DUMP_CMD_NONE = 0,
        QEDE_DUMP_CMD_NVM_CFG,
        QEDE_DUMP_CMD_GRCDUMP,
        QEDE_DUMP_CMD_MAX
};

struct qede_dump_info {
        enum qede_dump_cmd cmd;
        u8 num_args;
        u32 args[QEDE_DUMP_MAX_ARGS];
};

struct qede_coalesce {
        bool isvalid;
        u16 rxc;
        u16 txc;
};

struct qede_dev {
        struct qed_dev *cdev;
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct devlink *devlink;

        u32 dp_module;
        u8 dp_level;

        unsigned long flags;
#define IS_VF(edev) test_bit(QEDE_FLAGS_IS_VF, \
                             &(edev)->flags)

        const struct qed_eth_ops *ops;
        struct qede_ptp *ptp;
        u64 ptp_skip_txts;

        struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
        ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
        ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)

        struct qede_fastpath *fp_array;
        struct qede_coalesce *coal_entry;
        u8 req_num_tx;
        u8 fp_num_tx;
        u8 req_num_rx;
        u8 fp_num_rx;
        u16 req_queues;
        u16 num_queues;
        u16 total_xdp_queues;

#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i) (i)
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)

        struct qed_int_info int_info;

        /* Smaller private variant of the RTNL lock */
        struct mutex qede_lock;
        u32 state; /* Protected by qede_lock */
        u16 rx_buf_size;
        u32 rx_copybreak;

        /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
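        /* Worked example, assuming the standard 14-byte ETH_HLEN: the
         * per-frame budget for non-payload headers when sizing Rx
         * buffers comes to 14 + 8 + 8 = 30 bytes.
         */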
        /* Max supported alignment is 256 (8 shift)
         * minimal alignment shift 6 is optimal for 57xxx HW performance
         */
#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
        /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
         * at the end of skb->data, to avoid wasting a full cache line.
         * This reduces memory use (skb->truesize).
         */
#define QEDE_FW_RX_ALIGN_END					\
        max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
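        /* Illustrative arithmetic (assuming a 64-byte L1 cache line and
         * a struct skb_shared_info of 320 bytes, as on typical x86_64
         * builds): QEDE_RX_ALIGN_SHIFT = max(6, min(8, 6)) = 6, so
         * QEDE_FW_RX_ALIGN_END = max(1 << 6, SKB_DATA_ALIGN(320)) = 320
         * bytes reserved at the end of each buffer for the shared info
         * that build_skb() places there.
         */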

        struct qede_stats stats;

        /* Bitfield to track initialized RSS params */
        u32 rss_params_inited;
#define QEDE_RSS_INDIR_INITED BIT(0)
#define QEDE_RSS_KEY_INITED BIT(1)
#define QEDE_RSS_CAPS_INITED BIT(2)

        u16 rss_ind_table[128];
        u32 rss_key[10];
        u8 rss_caps;
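        /* Sketch of the intended pattern (illustrative, not a verbatim
         * copy of the ethtool path): when userspace supplies an
         * indirection table, record it and mark it valid:
         *
         *	for (i = 0; i < 128; i++)
         *		edev->rss_ind_table[i] = indir[i];
         *	edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
         */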

        /* Both must be a power of two */
        u16 q_num_rx_buffers;
        u16 q_num_tx_buffers;

        bool gro_disable;

        struct list_head vlan_list;
        u16 configured_vlans;
        u16 non_configured_vlans;
        bool accept_any_vlan;

        struct delayed_work sp_task;
        unsigned long sp_flags;
        u16 vxlan_dst_port;
        u16 geneve_dst_port;

        struct qede_arfs *arfs;
        bool wol_enabled;

        struct qede_rdma_dev rdma_info;

        struct bpf_prog *xdp_prog;

        enum qed_hw_err_type last_err_type;
        unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED 31
#define QEDE_ERR_ATTN_CLR_EN 0
#define QEDE_ERR_GET_DBG_INFO 1
#define QEDE_ERR_IS_RECOVERABLE 2
#define QEDE_ERR_WARN 3

        struct qede_dump_info dump_info;
        struct delayed_work periodic_task;
        unsigned long stats_coal_ticks;
        u32 stats_coal_usecs;
        spinlock_t stats_lock; /* lock for vport stats access */
};

enum QEDE_STATE {
        QEDE_STATE_CLOSED,
        QEDE_STATE_OPEN,
        QEDE_STATE_RECOVERY,
};

#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
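/* Example: HILO_U64(0x00000001, 0x00000002) == 0x0000000100000002ULL;
 * the two 32-bit halves that hardware exposes separately are
 * recombined into a single 64-bit value.
 */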

#define MAX_NUM_TC 8
#define MAX_NUM_PRI 8

/* The driver supports the new build_skb() API:
 * the RX ring buffer contains a pointer to the kmalloc() data only,
 * and skbs are built only after the frame was DMA-ed.
 */
struct sw_rx_data {
        struct page *data;
        dma_addr_t mapping;
        unsigned int page_offset;
};

enum qede_agg_state {
        QEDE_AGG_STATE_NONE = 0,
        QEDE_AGG_STATE_START = 1,
        QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
        /* rx_buf is a data buffer that can be placed / consumed from rx bd
         * chain. It has two purposes: We will preallocate the data buffer
         * for each aggregation when we open the interface and will place this
         * buffer on the rx-bd-ring when we receive TPA_START. We don't want
         * to be in a state where allocation fails, as we can't reuse the
         * consumer buffer in the rx-chain since FW may still be writing to it
         * (since header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * aggregation.
         */
        struct sw_rx_data buffer;
        struct sk_buff *skb;

        /* We need some structs from the start cookie until termination */
        u16 vlan_tag;

        bool tpa_start_fail;
        u8 state;
        u8 frag_id;

        u8 tunnel_type;
};

struct qede_rx_queue {
        __le16 *hw_cons_ptr;
        void __iomem *hw_rxq_prod_addr;

        /* Required for the allocation of replacement buffers */
        struct device *dev;

        struct bpf_prog *xdp_prog;

        u16 sw_rx_cons;
        u16 sw_rx_prod;

        u16 filled_buffers;
        u8 data_direction;
        u8 rxq_id;

        /* Used once per NAPI run */
        u16 num_rx_buffers;

        u16 rx_headroom;

        u32 rx_buf_size;
        u32 rx_buf_seg_size;

        struct sw_rx_data *sw_rx_ring;
        struct qed_chain rx_bd_ring;
        struct qed_chain rx_comp_ring ____cacheline_aligned;

        /* GRO */
        struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

        /* Used once per NAPI run */
        u64 rcv_pkts;

        u64 rx_hw_errors;
        u64 rx_alloc_errors;
        u64 rx_ip_frags;

        u64 xdp_no_pass;

        void *handle;
        struct xdp_rxq_info xdp_rxq;
};

union db_prod {
        struct eth_db_data data;
        u32 raw;
};

struct sw_tx_bd {
        struct sk_buff *skb;
        u8 flags;
        /* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD BIT(0)
};

struct sw_tx_xdp {
        struct page *page;
        struct xdp_frame *xdpf;
        dma_addr_t mapping;
};

struct qede_tx_queue {
        u8 is_xdp;
        bool is_legacy;
        u16 sw_tx_cons;
        u16 sw_tx_prod;
        u16 num_tx_buffers; /* Slowpath only */

        u64 xmit_pkts;
        u64 stopped_cnt;
        u64 tx_mem_alloc_err;

        __le16 *hw_cons_ptr;

        /* Needed for the mapping of packets */
        struct device *dev;

        void __iomem *doorbell_addr;
        union db_prod tx_db;

        /* Spinlock for XDP queues in case of XDP_REDIRECT */
        spinlock_t xdp_tx_lock;

        int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
                                        QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
                                              ((idx) % QEDE_TSS_COUNT(edev)))
#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
                                             (txq)->cos) + (txq)->index)
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
        (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
        [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
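        /* Worked example with hypothetical numbers: if fp_num_rx = 2
         * and QEDE_TSS_COUNT(edev) = 4, netdev Tx queue id 9 maps to
         * fastpath 2 + (9 % 4) = 3 at traffic class 9 / 4 = 2; the
         * reverse mapping for that queue (->cos = 2, ->index = 1)
         * yields (4 * 2) + 1 = 9 again.
         */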

        /* Regular Tx requires skb + metadata for release purposes,
         * while XDP requires the pages and the mapped address.
         */
        union {
                struct sw_tx_bd *skbs;
                struct sw_tx_xdp *xdp;
        } sw_tx_ring;

        struct qed_chain tx_pbl;

        /* Slowpath; should be kept at the end [unless filling a padding hole] */
        void *handle;
        u16 cos;
        u16 ndev_txq_id;
};

#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
                                   le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
                (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
                (bd)->nbytes = cpu_to_le16(len); \
        } while (0)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
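/* Sketch of the intended pairing around DMA mapping (illustrative, not
 * a verbatim copy of the datapath code):
 *
 *	mapping = dma_map_single(txq->dev, skb->data, len, DMA_TO_DEVICE);
 *	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, len);
 *	...
 *	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
 *			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 */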

struct qede_fastpath {
        struct qede_dev *edev;

        u8 type;
#define QEDE_FASTPATH_TX BIT(0)
#define QEDE_FASTPATH_RX BIT(1)
#define QEDE_FASTPATH_XDP BIT(2)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

        u8 id;

        u8 xdp_xmit;
#define QEDE_XDP_TX BIT(0)
#define QEDE_XDP_REDIRECT BIT(1)

        struct napi_struct napi;
        struct qed_sb_info *sb_info;
        struct qede_rx_queue *rxq;
        struct qede_tx_queue *txq;
        struct qede_tx_queue *xdp_tx;

        char name[IFNAMSIZ + 8];
};

/* Debug print definitions */
#define DP_NAME(edev) netdev_name((edev)->ndev)

#define XMIT_PLAIN 0
#define XMIT_L4_CSUM BIT(0)
#define XMIT_LSO BIT(1)
#define XMIT_ENC BIT(2)
#define XMIT_ENC_GSO_L4_CSUM BIT(3)

#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)

#define QEDE_SP_RECOVERY 0
#define QEDE_SP_RX_MODE 1
#define QEDE_SP_RSVD1 2
#define QEDE_SP_RSVD2 3
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
#define QEDE_SP_DISABLE 8

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id);
#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
#endif

void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                          u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);

struct qede_reload_args {
        void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
        union {
                netdev_features_t features;
                struct bpf_prog *new_prog;
                u16 mtu;
        } u;
};

/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int qede_xdp_transmit(struct net_device *dev, int n_frames,
                      struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
                      struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
                     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

netdev_features_t qede_fix_features(struct net_device *dev,
                                    netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update);

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
                 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                            struct flow_cls_offload *f);

void qede_forced_speed_maps_init(void);
int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
                      struct kernel_ethtool_coalesce *kernel_coal,
                      struct netlink_ext_ack *extack);
int qede_set_per_coalesce(struct net_device *dev, u32 queue,
                          struct ethtool_coalesce *coal);

#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
#define NUM_RX_BDS_KDUMP_MIN 63
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
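/* Worked numbers: RX_RING_SIZE_POW = 13 gives an 8192-entry ring, of
 * which NUM_RX_BDS_MAX = 8191 are usable (one descriptor is held back,
 * the usual way to tell a full ring from an empty one), with a default
 * of BIT(10) - 1 = 1023 Rx buffer descriptors.
 */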

#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_KDUMP_MIN 63
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX

#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#define for_each_cos_in_txq(edev, var) \
        for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
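/* Usage sketch (illustrative); note that for_each_queue() assumes a
 * local 'edev' is in scope:
 *
 *	int i, cos;
 *
 *	for_each_queue(i)
 *		for_each_cos_in_txq(edev, cos)
 *			if (edev->fp_array[i].type & QEDE_FASTPATH_TX)
 *				... &edev->fp_array[i].txq[cos] ...
 */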

#endif /* _QEDE_H_ */