1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
4 | */ |
5 | /* |
6 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
7 | * Copyright (c) 2014-2015 QLogic Corporation |
8 | * All rights reserved |
9 | * www.qlogic.com |
10 | */ |
11 | #ifndef __BNA_TYPES_H__ |
12 | #define __BNA_TYPES_H__ |
13 | |
14 | #include "cna.h" |
15 | #include "bna_hw_defs.h" |
16 | #include "bfa_cee.h" |
17 | #include "bfa_msgq.h" |
18 | |
19 | /* Forward declarations */ |
20 | |
21 | struct bna_mcam_handle; |
22 | struct bna_txq; |
23 | struct bna_tx; |
24 | struct bna_rxq; |
25 | struct bna_cq; |
26 | struct bna_rx; |
27 | struct bna_rxf; |
28 | struct bna_enet; |
29 | struct bna; |
30 | struct bnad; |
31 | |
32 | /* Enums, primitive data types */ |
33 | |
/* Generic enabled/disabled state used across BNA objects */
enum bna_status {
	BNA_STATUS_T_DISABLED	= 0,
	BNA_STATUS_T_ENABLED	= 1
};

/* Queue cleanup policy applied after a stop or failure */
enum bna_cleanup_type {
	BNA_HARD_CLEANUP	= 0,
	BNA_SOFT_CLEANUP	= 1
};

/* Status codes reported back to bnad through completion callbacks */
enum bna_cb_status {
	BNA_CB_SUCCESS		= 0,
	BNA_CB_FAIL		= 1,
	BNA_CB_INTERRUPT	= 2,
	BNA_CB_BUSY		= 3,
	BNA_CB_INVALID_MAC	= 4,
	BNA_CB_MCAST_LIST_FULL	= 5,
	BNA_CB_UCAST_CAM_FULL	= 6,
	BNA_CB_WAITING		= 7,
	BNA_CB_NOT_EXEC		= 8
};

/* Resource classes carried by struct bna_res_info */
enum bna_res_type {
	BNA_RES_T_MEM		= 1,
	BNA_RES_T_INTR		= 2
};

/* Memory resource flavors: kernel-virtual vs. DMA-able */
enum bna_mem_type {
	BNA_MEM_T_KVA		= 1,
	BNA_MEM_T_DMA		= 2
};

/* Interrupt delivery mechanisms */
enum bna_intr_type {
	BNA_INTR_T_INTX		= 1,
	BNA_INTR_T_MSIX		= 2
};

/* Indices into the device-level resource request array */
enum bna_res_req_type {
	BNA_RES_MEM_T_COM	= 0,
	BNA_RES_MEM_T_ATTR	= 1,
	BNA_RES_MEM_T_FWTRC	= 2,
	BNA_RES_MEM_T_STATS	= 3,
	BNA_RES_T_MAX
};

/* Indices into the module-level resource request array */
enum bna_mod_res_req_type {
	BNA_MOD_RES_MEM_T_TX_ARRAY	= 0,
	BNA_MOD_RES_MEM_T_TXQ_ARRAY	= 1,
	BNA_MOD_RES_MEM_T_RX_ARRAY	= 2,
	BNA_MOD_RES_MEM_T_RXP_ARRAY	= 3,
	BNA_MOD_RES_MEM_T_RXQ_ARRAY	= 4,
	BNA_MOD_RES_MEM_T_UCMAC_ARRAY	= 5,
	BNA_MOD_RES_MEM_T_MCMAC_ARRAY	= 6,
	BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
	BNA_MOD_RES_T_MAX
};

/* Indices into a Tx object's resource request array */
enum bna_tx_res_req_type {
	BNA_TX_RES_MEM_T_TCB	= 0,
	BNA_TX_RES_MEM_T_UNMAPQ	= 1,
	BNA_TX_RES_MEM_T_QPT	= 2,
	BNA_TX_RES_MEM_T_SWQPT	= 3,
	BNA_TX_RES_MEM_T_PAGE	= 4,
	BNA_TX_RES_MEM_T_IBIDX	= 5,
	BNA_TX_RES_INTR_T_TXCMPL = 6,
	BNA_TX_RES_T_MAX,
};

/* Indices into an Rx object's resource request array */
enum bna_rx_mem_type {
	BNA_RX_RES_MEM_T_CCB		= 0,	/* CQ context */
	BNA_RX_RES_MEM_T_RCB		= 1,	/* CQ context */
	/* NOTE(review): comment above duplicates the CCB one; RCB is
	 * presumably RxQ context — confirm against bna_tx_rx.c usage. */
	BNA_RX_RES_MEM_T_UNMAPHQ	= 2,
	BNA_RX_RES_MEM_T_UNMAPDQ	= 3,
	BNA_RX_RES_MEM_T_CQPT		= 4,
	BNA_RX_RES_MEM_T_CSWQPT		= 5,
	BNA_RX_RES_MEM_T_CQPT_PAGE	= 6,
	BNA_RX_RES_MEM_T_HQPT		= 7,
	BNA_RX_RES_MEM_T_DQPT		= 8,
	BNA_RX_RES_MEM_T_HSWQPT		= 9,
	BNA_RX_RES_MEM_T_DSWQPT		= 10,
	BNA_RX_RES_MEM_T_DPAGE		= 11,
	BNA_RX_RES_MEM_T_HPAGE		= 12,
	BNA_RX_RES_MEM_T_IBIDX		= 13,
	BNA_RX_RES_MEM_T_RIT		= 14,
	BNA_RX_RES_T_INTR		= 15,
	BNA_RX_RES_T_MAX		= 16
};
121 | |
/* Regular vs. loopback Tx object */
enum bna_tx_type {
	BNA_TX_T_REGULAR	= 0,
	BNA_TX_T_LOOPBACK	= 1,
};

/* Bit flags kept in bna_tx->flags (note: bit value 4 is unused here) */
enum bna_tx_flags {
	BNA_TX_F_ENET_STARTED	= 1,
	BNA_TX_F_ENABLED	= 2,
	BNA_TX_F_BW_UPDATED	= 8,
};

/* Bit flags kept in bna_tx_mod->flags */
enum bna_tx_mod_flags {
	BNA_TX_MOD_F_ENET_STARTED	= 1,
	BNA_TX_MOD_F_ENET_LOOPBACK	= 2,
};

/* Regular vs. loopback Rx object */
enum bna_rx_type {
	BNA_RX_T_REGULAR	= 0,
	BNA_RX_T_LOOPBACK	= 1,
};

/* Rx path queue arrangement: single queue, small/large, or header/data */
enum bna_rxp_type {
	BNA_RXP_SINGLE		= 1,
	BNA_RXP_SLR		= 2,
	BNA_RXP_HDS		= 3
};

/* Rx filter modes (bitmask) */
enum bna_rxmode {
	BNA_RXMODE_PROMISC	= 1,
	BNA_RXMODE_DEFAULT	= 2,
	BNA_RXMODE_ALLMULTI	= 4
};

/* Events fed into the Rx object's state machine */
enum bna_rx_event {
	RX_E_START		= 1,
	RX_E_STOP		= 2,
	RX_E_FAIL		= 3,
	RX_E_STARTED		= 4,
	RX_E_STOPPED		= 5,
	RX_E_RXF_STARTED	= 6,
	RX_E_RXF_STOPPED	= 7,
	RX_E_CLEANUP_DONE	= 8,
};

/* Bit flags kept in bna_rx->rx_flags */
enum bna_rx_flags {
	BNA_RX_F_ENET_STARTED	= 1,
	BNA_RX_F_ENABLED	= 2,
};

/* Bit flags kept in bna_rx_mod->flags */
enum bna_rx_mod_flags {
	BNA_RX_MOD_F_ENET_STARTED	= 1,
	BNA_RX_MOD_F_ENET_LOOPBACK	= 2,
};

/* Events fed into the RxF state machine (values 5-6 unused here) */
enum bna_rxf_event {
	RXF_E_START		= 1,
	RXF_E_STOP		= 2,
	RXF_E_FAIL		= 3,
	RXF_E_CONFIG		= 4,
	RXF_E_FW_RESP		= 7,
};

/* Enet object flavors: regular, or internal/external loopback */
enum bna_enet_type {
	BNA_ENET_T_REGULAR		= 0,
	BNA_ENET_T_LOOPBACK_INTERNAL	= 1,
	BNA_ENET_T_LOOPBACK_EXTERNAL	= 2,
};

/* Link states reported up to bnad */
enum bna_link_status {
	BNA_LINK_DOWN		= 0,
	BNA_LINK_UP		= 1,
	BNA_CEE_UP		= 2
};

/* Bit flags kept in bna_ethport->flags */
enum bna_ethport_flags {
	BNA_ETHPORT_F_ADMIN_UP		= 1,
	BNA_ETHPORT_F_PORT_ENABLED	= 2,
	BNA_ETHPORT_F_RX_STARTED	= 4,
};

/* Bit flags kept in bna_enet->flags */
enum bna_enet_flags {
	BNA_ENET_F_IOCETH_READY	= 1,
	BNA_ENET_F_ENABLED	= 2,
	BNA_ENET_F_PAUSE_CHANGED = 4,
	BNA_ENET_F_MTU_CHANGED	= 8
};
208 | |
/*
 * Pending-RSS-work bit flags kept in bna_rxf (see the rss_pending member
 * of struct bna_rxf below).
 *
 * The enum tag and enumerator names were lost in this copy of the file
 * (only "= 1, = 2, = 4" survived); restored to match the upstream bna
 * driver, where bna_rxf declares a member of type enum bna_rss_flags.
 */
enum bna_rss_flags {
	BNA_RSS_F_RIT_PENDING		= 1,	/* RIT (indirection table) update pending */
	BNA_RSS_F_CFG_PENDING		= 2,	/* RSS config update pending */
	BNA_RSS_F_STATUS_PENDING	= 4,	/* RSS enable/disable pending */
};
214 | |
/* Bit flags kept in bna->mod_flags */
enum bna_mod_flags {
	BNA_MOD_F_INIT_DONE		= 1,
};

/* Packet-rate thresholds (packets/sec) for dynamic interrupt moderation */
enum bna_pkt_rates {
	BNA_PKT_RATE_10K		= 10000,
	BNA_PKT_RATE_20K		= 20000,
	BNA_PKT_RATE_30K		= 30000,
	BNA_PKT_RATE_40K		= 40000,
	BNA_PKT_RATE_50K		= 50000,
	BNA_PKT_RATE_60K		= 60000,
	BNA_PKT_RATE_70K		= 70000,
	BNA_PKT_RATE_80K		= 80000,
};

/* Load buckets derived from packet rate; first index into dim_vector[][] */
enum bna_dim_load_types {
	BNA_LOAD_T_HIGH_4		= 0, /* 80K <= r */
	BNA_LOAD_T_HIGH_3		= 1, /* 60K <= r < 80K */
	BNA_LOAD_T_HIGH_2		= 2, /* 50K <= r < 60K */
	BNA_LOAD_T_HIGH_1		= 3, /* 40K <= r < 50K */
	BNA_LOAD_T_LOW_1		= 4, /* 30K <= r < 40K */
	BNA_LOAD_T_LOW_2		= 5, /* 20K <= r < 30K */
	BNA_LOAD_T_LOW_3		= 6, /* 10K <= r < 20K */
	BNA_LOAD_T_LOW_4		= 7, /* r < 10K */
	BNA_LOAD_T_MAX			= 8
};

/* Small/large-packet bias; second index into dim_vector[][] */
enum bna_dim_bias_types {
	BNA_BIAS_T_SMALL		= 0, /* small pkts > (large pkts * 2) */
	BNA_BIAS_T_LARGE		= 1, /* Not BNA_BIAS_T_SMALL */
	BNA_BIAS_T_MAX			= 2
};
247 | |
#define BNA_MAX_NAME_SIZE	64
/* Device identity: numeric instance id plus printable name */
struct bna_ident {
	int			id;
	char			name[BNA_MAX_NAME_SIZE];
};

/* MAC address element queued on UCAM/MCAM free/pending/active lists */
struct bna_mac {
	/* This should be the first one */
	struct list_head	qe;
	u8			addr[ETH_ALEN];
	struct bna_mcam_handle	*handle;	/* MCAM handle; NULL for unicast use */
};

/* One contiguous memory chunk: length, kernel VA, and DMA address */
struct bna_mem_descr {
	u32		len;
	void		*kva;
	struct bna_dma_addr dma;
};

/* Memory resource request/response: "num" chunks of "len" bytes each */
struct bna_mem_info {
	enum bna_mem_type mem_type;
	u32		len;
	u32		num;
	u32		align_sz; /* 0/1 = no alignment */
	struct bna_mem_descr *mdl;	/* array of "num" descriptors */
	void		*cookie; /* For bnad to unmap dma later */
};

/* A single interrupt vector number */
struct bna_intr_descr {
	int			vector;
};

/* Interrupt resource request/response: "num" vectors in "idl" */
struct bna_intr_info {
	enum bna_intr_type intr_type;
	int			num;
	struct bna_intr_descr *idl;
};

/* Resource payload: either a memory or an interrupt description */
union bna_res_u {
	struct bna_mem_info	mem_info;
	struct bna_intr_info	intr_info;
};

/* Tagged resource entry; res_type selects the active union member */
struct bna_res_info {
	enum bna_res_type	res_type;
	union bna_res_u		res_u;
};

/* HW QPT */
/* Queue page table: h/w-visible DMA pointer, kernel pointer, geometry */
struct bna_qpt {
	struct bna_dma_addr	hw_qpt_ptr;
	void			*kv_qpt_ptr;
	u32			page_count;
	u32			page_size;
};

/* Device capabilities reported by firmware (valid once fw_query_complete) */
struct bna_attr {
	bool			fw_query_complete;
	int			num_txq;
	int			num_rxp;
	int			num_ucmac;
	int			num_mcmac;
	int			max_rit_size;
};
312 | |
/* IOCEth */

enum bna_ioceth_event;

/* IOC-Ethernet object: owns the IOC and the firmware attribute exchange */
struct bna_ioceth {
	void (*fsm)(struct bna_ioceth *s, enum bna_ioceth_event e);
	struct bfa_ioc ioc;

	struct bna_attr attr;			/* filled from attr_req response */
	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_attr_req attr_req;

	/* Callback + argument invoked when the stop sequence completes */
	void (*stop_cbfn)(struct bnad *bnad);
	struct bnad *stop_cbarg;

	struct bna *bna;
};

/* Enet */

/* Pause configuration */
struct bna_pause_config {
	enum bna_status tx_pause;
	enum bna_status rx_pause;
};
338 | |
enum bna_enet_event;

/* Enet object: link-level container coordinating Tx/Rx module start/stop */
struct bna_enet {
	void (*fsm)(struct bna_enet *s, enum bna_enet_event e);
	enum bna_enet_flags flags;

	enum bna_enet_type type;

	struct bna_pause_config pause_config;
	int mtu;

	/* Callback for bna_enet_disable(), enet_stop() */
	void (*stop_cbfn)(void *);
	void *stop_cbarg;

	/* Callback for bna_enet_mtu_set() */
	void (*mtu_cbfn)(struct bnad *);

	/* Wait counter tracking outstanding child-object stops */
	struct bfa_wc chld_stop_wc;

	struct bfa_msgq_cmd_entry msgq_cmd;
	struct bfi_enet_set_pause_req pause_req;

	struct bna *bna;
};
364 | |
/* Ethport */

enum bna_ethport_event;

/* Ethernet port object: admin up/down state and link-status reporting */
struct bna_ethport {
	void (*fsm)(struct bna_ethport *s, enum bna_ethport_event e);
	enum bna_ethport_flags flags;

	enum bna_link_status link_status;

	int rx_started_count;	/* number of Rx objects started on this port */

	void (*stop_cbfn)(struct bna_enet *);

	/* completion for an admin-up request */
	void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);

	/* link-state change notification to bnad */
	void (*link_cbfn)(struct bnad *, enum bna_link_status);

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_enable_req admin_req;
		struct bfi_enet_diag_lb_req lpbk_req;
	} bfi_enet_cmd;

	struct bna *bna;
};

/* Interrupt Block */

/* Doorbell structure */
struct bna_ib_dbell {
	void __iomem   *doorbell_addr;
	u32		doorbell_ack;
};

/* IB structure */
struct bna_ib {
	struct bna_dma_addr ib_seg_host_addr;
	void		*ib_seg_host_addr_kva;

	struct bna_ib_dbell door_bell;

	enum bna_intr_type	intr_type;
	int			intr_vector;

	u8			coalescing_timeo;	/* Unit is 5usec. */

	int			interpkt_count;
	int			interpkt_timeo;
};
415 | |
/* Tx object */

/* Tx datapath control structure */
#define	BNA_Q_NAME_SIZE		16
struct bna_tcb {
	/* Fast path */
	void			**sw_qpt;	/* s/w page table of queue pages */
	void			*sw_q;
	void			*unmap_q;
	u32		producer_index;
	u32		consumer_index;
	volatile u32	*hw_consumer_index;	/* updated by h/w via the IB segment */
	u32		q_depth;
	void __iomem   *q_dbell;
	struct bna_ib_dbell *i_dbell;
	/* Control path */
	struct bna_txq *txq;
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int			intr_vector;
	u8			priority; /* Current priority */
	unsigned long		flags; /* Used by bnad as required */
	int			id;
	char			name[BNA_Q_NAME_SIZE];
};

/* TxQ QPT and configuration */
struct bna_txq {
	/* This should be the first one */
	struct list_head	 qe;

	u8			priority;

	struct bna_qpt qpt;
	struct bna_tcb *tcb;
	struct bna_ib ib;

	struct bna_tx *tx;	/* owning Tx object */

	int			hw_id;

	u64		tx_packets;
	u64		tx_bytes;
};
461 | |
/* Tx object */

enum bna_tx_event;

/* Tx object: a set of TxQs plus the FSM driving their lifecycle */
struct bna_tx {
	/* This should be the first one */
	struct list_head	qe;
	int			rid;
	int			hw_id;

	void (*fsm)(struct bna_tx *s, enum bna_tx_event e);
	enum bna_tx_flags flags;

	enum bna_tx_type type;
	int			num_txq;

	struct list_head	txq_q;		/* list of member bna_txq */
	u16			txf_vlan_id;

	/* Tx event handlers */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);

	/* callback for bna_tx_disable(), bna_tx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_tx *tx);
	void			*stop_cbarg;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_tx_cfg_req	cfg_req;
		struct bfi_enet_req		req;
		struct bfi_enet_tx_cfg_rsp	cfg_rsp;
	} bfi_enet_cmd;

	struct bna *bna;
	void			*priv;	/* bnad's cookie */
};

/* Tx object configuration used during creation */
struct bna_tx_config {
	int	num_txq;
	int	txq_depth;
	int	coalescing_timeo;
	enum bna_tx_type tx_type;
};

/* Event callbacks supplied by bnad at Tx creation time */
struct bna_tx_event_cbfn {
	/* Optional */
	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
	/* Mandatory */
	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
};
520 | |
/* Tx module - keeps track of free, active tx objects */
struct bna_tx_mod {
	struct bna_tx *tx;		/* BFI_MAX_TXQ entries */
	struct bna_txq *txq;		/* BFI_MAX_TXQ entries */

	struct list_head	tx_free_q;
	struct list_head	tx_active_q;

	struct list_head	txq_free_q;

	/* callback for bna_tx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);

	/* wait counter for stopping all active Tx objects */
	struct bfa_wc		tx_stop_wc;

	enum bna_tx_mod_flags flags;

	/* CEE / priority bookkeeping */
	u8			prio_map;
	int			default_prio;
	int			iscsi_over_cee;
	int			iscsi_prio;
	int			prio_reconfigured;

	u32			rid_mask;	/* bitmask of allocated Tx rids */

	struct bna *bna;
};
548 | |
/* Rx object */

/* Rx datapath control structure */
struct bna_rcb {
	/* Fast path */
	void			**sw_qpt;	/* s/w page table of queue pages */
	void			*sw_q;
	void			*unmap_q;
	u32		producer_index;
	u32		consumer_index;
	u32		q_depth;
	void __iomem   *q_dbell;
	/* Control path */
	struct bna_rxq *rxq;
	struct bna_ccb *ccb;	/* completion queue this RxQ feeds */
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	unsigned long		flags;
	int			id;
};

/* RxQ structure - QPT, configuration */
struct bna_rxq {
	struct list_head	qe;

	int			buffer_size;
	int			q_depth;
	u32			num_vecs;
	enum bna_status		multi_buffer;

	struct bna_qpt qpt;
	struct bna_rcb *rcb;

	struct bna_rxp *rxp;	/* owning Rx path */
	struct bna_rx *rx;	/* owning Rx object */

	int			hw_id;

	u64		rx_packets;
	u64		rx_bytes;
	u64		rx_packets_with_error;
	u64		rxbuf_alloc_failed;
	u64		rxbuf_map_failed;
};

/* RxQ pair */
/* Interpretation follows the owning bna_rxp's type (HDS/SLR/SINGLE) */
union bna_rxq_u {
	struct {
		struct bna_rxq	*hdr;
		struct bna_rxq	*data;
	} hds;
	struct {
		struct bna_rxq	*small;
		struct bna_rxq	*large;
	} slr;
	struct {
		struct bna_rxq	*only;
		struct bna_rxq	*reserved;
	} single;
};

/* Packet rate for Dynamic Interrupt Moderation */
struct bna_pkt_rate {
	u32		small_pkt_cnt;
	u32		large_pkt_cnt;
};
615 | |
/* Completion control structure */
struct bna_ccb {
	/* Fast path */
	void			**sw_qpt;	/* s/w page table of queue pages */
	void			*sw_q;
	u32		producer_index;
	volatile u32	*hw_producer_index;	/* updated by h/w via the IB segment */
	u32		q_depth;
	struct bna_ib_dbell *i_dbell;
	struct bna_rcb	*rcb[2];	/* member RxQs (pair; second may be unused) */
	void			*ctrl;	/* For bnad */
	struct bna_pkt_rate pkt_rate;
	u32			pkts_una;
	u32			bytes_per_intr;

	/* Control path */
	struct bna_cq *cq;
	struct bnad *bnad;
	void			*priv; /* BNAD's cookie */
	enum bna_intr_type intr_type;
	int			intr_vector;
	u8			rx_coalescing_timeo; /* For NAPI */
	int			id;
	char			name[BNA_Q_NAME_SIZE];
};

/* CQ QPT, configuration */
struct bna_cq {
	struct bna_qpt qpt;
	struct bna_ccb *ccb;

	struct bna_ib ib;

	struct bna_rx *rx;	/* owning Rx object */
};
651 | |
652 | struct { |
653 | enum bfi_enet_rss_type ; |
654 | u8 ; |
655 | u32 [BFI_ENET_RSS_KEY_LEN]; |
656 | }; |
657 | |
/* Header-data-split configuration for HDS-type Rx paths */
struct bna_hds_config {
	enum bfi_enet_hds_type hdr_type;
	int			forced_offset;	/* fixed header split offset */
};
662 | |
663 | /* Rx object configuration used during creation */ |
664 | struct bna_rx_config { |
665 | enum bna_rx_type rx_type; |
666 | int num_paths; |
667 | enum bna_rxp_type rxp_type; |
668 | int coalescing_timeo; |
669 | /* |
670 | * Small/Large (or Header/Data) buffer size to be configured |
671 | * for SLR and HDS queue type. |
672 | */ |
673 | u32 frame_size; |
674 | |
675 | /* header or small queue */ |
676 | u32 q1_depth; |
677 | u32 q1_buf_size; |
678 | |
679 | /* data or large queue */ |
680 | u32 q0_depth; |
681 | u32 q0_buf_size; |
682 | u32 q0_num_vecs; |
683 | enum bna_status q0_multi_buf; |
684 | |
685 | enum bna_status ; |
686 | struct bna_rss_config ; |
687 | |
688 | struct bna_hds_config hds_config; |
689 | |
690 | enum bna_status vlan_strip_status; |
691 | }; |
692 | |
/* Rx Path structure - one per MSIX vector/CPU */
struct bna_rxp {
	/* This should be the first one */
	struct list_head	qe;

	enum bna_rxp_type type;		/* selects the active rxq union member */
	union	bna_rxq_u	rxq;
	struct bna_cq		cq;

	struct bna_rx *rx;	/* owning Rx object */

	/* MSI-x vector number for configuring RSS */
	int	vector;
	int	hw_id;
};
708 | |
709 | /* RxF structure (hardware Rx Function) */ |
710 | |
711 | enum bna_rxf_event; |
712 | |
713 | struct bna_rxf { |
714 | void (*fsm)(struct bna_rxf *s, enum bna_rxf_event e); |
715 | |
716 | struct bfa_msgq_cmd_entry msgq_cmd; |
717 | union { |
718 | struct bfi_enet_enable_req req; |
719 | struct bfi_enet_rss_cfg_req ; |
720 | struct bfi_enet_rit_req rit_req; |
721 | struct bfi_enet_rx_vlan_req vlan_req; |
722 | struct bfi_enet_mcast_add_req mcast_add_req; |
723 | struct bfi_enet_mcast_del_req mcast_del_req; |
724 | struct bfi_enet_ucast_req ucast_req; |
725 | } bfi_enet_cmd; |
726 | |
727 | /* callback for bna_rxf_start() */ |
728 | void (*start_cbfn) (struct bna_rx *rx); |
729 | struct bna_rx *start_cbarg; |
730 | |
731 | /* callback for bna_rxf_stop() */ |
732 | void (*stop_cbfn) (struct bna_rx *rx); |
733 | struct bna_rx *stop_cbarg; |
734 | |
735 | /** |
736 | * callback for: |
737 | * bna_rxf_ucast_set() |
738 | * bna_rxf_{ucast/mcast}_add(), |
739 | * bna_rxf_{ucast/mcast}_del(), |
740 | * bna_rxf_mode_set() |
741 | */ |
742 | void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx); |
743 | struct bnad *cam_fltr_cbarg; |
744 | |
745 | /* List of unicast addresses yet to be applied to h/w */ |
746 | struct list_head ucast_pending_add_q; |
747 | struct list_head ucast_pending_del_q; |
748 | struct bna_mac *ucast_pending_mac; |
749 | int ucast_pending_set; |
750 | /* ucast addresses applied to the h/w */ |
751 | struct list_head ucast_active_q; |
752 | struct bna_mac ucast_active_mac; |
753 | int ucast_active_set; |
754 | |
755 | /* List of multicast addresses yet to be applied to h/w */ |
756 | struct list_head mcast_pending_add_q; |
757 | struct list_head mcast_pending_del_q; |
758 | /* multicast addresses applied to the h/w */ |
759 | struct list_head mcast_active_q; |
760 | struct list_head mcast_handle_q; |
761 | |
762 | /* Rx modes yet to be applied to h/w */ |
763 | enum bna_rxmode rxmode_pending; |
764 | enum bna_rxmode rxmode_pending_bitmask; |
765 | /* Rx modes applied to h/w */ |
766 | enum bna_rxmode rxmode_active; |
767 | |
768 | u8 vlan_pending_bitmask; |
769 | enum bna_status vlan_filter_status; |
770 | u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32]; |
771 | bool vlan_strip_pending; |
772 | enum bna_status vlan_strip_status; |
773 | |
774 | enum bna_rss_flags ; |
775 | enum bna_status ; |
776 | struct bna_rss_config ; |
777 | u8 *rit; |
778 | int rit_size; |
779 | |
780 | struct bna_rx *rx; |
781 | }; |
782 | |
/* Rx object */

enum bna_rx_event;

/* Rx object: a set of Rx paths plus the RxF and lifecycle FSM */
struct bna_rx {
	/* This should be the first one */
	struct list_head	qe;
	int			rid;
	int			hw_id;

	void (*fsm)(struct bna_rx *s, enum bna_rx_event e);

	enum bna_rx_type type;

	int			num_paths;
	struct list_head	rxp_q;		/* list of member bna_rxp */

	struct bna_hds_config	hds_cfg;

	struct bna_rxf rxf;

	enum bna_rx_flags rx_flags;

	struct bfa_msgq_cmd_entry msgq_cmd;
	union {
		struct bfi_enet_rx_cfg_req	cfg_req;
		struct bfi_enet_req		req;
		struct bfi_enet_rx_cfg_rsp	cfg_rsp;
	} bfi_enet_cmd;

	/* Rx event handlers */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);

	/* callback for bna_rx_disable(), bna_rx_stop() */
	void (*stop_cbfn)(void *arg, struct bna_rx *rx);
	void			*stop_cbarg;

	struct bna *bna;
	void			*priv; /* bnad's cookie */
};

/* Event callbacks supplied by bnad at Rx creation time */
struct bna_rx_event_cbfn {
	/* Optional */
	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
	void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
	/* Mandatory */
	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
};
841 | |
/* Rx module - keeps track of free, active rx objects */
struct bna_rx_mod {
	struct bna *bna;		/* back pointer to parent */
	struct bna_rx *rx;		/* BFI_MAX_RXQ entries */
	struct bna_rxp *rxp;		/* BFI_MAX_RXQ entries */
	struct bna_rxq *rxq;		/* BFI_MAX_RXQ entries */

	struct list_head	rx_free_q;
	struct list_head	rx_active_q;
	int			rx_free_count;

	struct list_head	rxp_free_q;
	int			rxp_free_count;

	struct list_head	rxq_free_q;
	int			rxq_free_count;

	enum bna_rx_mod_flags	flags;

	/* callback for bna_rx_mod_stop() */
	void (*stop_cbfn)(struct bna_enet *enet);

	/* wait counter for stopping all active Rx objects */
	struct bfa_wc		rx_stop_wc;
	/* DIM coalescing values indexed by [load][bias] */
	u32		dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
	u32		rid_mask;	/* bitmask of allocated Rx rids */
};
868 | |
/* CAM */

/* Unicast CAM module: pool of bna_mac entries for unicast addresses */
struct bna_ucam_mod {
	struct bna_mac *ucmac;		/* num_ucmac * 2 entries */
	struct list_head	free_q;
	struct list_head	del_q;	/* entries pending deletion from h/w */

	struct bna *bna;
};

/* Firmware handle for a multicast CAM entry, reference counted */
struct bna_mcam_handle {
	/* This should be the first one */
	struct list_head	qe;
	int			handle;
	int			refcnt;
};

/* Multicast CAM module: pools of MAC entries and firmware handles */
struct bna_mcam_mod {
	struct bna_mac *mcmac;		/* num_mcmac * 2 entries */
	struct bna_mcam_handle *mchandle;	/* num_mcmac entries */
	struct list_head	free_q;
	struct list_head	del_q;	/* entries pending deletion from h/w */
	struct list_head	free_handle_q;

	struct bna *bna;
};
895 | |
/* Statistics */

/* Hardware statistics: DMA buffer shared with firmware plus a host copy */
struct bna_stats {
	struct bna_dma_addr	hw_stats_dma;
	struct bfi_enet_stats	*hw_stats_kva;
	struct bfi_enet_stats	hw_stats;	/* host-side snapshot */
};

/* Statistics module: get/clear requests sent to firmware over the msgq */
struct bna_stats_mod {
	bool		ioc_ready;
	bool		stats_get_busy;		/* a get request is in flight */
	bool		stats_clr_busy;		/* a clear request is in flight */
	struct bfa_msgq_cmd_entry stats_get_cmd;
	struct bfa_msgq_cmd_entry stats_clr_cmd;
	struct bfi_enet_stats_req stats_get;
	struct bfi_enet_stats_req stats_clr;
};
913 | |
/* BNA */

/* Top-level BNA device object aggregating all modules above */
struct bna {
	struct bna_ident ident;
	struct bfa_pcidev pcidev;

	struct bna_reg regs;
	struct bna_bit_defn bits;

	struct bna_stats stats;

	struct bna_ioceth ioceth;
	struct bfa_cee cee;
	struct bfa_flash flash;
	struct bfa_msgq msgq;

	struct bna_ethport ethport;
	struct bna_enet enet;
	struct bna_stats_mod stats_mod;

	struct bna_tx_mod tx_mod;
	struct bna_rx_mod rx_mod;
	struct bna_ucam_mod ucam_mod;
	struct bna_mcam_mod mcam_mod;

	enum bna_mod_flags mod_flags;

	/* rid of the Rx currently holding default/promiscuous mode (-1 if none?
	 * TODO confirm sentinel against bna_enet.c) */
	int			default_mode_rid;
	int			promisc_rid;

	struct bnad *bnad;
};
946 | #endif /* __BNA_TYPES_H__ */ |
947 | |