1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
4 | */ |
5 | /* |
6 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
7 | * Copyright (c) 2014-2015 QLogic Corporation |
8 | * All rights reserved |
9 | * www.qlogic.com |
10 | */ |
11 | #include "bna.h" |
12 | #include "bfi.h" |
13 | |
14 | /* IB */ |
15 | static void |
16 | bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) |
17 | { |
18 | ib->coalescing_timeo = coalescing_timeo; |
19 | ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( |
20 | (u32)ib->coalescing_timeo, 0); |
21 | } |
22 | |
23 | /* RXF */ |
24 | |
25 | #define bna_rxf_vlan_cfg_soft_reset(rxf) \ |
26 | do { \ |
27 | (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \ |
28 | (rxf)->vlan_strip_pending = true; \ |
29 | } while (0) |
30 | |
31 | #define (rxf) \ |
32 | do { \ |
33 | if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \ |
34 | (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \ |
35 | BNA_RSS_F_CFG_PENDING | \ |
36 | BNA_RSS_F_STATUS_PENDING); \ |
37 | } while (0) |
38 | |
39 | static int bna_rxf_cfg_apply(struct bna_rxf *rxf); |
40 | static void bna_rxf_cfg_reset(struct bna_rxf *rxf); |
41 | static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf); |
42 | static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf); |
43 | static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf); |
44 | static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf); |
45 | static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, |
46 | enum bna_cleanup_type cleanup); |
47 | static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, |
48 | enum bna_cleanup_type cleanup); |
49 | static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, |
50 | enum bna_cleanup_type cleanup); |
51 | |
52 | bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf, |
53 | enum bna_rxf_event); |
54 | bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf, |
55 | enum bna_rxf_event); |
56 | bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf, |
57 | enum bna_rxf_event); |
58 | bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf, |
59 | enum bna_rxf_event); |
60 | |
61 | static void |
62 | bna_rxf_sm_stopped_entry(struct bna_rxf *rxf) |
63 | { |
64 | call_rxf_stop_cbfn(rxf); |
65 | } |
66 | |
67 | static void |
68 | bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) |
69 | { |
70 | switch (event) { |
71 | case RXF_E_START: |
72 | bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); |
73 | break; |
74 | |
75 | case RXF_E_STOP: |
76 | call_rxf_stop_cbfn(rxf); |
77 | break; |
78 | |
79 | case RXF_E_FAIL: |
80 | /* No-op */ |
81 | break; |
82 | |
83 | case RXF_E_CONFIG: |
84 | call_rxf_cam_fltr_cbfn(rxf); |
85 | break; |
86 | |
87 | default: |
88 | bfa_sm_fault(event); |
89 | } |
90 | } |
91 | |
92 | static void |
93 | bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf) |
94 | { |
95 | if (!bna_rxf_cfg_apply(rxf)) { |
96 | /* No more pending config updates */ |
97 | bfa_fsm_set_state(rxf, bna_rxf_sm_started); |
98 | } |
99 | } |
100 | |
101 | static void |
102 | bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event) |
103 | { |
104 | switch (event) { |
105 | case RXF_E_STOP: |
106 | bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait); |
107 | break; |
108 | |
109 | case RXF_E_FAIL: |
110 | bna_rxf_cfg_reset(rxf); |
111 | call_rxf_start_cbfn(rxf); |
112 | call_rxf_cam_fltr_cbfn(rxf); |
113 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); |
114 | break; |
115 | |
116 | case RXF_E_CONFIG: |
117 | /* No-op */ |
118 | break; |
119 | |
120 | case RXF_E_FW_RESP: |
121 | if (!bna_rxf_cfg_apply(rxf)) { |
122 | /* No more pending config updates */ |
123 | bfa_fsm_set_state(rxf, bna_rxf_sm_started); |
124 | } |
125 | break; |
126 | |
127 | default: |
128 | bfa_sm_fault(event); |
129 | } |
130 | } |
131 | |
132 | static void |
133 | bna_rxf_sm_started_entry(struct bna_rxf *rxf) |
134 | { |
135 | call_rxf_start_cbfn(rxf); |
136 | call_rxf_cam_fltr_cbfn(rxf); |
137 | } |
138 | |
139 | static void |
140 | bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) |
141 | { |
142 | switch (event) { |
143 | case RXF_E_STOP: |
144 | case RXF_E_FAIL: |
145 | bna_rxf_cfg_reset(rxf); |
146 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); |
147 | break; |
148 | |
149 | case RXF_E_CONFIG: |
150 | bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); |
151 | break; |
152 | |
153 | default: |
154 | bfa_sm_fault(event); |
155 | } |
156 | } |
157 | |
158 | static void |
159 | bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf) |
160 | { |
161 | } |
162 | |
163 | static void |
164 | bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event) |
165 | { |
166 | switch (event) { |
167 | case RXF_E_FAIL: |
168 | case RXF_E_FW_RESP: |
169 | bna_rxf_cfg_reset(rxf); |
170 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); |
171 | break; |
172 | |
173 | default: |
174 | bfa_sm_fault(event); |
175 | } |
176 | } |
177 | |
178 | static void |
179 | bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac, |
180 | enum bfi_enet_h2i_msgs req_type) |
181 | { |
182 | struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req; |
183 | |
184 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid); |
185 | req->mh.num_entries = htons( |
186 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req))); |
187 | ether_addr_copy(dst: req->mac_addr, src: mac->addr); |
188 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
189 | sizeof(struct bfi_enet_ucast_req), &req->mh); |
190 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
191 | } |
192 | |
193 | static void |
194 | bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac) |
195 | { |
196 | struct bfi_enet_mcast_add_req *req = |
197 | &rxf->bfi_enet_cmd.mcast_add_req; |
198 | |
199 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ, |
200 | 0, rxf->rx->rid); |
201 | req->mh.num_entries = htons( |
202 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req))); |
203 | ether_addr_copy(dst: req->mac_addr, src: mac->addr); |
204 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
205 | sizeof(struct bfi_enet_mcast_add_req), &req->mh); |
206 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
207 | } |
208 | |
209 | static void |
210 | bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle) |
211 | { |
212 | struct bfi_enet_mcast_del_req *req = |
213 | &rxf->bfi_enet_cmd.mcast_del_req; |
214 | |
215 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ, |
216 | 0, rxf->rx->rid); |
217 | req->mh.num_entries = htons( |
218 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req))); |
219 | req->handle = htons(handle); |
220 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
221 | sizeof(struct bfi_enet_mcast_del_req), &req->mh); |
222 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
223 | } |
224 | |
225 | static void |
226 | bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status) |
227 | { |
228 | struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; |
229 | |
230 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
231 | BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid); |
232 | req->mh.num_entries = htons( |
233 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); |
234 | req->enable = status; |
235 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
236 | sizeof(struct bfi_enet_enable_req), &req->mh); |
237 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
238 | } |
239 | |
240 | static void |
241 | bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status) |
242 | { |
243 | struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; |
244 | |
245 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
246 | BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); |
247 | req->mh.num_entries = htons( |
248 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); |
249 | req->enable = status; |
250 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
251 | sizeof(struct bfi_enet_enable_req), &req->mh); |
252 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
253 | } |
254 | |
255 | static void |
256 | bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx) |
257 | { |
258 | struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; |
259 | int i; |
260 | int j; |
261 | |
262 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
263 | BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); |
264 | req->mh.num_entries = htons( |
265 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req))); |
266 | req->block_idx = block_idx; |
267 | for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) { |
268 | j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i; |
269 | if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) |
270 | req->bit_mask[i] = |
271 | htonl(rxf->vlan_filter_table[j]); |
272 | else |
273 | req->bit_mask[i] = 0xFFFFFFFF; |
274 | } |
275 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
276 | sizeof(struct bfi_enet_rx_vlan_req), &req->mh); |
277 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
278 | } |
279 | |
280 | static void |
281 | bna_bfi_vlan_strip_enable(struct bna_rxf *rxf) |
282 | { |
283 | struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; |
284 | |
285 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
286 | BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); |
287 | req->mh.num_entries = htons( |
288 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); |
289 | req->enable = rxf->vlan_strip_status; |
290 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
291 | sizeof(struct bfi_enet_enable_req), &req->mh); |
292 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
293 | } |
294 | |
295 | static void |
296 | bna_bfi_rit_cfg(struct bna_rxf *rxf) |
297 | { |
298 | struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; |
299 | |
300 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
301 | BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); |
302 | req->mh.num_entries = htons( |
303 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req))); |
304 | req->size = htons(rxf->rit_size); |
305 | memcpy(&req->table[0], rxf->rit, rxf->rit_size); |
306 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
307 | sizeof(struct bfi_enet_rit_req), &req->mh); |
308 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
309 | } |
310 | |
311 | static void |
312 | (struct bna_rxf *rxf) |
313 | { |
314 | struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; |
315 | int i; |
316 | |
317 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
318 | BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); |
319 | req->mh.num_entries = htons( |
320 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req))); |
321 | req->cfg.type = rxf->rss_cfg.hash_type; |
322 | req->cfg.mask = rxf->rss_cfg.hash_mask; |
323 | for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++) |
324 | req->cfg.key[i] = |
325 | htonl(rxf->rss_cfg.toeplitz_hash_key[i]); |
326 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
327 | sizeof(struct bfi_enet_rss_cfg_req), &req->mh); |
328 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
329 | } |
330 | |
331 | static void |
332 | (struct bna_rxf *rxf) |
333 | { |
334 | struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; |
335 | |
336 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
337 | BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); |
338 | req->mh.num_entries = htons( |
339 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); |
340 | req->enable = rxf->rss_status; |
341 | bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, |
342 | sizeof(struct bfi_enet_enable_req), &req->mh); |
343 | bfa_msgq_cmd_post(msgq: &rxf->rx->bna->msgq, cmd: &rxf->msgq_cmd); |
344 | } |
345 | |
346 | /* This function gets the multicast MAC that has already been added to CAM */ |
347 | static struct bna_mac * |
348 | bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr) |
349 | { |
350 | struct bna_mac *mac; |
351 | |
352 | list_for_each_entry(mac, &rxf->mcast_active_q, qe) |
353 | if (ether_addr_equal(addr1: mac->addr, addr2: mac_addr)) |
354 | return mac; |
355 | |
356 | list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe) |
357 | if (ether_addr_equal(addr1: mac->addr, addr2: mac_addr)) |
358 | return mac; |
359 | |
360 | return NULL; |
361 | } |
362 | |
363 | static struct bna_mcam_handle * |
364 | bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle) |
365 | { |
366 | struct bna_mcam_handle *mchandle; |
367 | |
368 | list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe) |
369 | if (mchandle->handle == handle) |
370 | return mchandle; |
371 | |
372 | return NULL; |
373 | } |
374 | |
375 | static void |
376 | bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle) |
377 | { |
378 | struct bna_mac *mcmac; |
379 | struct bna_mcam_handle *mchandle; |
380 | |
381 | mcmac = bna_rxf_mcmac_get(rxf, mac_addr); |
382 | mchandle = bna_rxf_mchandle_get(rxf, handle); |
383 | if (mchandle == NULL) { |
384 | mchandle = bna_mcam_mod_handle_get(mod: &rxf->rx->bna->mcam_mod); |
385 | mchandle->handle = handle; |
386 | mchandle->refcnt = 0; |
387 | list_add_tail(new: &mchandle->qe, head: &rxf->mcast_handle_q); |
388 | } |
389 | mchandle->refcnt++; |
390 | mcmac->handle = mchandle; |
391 | } |
392 | |
393 | static int |
394 | bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac, |
395 | enum bna_cleanup_type cleanup) |
396 | { |
397 | struct bna_mcam_handle *mchandle; |
398 | int ret = 0; |
399 | |
400 | mchandle = mac->handle; |
401 | if (mchandle == NULL) |
402 | return ret; |
403 | |
404 | mchandle->refcnt--; |
405 | if (mchandle->refcnt == 0) { |
406 | if (cleanup == BNA_HARD_CLEANUP) { |
407 | bna_bfi_mcast_del_req(rxf, handle: mchandle->handle); |
408 | ret = 1; |
409 | } |
410 | list_del(entry: &mchandle->qe); |
411 | bna_mcam_mod_handle_put(mcam_mod: &rxf->rx->bna->mcam_mod, handle: mchandle); |
412 | } |
413 | mac->handle = NULL; |
414 | |
415 | return ret; |
416 | } |
417 | |
418 | static int |
419 | bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) |
420 | { |
421 | struct bna_mac *mac = NULL; |
422 | int ret; |
423 | |
424 | /* First delete multicast entries to maintain the count */ |
425 | while (!list_empty(head: &rxf->mcast_pending_del_q)) { |
426 | mac = list_first_entry(&rxf->mcast_pending_del_q, |
427 | struct bna_mac, qe); |
428 | ret = bna_rxf_mcast_del(rxf, mac, cleanup: BNA_HARD_CLEANUP); |
429 | list_move_tail(list: &mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); |
430 | if (ret) |
431 | return ret; |
432 | } |
433 | |
434 | /* Add multicast entries */ |
435 | if (!list_empty(head: &rxf->mcast_pending_add_q)) { |
436 | mac = list_first_entry(&rxf->mcast_pending_add_q, |
437 | struct bna_mac, qe); |
438 | list_move_tail(list: &mac->qe, head: &rxf->mcast_active_q); |
439 | bna_bfi_mcast_add_req(rxf, mac); |
440 | return 1; |
441 | } |
442 | |
443 | return 0; |
444 | } |
445 | |
446 | static int |
447 | bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf) |
448 | { |
449 | u8 vlan_pending_bitmask; |
450 | int block_idx = 0; |
451 | |
452 | if (rxf->vlan_pending_bitmask) { |
453 | vlan_pending_bitmask = rxf->vlan_pending_bitmask; |
454 | while (!(vlan_pending_bitmask & 0x1)) { |
455 | block_idx++; |
456 | vlan_pending_bitmask >>= 1; |
457 | } |
458 | rxf->vlan_pending_bitmask &= ~BIT(block_idx); |
459 | bna_bfi_rx_vlan_filter_set(rxf, block_idx); |
460 | return 1; |
461 | } |
462 | |
463 | return 0; |
464 | } |
465 | |
466 | static int |
467 | bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) |
468 | { |
469 | struct bna_mac *mac; |
470 | int ret; |
471 | |
472 | /* Throw away delete pending mcast entries */ |
473 | while (!list_empty(head: &rxf->mcast_pending_del_q)) { |
474 | mac = list_first_entry(&rxf->mcast_pending_del_q, |
475 | struct bna_mac, qe); |
476 | ret = bna_rxf_mcast_del(rxf, mac, cleanup); |
477 | list_move_tail(list: &mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); |
478 | if (ret) |
479 | return ret; |
480 | } |
481 | |
482 | /* Move active mcast entries to pending_add_q */ |
483 | while (!list_empty(head: &rxf->mcast_active_q)) { |
484 | mac = list_first_entry(&rxf->mcast_active_q, |
485 | struct bna_mac, qe); |
486 | list_move_tail(list: &mac->qe, head: &rxf->mcast_pending_add_q); |
487 | if (bna_rxf_mcast_del(rxf, mac, cleanup)) |
488 | return 1; |
489 | } |
490 | |
491 | return 0; |
492 | } |
493 | |
494 | static int |
495 | (struct bna_rxf *rxf) |
496 | { |
497 | if (rxf->rss_pending) { |
498 | if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { |
499 | rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; |
500 | bna_bfi_rit_cfg(rxf); |
501 | return 1; |
502 | } |
503 | |
504 | if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { |
505 | rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; |
506 | bna_bfi_rss_cfg(rxf); |
507 | return 1; |
508 | } |
509 | |
510 | if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { |
511 | rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; |
512 | bna_bfi_rss_enable(rxf); |
513 | return 1; |
514 | } |
515 | } |
516 | |
517 | return 0; |
518 | } |
519 | |
520 | static int |
521 | bna_rxf_cfg_apply(struct bna_rxf *rxf) |
522 | { |
523 | if (bna_rxf_ucast_cfg_apply(rxf)) |
524 | return 1; |
525 | |
526 | if (bna_rxf_mcast_cfg_apply(rxf)) |
527 | return 1; |
528 | |
529 | if (bna_rxf_promisc_cfg_apply(rxf)) |
530 | return 1; |
531 | |
532 | if (bna_rxf_allmulti_cfg_apply(rxf)) |
533 | return 1; |
534 | |
535 | if (bna_rxf_vlan_cfg_apply(rxf)) |
536 | return 1; |
537 | |
538 | if (bna_rxf_vlan_strip_cfg_apply(rxf)) |
539 | return 1; |
540 | |
541 | if (bna_rxf_rss_cfg_apply(rxf)) |
542 | return 1; |
543 | |
544 | return 0; |
545 | } |
546 | |
547 | static void |
548 | bna_rxf_cfg_reset(struct bna_rxf *rxf) |
549 | { |
550 | bna_rxf_ucast_cfg_reset(rxf, cleanup: BNA_SOFT_CLEANUP); |
551 | bna_rxf_mcast_cfg_reset(rxf, cleanup: BNA_SOFT_CLEANUP); |
552 | bna_rxf_promisc_cfg_reset(rxf, cleanup: BNA_SOFT_CLEANUP); |
553 | bna_rxf_allmulti_cfg_reset(rxf, cleanup: BNA_SOFT_CLEANUP); |
554 | bna_rxf_vlan_cfg_soft_reset(rxf); |
555 | bna_rxf_rss_cfg_soft_reset(rxf); |
556 | } |
557 | |
558 | static void |
559 | bna_rit_init(struct bna_rxf *rxf, int rit_size) |
560 | { |
561 | struct bna_rx *rx = rxf->rx; |
562 | struct bna_rxp *rxp; |
563 | int offset = 0; |
564 | |
565 | rxf->rit_size = rit_size; |
566 | list_for_each_entry(rxp, &rx->rxp_q, qe) { |
567 | rxf->rit[offset] = rxp->cq.ccb->id; |
568 | offset++; |
569 | } |
570 | } |
571 | |
572 | void |
573 | bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) |
574 | { |
575 | bfa_fsm_send_event(rxf, RXF_E_FW_RESP); |
576 | } |
577 | |
578 | void |
579 | bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, |
580 | struct bfi_msgq_mhdr *msghdr) |
581 | { |
582 | struct bfi_enet_rsp *rsp = |
583 | container_of(msghdr, struct bfi_enet_rsp, mh); |
584 | |
585 | if (rsp->error) { |
586 | /* Clear ucast from cache */ |
587 | rxf->ucast_active_set = 0; |
588 | } |
589 | |
590 | bfa_fsm_send_event(rxf, RXF_E_FW_RESP); |
591 | } |
592 | |
593 | void |
594 | bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, |
595 | struct bfi_msgq_mhdr *msghdr) |
596 | { |
597 | struct bfi_enet_mcast_add_req *req = |
598 | &rxf->bfi_enet_cmd.mcast_add_req; |
599 | struct bfi_enet_mcast_add_rsp *rsp = |
600 | container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); |
601 | |
602 | bna_rxf_mchandle_attach(rxf, mac_addr: (u8 *)&req->mac_addr, |
603 | ntohs(rsp->handle)); |
604 | bfa_fsm_send_event(rxf, RXF_E_FW_RESP); |
605 | } |
606 | |
607 | static void |
608 | bna_rxf_init(struct bna_rxf *rxf, |
609 | struct bna_rx *rx, |
610 | struct bna_rx_config *q_config, |
611 | struct bna_res_info *res_info) |
612 | { |
613 | rxf->rx = rx; |
614 | |
615 | INIT_LIST_HEAD(list: &rxf->ucast_pending_add_q); |
616 | INIT_LIST_HEAD(list: &rxf->ucast_pending_del_q); |
617 | rxf->ucast_pending_set = 0; |
618 | rxf->ucast_active_set = 0; |
619 | INIT_LIST_HEAD(list: &rxf->ucast_active_q); |
620 | rxf->ucast_pending_mac = NULL; |
621 | |
622 | INIT_LIST_HEAD(list: &rxf->mcast_pending_add_q); |
623 | INIT_LIST_HEAD(list: &rxf->mcast_pending_del_q); |
624 | INIT_LIST_HEAD(list: &rxf->mcast_active_q); |
625 | INIT_LIST_HEAD(list: &rxf->mcast_handle_q); |
626 | |
627 | rxf->rit = (u8 *) |
628 | res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva; |
629 | bna_rit_init(rxf, rit_size: q_config->num_paths); |
630 | |
631 | rxf->rss_status = q_config->rss_status; |
632 | if (rxf->rss_status == BNA_STATUS_T_ENABLED) { |
633 | rxf->rss_cfg = q_config->rss_config; |
634 | rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; |
635 | rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; |
636 | rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; |
637 | } |
638 | |
639 | rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; |
640 | memset(rxf->vlan_filter_table, 0, |
641 | (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32))); |
642 | rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ |
643 | rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; |
644 | |
645 | rxf->vlan_strip_status = q_config->vlan_strip_status; |
646 | |
647 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); |
648 | } |
649 | |
650 | static void |
651 | bna_rxf_uninit(struct bna_rxf *rxf) |
652 | { |
653 | struct bna_mac *mac; |
654 | |
655 | rxf->ucast_pending_set = 0; |
656 | rxf->ucast_active_set = 0; |
657 | |
658 | while (!list_empty(head: &rxf->ucast_pending_add_q)) { |
659 | mac = list_first_entry(&rxf->ucast_pending_add_q, |
660 | struct bna_mac, qe); |
661 | list_move_tail(list: &mac->qe, bna_ucam_mod_free_q(rxf->rx->bna)); |
662 | } |
663 | |
664 | if (rxf->ucast_pending_mac) { |
665 | list_add_tail(new: &rxf->ucast_pending_mac->qe, |
666 | bna_ucam_mod_free_q(rxf->rx->bna)); |
667 | rxf->ucast_pending_mac = NULL; |
668 | } |
669 | |
670 | while (!list_empty(head: &rxf->mcast_pending_add_q)) { |
671 | mac = list_first_entry(&rxf->mcast_pending_add_q, |
672 | struct bna_mac, qe); |
673 | list_move_tail(list: &mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); |
674 | } |
675 | |
676 | rxf->rxmode_pending = 0; |
677 | rxf->rxmode_pending_bitmask = 0; |
678 | if (rxf->rx->bna->promisc_rid == rxf->rx->rid) |
679 | rxf->rx->bna->promisc_rid = BFI_INVALID_RID; |
680 | if (rxf->rx->bna->default_mode_rid == rxf->rx->rid) |
681 | rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; |
682 | |
683 | rxf->rss_pending = 0; |
684 | rxf->vlan_strip_pending = false; |
685 | |
686 | rxf->rx = NULL; |
687 | } |
688 | |
689 | static void |
690 | bna_rx_cb_rxf_started(struct bna_rx *rx) |
691 | { |
692 | bfa_fsm_send_event(rx, RX_E_RXF_STARTED); |
693 | } |
694 | |
695 | static void |
696 | bna_rxf_start(struct bna_rxf *rxf) |
697 | { |
698 | rxf->start_cbfn = bna_rx_cb_rxf_started; |
699 | rxf->start_cbarg = rxf->rx; |
700 | bfa_fsm_send_event(rxf, RXF_E_START); |
701 | } |
702 | |
703 | static void |
704 | bna_rx_cb_rxf_stopped(struct bna_rx *rx) |
705 | { |
706 | bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); |
707 | } |
708 | |
709 | static void |
710 | bna_rxf_stop(struct bna_rxf *rxf) |
711 | { |
712 | rxf->stop_cbfn = bna_rx_cb_rxf_stopped; |
713 | rxf->stop_cbarg = rxf->rx; |
714 | bfa_fsm_send_event(rxf, RXF_E_STOP); |
715 | } |
716 | |
717 | static void |
718 | bna_rxf_fail(struct bna_rxf *rxf) |
719 | { |
720 | bfa_fsm_send_event(rxf, RXF_E_FAIL); |
721 | } |
722 | |
723 | enum bna_cb_status |
724 | bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac) |
725 | { |
726 | struct bna_rxf *rxf = &rx->rxf; |
727 | |
728 | if (rxf->ucast_pending_mac == NULL) { |
729 | rxf->ucast_pending_mac = |
730 | bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); |
731 | if (rxf->ucast_pending_mac == NULL) |
732 | return BNA_CB_UCAST_CAM_FULL; |
733 | } |
734 | |
735 | ether_addr_copy(dst: rxf->ucast_pending_mac->addr, src: ucmac); |
736 | rxf->ucast_pending_set = 1; |
737 | rxf->cam_fltr_cbfn = NULL; |
738 | rxf->cam_fltr_cbarg = rx->bna->bnad; |
739 | |
740 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
741 | |
742 | return BNA_CB_SUCCESS; |
743 | } |
744 | |
745 | enum bna_cb_status |
746 | bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr, |
747 | void (*cbfn)(struct bnad *, struct bna_rx *)) |
748 | { |
749 | struct bna_rxf *rxf = &rx->rxf; |
750 | struct bna_mac *mac; |
751 | |
752 | /* Check if already added or pending addition */ |
753 | if (bna_mac_find(q: &rxf->mcast_active_q, addr) || |
754 | bna_mac_find(q: &rxf->mcast_pending_add_q, addr)) { |
755 | if (cbfn) |
756 | cbfn(rx->bna->bnad, rx); |
757 | return BNA_CB_SUCCESS; |
758 | } |
759 | |
760 | mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); |
761 | if (mac == NULL) |
762 | return BNA_CB_MCAST_LIST_FULL; |
763 | ether_addr_copy(dst: mac->addr, src: addr); |
764 | list_add_tail(new: &mac->qe, head: &rxf->mcast_pending_add_q); |
765 | |
766 | rxf->cam_fltr_cbfn = cbfn; |
767 | rxf->cam_fltr_cbarg = rx->bna->bnad; |
768 | |
769 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
770 | |
771 | return BNA_CB_SUCCESS; |
772 | } |
773 | |
774 | enum bna_cb_status |
775 | bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist) |
776 | { |
777 | struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; |
778 | struct bna_rxf *rxf = &rx->rxf; |
779 | struct list_head list_head; |
780 | const u8 *mcaddr; |
781 | struct bna_mac *mac, *del_mac; |
782 | int i; |
783 | |
784 | /* Purge the pending_add_q */ |
785 | while (!list_empty(head: &rxf->ucast_pending_add_q)) { |
786 | mac = list_first_entry(&rxf->ucast_pending_add_q, |
787 | struct bna_mac, qe); |
788 | list_move_tail(list: &mac->qe, head: &ucam_mod->free_q); |
789 | } |
790 | |
791 | /* Schedule active_q entries for deletion */ |
792 | while (!list_empty(head: &rxf->ucast_active_q)) { |
793 | mac = list_first_entry(&rxf->ucast_active_q, |
794 | struct bna_mac, qe); |
795 | del_mac = bna_cam_mod_mac_get(head: &ucam_mod->del_q); |
796 | ether_addr_copy(dst: del_mac->addr, src: mac->addr); |
797 | del_mac->handle = mac->handle; |
798 | list_add_tail(new: &del_mac->qe, head: &rxf->ucast_pending_del_q); |
799 | list_move_tail(list: &mac->qe, head: &ucam_mod->free_q); |
800 | } |
801 | |
802 | /* Allocate nodes */ |
803 | INIT_LIST_HEAD(list: &list_head); |
804 | for (i = 0, mcaddr = uclist; i < count; i++) { |
805 | mac = bna_cam_mod_mac_get(head: &ucam_mod->free_q); |
806 | if (mac == NULL) |
807 | goto err_return; |
808 | ether_addr_copy(dst: mac->addr, src: mcaddr); |
809 | list_add_tail(new: &mac->qe, head: &list_head); |
810 | mcaddr += ETH_ALEN; |
811 | } |
812 | |
813 | /* Add the new entries */ |
814 | while (!list_empty(head: &list_head)) { |
815 | mac = list_first_entry(&list_head, struct bna_mac, qe); |
816 | list_move_tail(list: &mac->qe, head: &rxf->ucast_pending_add_q); |
817 | } |
818 | |
819 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
820 | |
821 | return BNA_CB_SUCCESS; |
822 | |
823 | err_return: |
824 | while (!list_empty(head: &list_head)) { |
825 | mac = list_first_entry(&list_head, struct bna_mac, qe); |
826 | list_move_tail(list: &mac->qe, head: &ucam_mod->free_q); |
827 | } |
828 | |
829 | return BNA_CB_UCAST_CAM_FULL; |
830 | } |
831 | |
832 | enum bna_cb_status |
833 | bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist) |
834 | { |
835 | struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; |
836 | struct bna_rxf *rxf = &rx->rxf; |
837 | struct list_head list_head; |
838 | const u8 *mcaddr; |
839 | struct bna_mac *mac, *del_mac; |
840 | int i; |
841 | |
842 | /* Purge the pending_add_q */ |
843 | while (!list_empty(head: &rxf->mcast_pending_add_q)) { |
844 | mac = list_first_entry(&rxf->mcast_pending_add_q, |
845 | struct bna_mac, qe); |
846 | list_move_tail(list: &mac->qe, head: &mcam_mod->free_q); |
847 | } |
848 | |
849 | /* Schedule active_q entries for deletion */ |
850 | while (!list_empty(head: &rxf->mcast_active_q)) { |
851 | mac = list_first_entry(&rxf->mcast_active_q, |
852 | struct bna_mac, qe); |
853 | del_mac = bna_cam_mod_mac_get(head: &mcam_mod->del_q); |
854 | ether_addr_copy(dst: del_mac->addr, src: mac->addr); |
855 | del_mac->handle = mac->handle; |
856 | list_add_tail(new: &del_mac->qe, head: &rxf->mcast_pending_del_q); |
857 | mac->handle = NULL; |
858 | list_move_tail(list: &mac->qe, head: &mcam_mod->free_q); |
859 | } |
860 | |
861 | /* Allocate nodes */ |
862 | INIT_LIST_HEAD(list: &list_head); |
863 | for (i = 0, mcaddr = mclist; i < count; i++) { |
864 | mac = bna_cam_mod_mac_get(head: &mcam_mod->free_q); |
865 | if (mac == NULL) |
866 | goto err_return; |
867 | ether_addr_copy(dst: mac->addr, src: mcaddr); |
868 | list_add_tail(new: &mac->qe, head: &list_head); |
869 | |
870 | mcaddr += ETH_ALEN; |
871 | } |
872 | |
873 | /* Add the new entries */ |
874 | while (!list_empty(head: &list_head)) { |
875 | mac = list_first_entry(&list_head, struct bna_mac, qe); |
876 | list_move_tail(list: &mac->qe, head: &rxf->mcast_pending_add_q); |
877 | } |
878 | |
879 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
880 | |
881 | return BNA_CB_SUCCESS; |
882 | |
883 | err_return: |
884 | while (!list_empty(head: &list_head)) { |
885 | mac = list_first_entry(&list_head, struct bna_mac, qe); |
886 | list_move_tail(list: &mac->qe, head: &mcam_mod->free_q); |
887 | } |
888 | |
889 | return BNA_CB_MCAST_LIST_FULL; |
890 | } |
891 | |
892 | void |
893 | bna_rx_mcast_delall(struct bna_rx *rx) |
894 | { |
895 | struct bna_rxf *rxf = &rx->rxf; |
896 | struct bna_mac *mac, *del_mac; |
897 | int need_hw_config = 0; |
898 | |
899 | /* Purge all entries from pending_add_q */ |
900 | while (!list_empty(head: &rxf->mcast_pending_add_q)) { |
901 | mac = list_first_entry(&rxf->mcast_pending_add_q, |
902 | struct bna_mac, qe); |
903 | list_move_tail(list: &mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); |
904 | } |
905 | |
906 | /* Schedule all entries in active_q for deletion */ |
907 | while (!list_empty(head: &rxf->mcast_active_q)) { |
908 | mac = list_first_entry(&rxf->mcast_active_q, |
909 | struct bna_mac, qe); |
910 | list_del(entry: &mac->qe); |
911 | del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); |
912 | memcpy(del_mac, mac, sizeof(*del_mac)); |
913 | list_add_tail(new: &del_mac->qe, head: &rxf->mcast_pending_del_q); |
914 | mac->handle = NULL; |
915 | list_add_tail(new: &mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); |
916 | need_hw_config = 1; |
917 | } |
918 | |
919 | if (need_hw_config) |
920 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
921 | } |
922 | |
923 | void |
924 | bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) |
925 | { |
926 | struct bna_rxf *rxf = &rx->rxf; |
927 | int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); |
928 | int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK); |
929 | int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); |
930 | |
931 | rxf->vlan_filter_table[index] |= bit; |
932 | if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { |
933 | rxf->vlan_pending_bitmask |= BIT(group_id); |
934 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
935 | } |
936 | } |
937 | |
938 | void |
939 | bna_rx_vlan_del(struct bna_rx *rx, int vlan_id) |
940 | { |
941 | struct bna_rxf *rxf = &rx->rxf; |
942 | int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); |
943 | int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK); |
944 | int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); |
945 | |
946 | rxf->vlan_filter_table[index] &= ~bit; |
947 | if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { |
948 | rxf->vlan_pending_bitmask |= BIT(group_id); |
949 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
950 | } |
951 | } |
952 | |
953 | static int |
954 | bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) |
955 | { |
956 | struct bna_mac *mac = NULL; |
957 | |
958 | /* Delete MAC addresses previousely added */ |
959 | if (!list_empty(head: &rxf->ucast_pending_del_q)) { |
960 | mac = list_first_entry(&rxf->ucast_pending_del_q, |
961 | struct bna_mac, qe); |
962 | bna_bfi_ucast_req(rxf, mac, req_type: BFI_ENET_H2I_MAC_UCAST_DEL_REQ); |
963 | list_move_tail(list: &mac->qe, bna_ucam_mod_del_q(rxf->rx->bna)); |
964 | return 1; |
965 | } |
966 | |
967 | /* Set default unicast MAC */ |
968 | if (rxf->ucast_pending_set) { |
969 | rxf->ucast_pending_set = 0; |
970 | ether_addr_copy(dst: rxf->ucast_active_mac.addr, |
971 | src: rxf->ucast_pending_mac->addr); |
972 | rxf->ucast_active_set = 1; |
973 | bna_bfi_ucast_req(rxf, mac: &rxf->ucast_active_mac, |
974 | req_type: BFI_ENET_H2I_MAC_UCAST_SET_REQ); |
975 | return 1; |
976 | } |
977 | |
978 | /* Add additional MAC entries */ |
979 | if (!list_empty(head: &rxf->ucast_pending_add_q)) { |
980 | mac = list_first_entry(&rxf->ucast_pending_add_q, |
981 | struct bna_mac, qe); |
982 | list_move_tail(list: &mac->qe, head: &rxf->ucast_active_q); |
983 | bna_bfi_ucast_req(rxf, mac, req_type: BFI_ENET_H2I_MAC_UCAST_ADD_REQ); |
984 | return 1; |
985 | } |
986 | |
987 | return 0; |
988 | } |
989 | |
990 | static int |
991 | bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) |
992 | { |
993 | struct bna_mac *mac; |
994 | |
995 | /* Throw away delete pending ucast entries */ |
996 | while (!list_empty(head: &rxf->ucast_pending_del_q)) { |
997 | mac = list_first_entry(&rxf->ucast_pending_del_q, |
998 | struct bna_mac, qe); |
999 | if (cleanup == BNA_SOFT_CLEANUP) |
1000 | list_move_tail(list: &mac->qe, |
1001 | bna_ucam_mod_del_q(rxf->rx->bna)); |
1002 | else { |
1003 | bna_bfi_ucast_req(rxf, mac, |
1004 | req_type: BFI_ENET_H2I_MAC_UCAST_DEL_REQ); |
1005 | list_move_tail(list: &mac->qe, |
1006 | bna_ucam_mod_del_q(rxf->rx->bna)); |
1007 | return 1; |
1008 | } |
1009 | } |
1010 | |
1011 | /* Move active ucast entries to pending_add_q */ |
1012 | while (!list_empty(head: &rxf->ucast_active_q)) { |
1013 | mac = list_first_entry(&rxf->ucast_active_q, |
1014 | struct bna_mac, qe); |
1015 | list_move_tail(list: &mac->qe, head: &rxf->ucast_pending_add_q); |
1016 | if (cleanup == BNA_HARD_CLEANUP) { |
1017 | bna_bfi_ucast_req(rxf, mac, |
1018 | req_type: BFI_ENET_H2I_MAC_UCAST_DEL_REQ); |
1019 | return 1; |
1020 | } |
1021 | } |
1022 | |
1023 | if (rxf->ucast_active_set) { |
1024 | rxf->ucast_pending_set = 1; |
1025 | rxf->ucast_active_set = 0; |
1026 | if (cleanup == BNA_HARD_CLEANUP) { |
1027 | bna_bfi_ucast_req(rxf, mac: &rxf->ucast_active_mac, |
1028 | req_type: BFI_ENET_H2I_MAC_UCAST_CLR_REQ); |
1029 | return 1; |
1030 | } |
1031 | } |
1032 | |
1033 | return 0; |
1034 | } |
1035 | |
1036 | static int |
1037 | bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf) |
1038 | { |
1039 | struct bna *bna = rxf->rx->bna; |
1040 | |
1041 | /* Enable/disable promiscuous mode */ |
1042 | if (is_promisc_enable(rxf->rxmode_pending, |
1043 | rxf->rxmode_pending_bitmask)) { |
1044 | /* move promisc configuration from pending -> active */ |
1045 | promisc_inactive(rxf->rxmode_pending, |
1046 | rxf->rxmode_pending_bitmask); |
1047 | rxf->rxmode_active |= BNA_RXMODE_PROMISC; |
1048 | bna_bfi_rx_promisc_req(rxf, status: BNA_STATUS_T_ENABLED); |
1049 | return 1; |
1050 | } else if (is_promisc_disable(rxf->rxmode_pending, |
1051 | rxf->rxmode_pending_bitmask)) { |
1052 | /* move promisc configuration from pending -> active */ |
1053 | promisc_inactive(rxf->rxmode_pending, |
1054 | rxf->rxmode_pending_bitmask); |
1055 | rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; |
1056 | bna->promisc_rid = BFI_INVALID_RID; |
1057 | bna_bfi_rx_promisc_req(rxf, status: BNA_STATUS_T_DISABLED); |
1058 | return 1; |
1059 | } |
1060 | |
1061 | return 0; |
1062 | } |
1063 | |
1064 | static int |
1065 | bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) |
1066 | { |
1067 | struct bna *bna = rxf->rx->bna; |
1068 | |
1069 | /* Clear pending promisc mode disable */ |
1070 | if (is_promisc_disable(rxf->rxmode_pending, |
1071 | rxf->rxmode_pending_bitmask)) { |
1072 | promisc_inactive(rxf->rxmode_pending, |
1073 | rxf->rxmode_pending_bitmask); |
1074 | rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; |
1075 | bna->promisc_rid = BFI_INVALID_RID; |
1076 | if (cleanup == BNA_HARD_CLEANUP) { |
1077 | bna_bfi_rx_promisc_req(rxf, status: BNA_STATUS_T_DISABLED); |
1078 | return 1; |
1079 | } |
1080 | } |
1081 | |
1082 | /* Move promisc mode config from active -> pending */ |
1083 | if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { |
1084 | promisc_enable(rxf->rxmode_pending, |
1085 | rxf->rxmode_pending_bitmask); |
1086 | rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; |
1087 | if (cleanup == BNA_HARD_CLEANUP) { |
1088 | bna_bfi_rx_promisc_req(rxf, status: BNA_STATUS_T_DISABLED); |
1089 | return 1; |
1090 | } |
1091 | } |
1092 | |
1093 | return 0; |
1094 | } |
1095 | |
1096 | static int |
1097 | bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf) |
1098 | { |
1099 | /* Enable/disable allmulti mode */ |
1100 | if (is_allmulti_enable(rxf->rxmode_pending, |
1101 | rxf->rxmode_pending_bitmask)) { |
1102 | /* move allmulti configuration from pending -> active */ |
1103 | allmulti_inactive(rxf->rxmode_pending, |
1104 | rxf->rxmode_pending_bitmask); |
1105 | rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; |
1106 | bna_bfi_mcast_filter_req(rxf, status: BNA_STATUS_T_DISABLED); |
1107 | return 1; |
1108 | } else if (is_allmulti_disable(rxf->rxmode_pending, |
1109 | rxf->rxmode_pending_bitmask)) { |
1110 | /* move allmulti configuration from pending -> active */ |
1111 | allmulti_inactive(rxf->rxmode_pending, |
1112 | rxf->rxmode_pending_bitmask); |
1113 | rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; |
1114 | bna_bfi_mcast_filter_req(rxf, status: BNA_STATUS_T_ENABLED); |
1115 | return 1; |
1116 | } |
1117 | |
1118 | return 0; |
1119 | } |
1120 | |
1121 | static int |
1122 | bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) |
1123 | { |
1124 | /* Clear pending allmulti mode disable */ |
1125 | if (is_allmulti_disable(rxf->rxmode_pending, |
1126 | rxf->rxmode_pending_bitmask)) { |
1127 | allmulti_inactive(rxf->rxmode_pending, |
1128 | rxf->rxmode_pending_bitmask); |
1129 | rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; |
1130 | if (cleanup == BNA_HARD_CLEANUP) { |
1131 | bna_bfi_mcast_filter_req(rxf, status: BNA_STATUS_T_ENABLED); |
1132 | return 1; |
1133 | } |
1134 | } |
1135 | |
1136 | /* Move allmulti mode config from active -> pending */ |
1137 | if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { |
1138 | allmulti_enable(rxf->rxmode_pending, |
1139 | rxf->rxmode_pending_bitmask); |
1140 | rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; |
1141 | if (cleanup == BNA_HARD_CLEANUP) { |
1142 | bna_bfi_mcast_filter_req(rxf, status: BNA_STATUS_T_ENABLED); |
1143 | return 1; |
1144 | } |
1145 | } |
1146 | |
1147 | return 0; |
1148 | } |
1149 | |
1150 | static int |
1151 | bna_rxf_promisc_enable(struct bna_rxf *rxf) |
1152 | { |
1153 | struct bna *bna = rxf->rx->bna; |
1154 | int ret = 0; |
1155 | |
1156 | if (is_promisc_enable(rxf->rxmode_pending, |
1157 | rxf->rxmode_pending_bitmask) || |
1158 | (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { |
1159 | /* Do nothing if pending enable or already enabled */ |
1160 | } else if (is_promisc_disable(rxf->rxmode_pending, |
1161 | rxf->rxmode_pending_bitmask)) { |
1162 | /* Turn off pending disable command */ |
1163 | promisc_inactive(rxf->rxmode_pending, |
1164 | rxf->rxmode_pending_bitmask); |
1165 | } else { |
1166 | /* Schedule enable */ |
1167 | promisc_enable(rxf->rxmode_pending, |
1168 | rxf->rxmode_pending_bitmask); |
1169 | bna->promisc_rid = rxf->rx->rid; |
1170 | ret = 1; |
1171 | } |
1172 | |
1173 | return ret; |
1174 | } |
1175 | |
1176 | static int |
1177 | bna_rxf_promisc_disable(struct bna_rxf *rxf) |
1178 | { |
1179 | struct bna *bna = rxf->rx->bna; |
1180 | int ret = 0; |
1181 | |
1182 | if (is_promisc_disable(rxf->rxmode_pending, |
1183 | rxf->rxmode_pending_bitmask) || |
1184 | (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { |
1185 | /* Do nothing if pending disable or already disabled */ |
1186 | } else if (is_promisc_enable(rxf->rxmode_pending, |
1187 | rxf->rxmode_pending_bitmask)) { |
1188 | /* Turn off pending enable command */ |
1189 | promisc_inactive(rxf->rxmode_pending, |
1190 | rxf->rxmode_pending_bitmask); |
1191 | bna->promisc_rid = BFI_INVALID_RID; |
1192 | } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { |
1193 | /* Schedule disable */ |
1194 | promisc_disable(rxf->rxmode_pending, |
1195 | rxf->rxmode_pending_bitmask); |
1196 | ret = 1; |
1197 | } |
1198 | |
1199 | return ret; |
1200 | } |
1201 | |
1202 | static int |
1203 | bna_rxf_allmulti_enable(struct bna_rxf *rxf) |
1204 | { |
1205 | int ret = 0; |
1206 | |
1207 | if (is_allmulti_enable(rxf->rxmode_pending, |
1208 | rxf->rxmode_pending_bitmask) || |
1209 | (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { |
1210 | /* Do nothing if pending enable or already enabled */ |
1211 | } else if (is_allmulti_disable(rxf->rxmode_pending, |
1212 | rxf->rxmode_pending_bitmask)) { |
1213 | /* Turn off pending disable command */ |
1214 | allmulti_inactive(rxf->rxmode_pending, |
1215 | rxf->rxmode_pending_bitmask); |
1216 | } else { |
1217 | /* Schedule enable */ |
1218 | allmulti_enable(rxf->rxmode_pending, |
1219 | rxf->rxmode_pending_bitmask); |
1220 | ret = 1; |
1221 | } |
1222 | |
1223 | return ret; |
1224 | } |
1225 | |
1226 | static int |
1227 | bna_rxf_allmulti_disable(struct bna_rxf *rxf) |
1228 | { |
1229 | int ret = 0; |
1230 | |
1231 | if (is_allmulti_disable(rxf->rxmode_pending, |
1232 | rxf->rxmode_pending_bitmask) || |
1233 | (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { |
1234 | /* Do nothing if pending disable or already disabled */ |
1235 | } else if (is_allmulti_enable(rxf->rxmode_pending, |
1236 | rxf->rxmode_pending_bitmask)) { |
1237 | /* Turn off pending enable command */ |
1238 | allmulti_inactive(rxf->rxmode_pending, |
1239 | rxf->rxmode_pending_bitmask); |
1240 | } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { |
1241 | /* Schedule disable */ |
1242 | allmulti_disable(rxf->rxmode_pending, |
1243 | rxf->rxmode_pending_bitmask); |
1244 | ret = 1; |
1245 | } |
1246 | |
1247 | return ret; |
1248 | } |
1249 | |
1250 | static int |
1251 | bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) |
1252 | { |
1253 | if (rxf->vlan_strip_pending) { |
1254 | rxf->vlan_strip_pending = false; |
1255 | bna_bfi_vlan_strip_enable(rxf); |
1256 | return 1; |
1257 | } |
1258 | |
1259 | return 0; |
1260 | } |
1261 | |
1262 | /* RX */ |
1263 | |
1264 | #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ |
1265 | (qcfg)->num_paths : ((qcfg)->num_paths * 2)) |
1266 | |
1267 | #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ |
1268 | (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) |
1269 | |
1270 | #define call_rx_stop_cbfn(rx) \ |
1271 | do { \ |
1272 | if ((rx)->stop_cbfn) { \ |
1273 | void (*cbfn)(void *, struct bna_rx *); \ |
1274 | void *cbarg; \ |
1275 | cbfn = (rx)->stop_cbfn; \ |
1276 | cbarg = (rx)->stop_cbarg; \ |
1277 | (rx)->stop_cbfn = NULL; \ |
1278 | (rx)->stop_cbarg = NULL; \ |
1279 | cbfn(cbarg, rx); \ |
1280 | } \ |
1281 | } while (0) |
1282 | |
1283 | #define call_rx_stall_cbfn(rx) \ |
1284 | do { \ |
1285 | if ((rx)->rx_stall_cbfn) \ |
1286 | (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \ |
1287 | } while (0) |
1288 | |
1289 | #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ |
1290 | do { \ |
1291 | struct bna_dma_addr cur_q_addr = \ |
1292 | *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \ |
1293 | (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \ |
1294 | (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \ |
1295 | (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \ |
1296 | (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \ |
1297 | (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ |
1298 | (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\ |
1299 | } while (0) |
1300 | |
1301 | static void bna_bfi_rx_enet_start(struct bna_rx *rx); |
1302 | static void bna_rx_enet_stop(struct bna_rx *rx); |
1303 | static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx); |
1304 | |
1305 | bfa_fsm_state_decl(bna_rx, stopped, |
1306 | struct bna_rx, enum bna_rx_event); |
1307 | bfa_fsm_state_decl(bna_rx, start_wait, |
1308 | struct bna_rx, enum bna_rx_event); |
1309 | bfa_fsm_state_decl(bna_rx, start_stop_wait, |
1310 | struct bna_rx, enum bna_rx_event); |
1311 | bfa_fsm_state_decl(bna_rx, rxf_start_wait, |
1312 | struct bna_rx, enum bna_rx_event); |
1313 | bfa_fsm_state_decl(bna_rx, started, |
1314 | struct bna_rx, enum bna_rx_event); |
1315 | bfa_fsm_state_decl(bna_rx, rxf_stop_wait, |
1316 | struct bna_rx, enum bna_rx_event); |
1317 | bfa_fsm_state_decl(bna_rx, stop_wait, |
1318 | struct bna_rx, enum bna_rx_event); |
1319 | bfa_fsm_state_decl(bna_rx, cleanup_wait, |
1320 | struct bna_rx, enum bna_rx_event); |
1321 | bfa_fsm_state_decl(bna_rx, failed, |
1322 | struct bna_rx, enum bna_rx_event); |
1323 | bfa_fsm_state_decl(bna_rx, quiesce_wait, |
1324 | struct bna_rx, enum bna_rx_event); |
1325 | |
1326 | static void bna_rx_sm_stopped_entry(struct bna_rx *rx) |
1327 | { |
1328 | call_rx_stop_cbfn(rx); |
1329 | } |
1330 | |
1331 | static void bna_rx_sm_stopped(struct bna_rx *rx, |
1332 | enum bna_rx_event event) |
1333 | { |
1334 | switch (event) { |
1335 | case RX_E_START: |
1336 | bfa_fsm_set_state(rx, bna_rx_sm_start_wait); |
1337 | break; |
1338 | |
1339 | case RX_E_STOP: |
1340 | call_rx_stop_cbfn(rx); |
1341 | break; |
1342 | |
1343 | case RX_E_FAIL: |
1344 | /* no-op */ |
1345 | break; |
1346 | |
1347 | default: |
1348 | bfa_sm_fault(event); |
1349 | break; |
1350 | } |
1351 | } |
1352 | |
1353 | static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) |
1354 | { |
1355 | bna_bfi_rx_enet_start(rx); |
1356 | } |
1357 | |
1358 | static void |
1359 | bna_rx_sm_stop_wait_entry(struct bna_rx *rx) |
1360 | { |
1361 | } |
1362 | |
1363 | static void |
1364 | bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) |
1365 | { |
1366 | switch (event) { |
1367 | case RX_E_FAIL: |
1368 | case RX_E_STOPPED: |
1369 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); |
1370 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1371 | break; |
1372 | |
1373 | case RX_E_STARTED: |
1374 | bna_rx_enet_stop(rx); |
1375 | break; |
1376 | |
1377 | default: |
1378 | bfa_sm_fault(event); |
1379 | break; |
1380 | } |
1381 | } |
1382 | |
1383 | static void bna_rx_sm_start_wait(struct bna_rx *rx, |
1384 | enum bna_rx_event event) |
1385 | { |
1386 | switch (event) { |
1387 | case RX_E_STOP: |
1388 | bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait); |
1389 | break; |
1390 | |
1391 | case RX_E_FAIL: |
1392 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
1393 | break; |
1394 | |
1395 | case RX_E_STARTED: |
1396 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); |
1397 | break; |
1398 | |
1399 | default: |
1400 | bfa_sm_fault(event); |
1401 | break; |
1402 | } |
1403 | } |
1404 | |
1405 | static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) |
1406 | { |
1407 | rx->rx_post_cbfn(rx->bna->bnad, rx); |
1408 | bna_rxf_start(rxf: &rx->rxf); |
1409 | } |
1410 | |
1411 | static void |
1412 | bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) |
1413 | { |
1414 | } |
1415 | |
1416 | static void |
1417 | bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) |
1418 | { |
1419 | switch (event) { |
1420 | case RX_E_FAIL: |
1421 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); |
1422 | bna_rxf_fail(rxf: &rx->rxf); |
1423 | call_rx_stall_cbfn(rx); |
1424 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1425 | break; |
1426 | |
1427 | case RX_E_RXF_STARTED: |
1428 | bna_rxf_stop(rxf: &rx->rxf); |
1429 | break; |
1430 | |
1431 | case RX_E_RXF_STOPPED: |
1432 | bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); |
1433 | call_rx_stall_cbfn(rx); |
1434 | bna_rx_enet_stop(rx); |
1435 | break; |
1436 | |
1437 | default: |
1438 | bfa_sm_fault(event); |
1439 | break; |
1440 | } |
1441 | |
1442 | } |
1443 | |
1444 | static void |
1445 | bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx) |
1446 | { |
1447 | } |
1448 | |
1449 | static void |
1450 | bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) |
1451 | { |
1452 | switch (event) { |
1453 | case RX_E_FAIL: |
1454 | case RX_E_STOPPED: |
1455 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
1456 | break; |
1457 | |
1458 | case RX_E_STARTED: |
1459 | bna_rx_enet_stop(rx); |
1460 | break; |
1461 | |
1462 | default: |
1463 | bfa_sm_fault(event); |
1464 | } |
1465 | } |
1466 | |
1467 | static void |
1468 | bna_rx_sm_started_entry(struct bna_rx *rx) |
1469 | { |
1470 | struct bna_rxp *rxp; |
1471 | int is_regular = (rx->type == BNA_RX_T_REGULAR); |
1472 | |
1473 | /* Start IB */ |
1474 | list_for_each_entry(rxp, &rx->rxp_q, qe) |
1475 | bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); |
1476 | |
1477 | bna_ethport_cb_rx_started(ethport: &rx->bna->ethport); |
1478 | } |
1479 | |
1480 | static void |
1481 | bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) |
1482 | { |
1483 | switch (event) { |
1484 | case RX_E_STOP: |
1485 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); |
1486 | bna_ethport_cb_rx_stopped(ethport: &rx->bna->ethport); |
1487 | bna_rxf_stop(rxf: &rx->rxf); |
1488 | break; |
1489 | |
1490 | case RX_E_FAIL: |
1491 | bfa_fsm_set_state(rx, bna_rx_sm_failed); |
1492 | bna_ethport_cb_rx_stopped(ethport: &rx->bna->ethport); |
1493 | bna_rxf_fail(rxf: &rx->rxf); |
1494 | call_rx_stall_cbfn(rx); |
1495 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1496 | break; |
1497 | |
1498 | default: |
1499 | bfa_sm_fault(event); |
1500 | break; |
1501 | } |
1502 | } |
1503 | |
1504 | static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, |
1505 | enum bna_rx_event event) |
1506 | { |
1507 | switch (event) { |
1508 | case RX_E_STOP: |
1509 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); |
1510 | break; |
1511 | |
1512 | case RX_E_FAIL: |
1513 | bfa_fsm_set_state(rx, bna_rx_sm_failed); |
1514 | bna_rxf_fail(rxf: &rx->rxf); |
1515 | call_rx_stall_cbfn(rx); |
1516 | rx->rx_cleanup_cbfn(rx->bna->bnad, rx); |
1517 | break; |
1518 | |
1519 | case RX_E_RXF_STARTED: |
1520 | bfa_fsm_set_state(rx, bna_rx_sm_started); |
1521 | break; |
1522 | |
1523 | default: |
1524 | bfa_sm_fault(event); |
1525 | break; |
1526 | } |
1527 | } |
1528 | |
1529 | static void |
1530 | bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) |
1531 | { |
1532 | } |
1533 | |
1534 | static void |
1535 | bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) |
1536 | { |
1537 | switch (event) { |
1538 | case RX_E_FAIL: |
1539 | case RX_E_RXF_STOPPED: |
1540 | /* No-op */ |
1541 | break; |
1542 | |
1543 | case RX_E_CLEANUP_DONE: |
1544 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
1545 | break; |
1546 | |
1547 | default: |
1548 | bfa_sm_fault(event); |
1549 | break; |
1550 | } |
1551 | } |
1552 | |
1553 | static void |
1554 | bna_rx_sm_failed_entry(struct bna_rx *rx) |
1555 | { |
1556 | } |
1557 | |
1558 | static void |
1559 | bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) |
1560 | { |
1561 | switch (event) { |
1562 | case RX_E_START: |
1563 | bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); |
1564 | break; |
1565 | |
1566 | case RX_E_STOP: |
1567 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); |
1568 | break; |
1569 | |
1570 | case RX_E_FAIL: |
1571 | case RX_E_RXF_STARTED: |
1572 | case RX_E_RXF_STOPPED: |
1573 | /* No-op */ |
1574 | break; |
1575 | |
1576 | case RX_E_CLEANUP_DONE: |
1577 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
1578 | break; |
1579 | |
1580 | default: |
1581 | bfa_sm_fault(event); |
1582 | break; |
1583 | } } |
1584 | |
1585 | static void |
1586 | bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) |
1587 | { |
1588 | } |
1589 | |
1590 | static void |
1591 | bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) |
1592 | { |
1593 | switch (event) { |
1594 | case RX_E_STOP: |
1595 | bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); |
1596 | break; |
1597 | |
1598 | case RX_E_FAIL: |
1599 | bfa_fsm_set_state(rx, bna_rx_sm_failed); |
1600 | break; |
1601 | |
1602 | case RX_E_CLEANUP_DONE: |
1603 | bfa_fsm_set_state(rx, bna_rx_sm_start_wait); |
1604 | break; |
1605 | |
1606 | default: |
1607 | bfa_sm_fault(event); |
1608 | break; |
1609 | } |
1610 | } |
1611 | |
1612 | static void |
1613 | bna_bfi_rx_enet_start(struct bna_rx *rx) |
1614 | { |
1615 | struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; |
1616 | struct bna_rxp *rxp = NULL; |
1617 | struct bna_rxq *q0 = NULL, *q1 = NULL; |
1618 | int i; |
1619 | |
1620 | bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, |
1621 | BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); |
1622 | cfg_req->mh.num_entries = htons( |
1623 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); |
1624 | |
1625 | cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(enet: &rx->bna->enet); |
1626 | cfg_req->num_queue_sets = rx->num_paths; |
1627 | for (i = 0; i < rx->num_paths; i++) { |
1628 | rxp = rxp ? list_next_entry(rxp, qe) |
1629 | : list_first_entry(&rx->rxp_q, struct bna_rxp, qe); |
1630 | GET_RXQS(rxp, q0, q1); |
1631 | switch (rxp->type) { |
1632 | case BNA_RXP_SLR: |
1633 | case BNA_RXP_HDS: |
1634 | /* Small RxQ */ |
1635 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, |
1636 | &q1->qpt); |
1637 | cfg_req->q_cfg[i].qs.rx_buffer_size = |
1638 | htons((u16)q1->buffer_size); |
1639 | fallthrough; |
1640 | |
1641 | case BNA_RXP_SINGLE: |
1642 | /* Large/Single RxQ */ |
1643 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, |
1644 | &q0->qpt); |
1645 | if (q0->multi_buffer) |
1646 | /* multi-buffer is enabled by allocating |
1647 | * a new rx with new set of resources. |
1648 | * q0->buffer_size should be initialized to |
1649 | * fragment size. |
1650 | */ |
1651 | cfg_req->rx_cfg.multi_buffer = |
1652 | BNA_STATUS_T_ENABLED; |
1653 | else |
1654 | q0->buffer_size = |
1655 | bna_enet_mtu_get(enet: &rx->bna->enet); |
1656 | cfg_req->q_cfg[i].ql.rx_buffer_size = |
1657 | htons((u16)q0->buffer_size); |
1658 | break; |
1659 | |
1660 | default: |
1661 | BUG_ON(1); |
1662 | } |
1663 | |
1664 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, |
1665 | &rxp->cq.qpt); |
1666 | |
1667 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = |
1668 | rxp->cq.ib.ib_seg_host_addr.lsb; |
1669 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = |
1670 | rxp->cq.ib.ib_seg_host_addr.msb; |
1671 | cfg_req->q_cfg[i].ib.intr.msix_index = |
1672 | htons((u16)rxp->cq.ib.intr_vector); |
1673 | } |
1674 | |
1675 | cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; |
1676 | cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; |
1677 | cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; |
1678 | cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; |
1679 | cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) |
1680 | ? BNA_STATUS_T_ENABLED : |
1681 | BNA_STATUS_T_DISABLED; |
1682 | cfg_req->ib_cfg.coalescing_timeout = |
1683 | htonl((u32)rxp->cq.ib.coalescing_timeo); |
1684 | cfg_req->ib_cfg.inter_pkt_timeout = |
1685 | htonl((u32)rxp->cq.ib.interpkt_timeo); |
1686 | cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; |
1687 | |
1688 | switch (rxp->type) { |
1689 | case BNA_RXP_SLR: |
1690 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; |
1691 | break; |
1692 | |
1693 | case BNA_RXP_HDS: |
1694 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; |
1695 | cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; |
1696 | cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; |
1697 | cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; |
1698 | break; |
1699 | |
1700 | case BNA_RXP_SINGLE: |
1701 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; |
1702 | break; |
1703 | |
1704 | default: |
1705 | BUG_ON(1); |
1706 | } |
1707 | cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; |
1708 | |
1709 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, |
1710 | sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); |
1711 | bfa_msgq_cmd_post(msgq: &rx->bna->msgq, cmd: &rx->msgq_cmd); |
1712 | } |
1713 | |
1714 | static void |
1715 | bna_bfi_rx_enet_stop(struct bna_rx *rx) |
1716 | { |
1717 | struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; |
1718 | |
1719 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
1720 | BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); |
1721 | req->mh.num_entries = htons( |
1722 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); |
1723 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), |
1724 | &req->mh); |
1725 | bfa_msgq_cmd_post(msgq: &rx->bna->msgq, cmd: &rx->msgq_cmd); |
1726 | } |
1727 | |
1728 | static void |
1729 | bna_rx_enet_stop(struct bna_rx *rx) |
1730 | { |
1731 | struct bna_rxp *rxp; |
1732 | |
1733 | /* Stop IB */ |
1734 | list_for_each_entry(rxp, &rx->rxp_q, qe) |
1735 | bna_ib_stop(rx->bna, &rxp->cq.ib); |
1736 | |
1737 | bna_bfi_rx_enet_stop(rx); |
1738 | } |
1739 | |
1740 | static int |
1741 | bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) |
1742 | { |
1743 | if ((rx_mod->rx_free_count == 0) || |
1744 | (rx_mod->rxp_free_count == 0) || |
1745 | (rx_mod->rxq_free_count == 0)) |
1746 | return 0; |
1747 | |
1748 | if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { |
1749 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || |
1750 | (rx_mod->rxq_free_count < rx_cfg->num_paths)) |
1751 | return 0; |
1752 | } else { |
1753 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || |
1754 | (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) |
1755 | return 0; |
1756 | } |
1757 | |
1758 | return 1; |
1759 | } |
1760 | |
1761 | static struct bna_rxq * |
1762 | bna_rxq_get(struct bna_rx_mod *rx_mod) |
1763 | { |
1764 | struct bna_rxq *rxq = NULL; |
1765 | |
1766 | rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe); |
1767 | list_del(entry: &rxq->qe); |
1768 | rx_mod->rxq_free_count--; |
1769 | |
1770 | return rxq; |
1771 | } |
1772 | |
1773 | static void |
1774 | bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) |
1775 | { |
1776 | list_add_tail(new: &rxq->qe, head: &rx_mod->rxq_free_q); |
1777 | rx_mod->rxq_free_count++; |
1778 | } |
1779 | |
1780 | static struct bna_rxp * |
1781 | bna_rxp_get(struct bna_rx_mod *rx_mod) |
1782 | { |
1783 | struct bna_rxp *rxp = NULL; |
1784 | |
1785 | rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe); |
1786 | list_del(entry: &rxp->qe); |
1787 | rx_mod->rxp_free_count--; |
1788 | |
1789 | return rxp; |
1790 | } |
1791 | |
1792 | static void |
1793 | bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) |
1794 | { |
1795 | list_add_tail(new: &rxp->qe, head: &rx_mod->rxp_free_q); |
1796 | rx_mod->rxp_free_count++; |
1797 | } |
1798 | |
1799 | static struct bna_rx * |
1800 | bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) |
1801 | { |
1802 | struct bna_rx *rx = NULL; |
1803 | |
1804 | BUG_ON(list_empty(&rx_mod->rx_free_q)); |
1805 | if (type == BNA_RX_T_REGULAR) |
1806 | rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe); |
1807 | else |
1808 | rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe); |
1809 | |
1810 | rx_mod->rx_free_count--; |
1811 | list_move_tail(list: &rx->qe, head: &rx_mod->rx_active_q); |
1812 | rx->type = type; |
1813 | |
1814 | return rx; |
1815 | } |
1816 | |
1817 | static void |
1818 | bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) |
1819 | { |
1820 | struct list_head *qe; |
1821 | |
1822 | list_for_each_prev(qe, &rx_mod->rx_free_q) |
1823 | if (((struct bna_rx *)qe)->rid < rx->rid) |
1824 | break; |
1825 | |
1826 | list_add(new: &rx->qe, head: qe); |
1827 | rx_mod->rx_free_count++; |
1828 | } |
1829 | |
1830 | static void |
1831 | bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, |
1832 | struct bna_rxq *q1) |
1833 | { |
1834 | switch (rxp->type) { |
1835 | case BNA_RXP_SINGLE: |
1836 | rxp->rxq.single.only = q0; |
1837 | rxp->rxq.single.reserved = NULL; |
1838 | break; |
1839 | case BNA_RXP_SLR: |
1840 | rxp->rxq.slr.large = q0; |
1841 | rxp->rxq.slr.small = q1; |
1842 | break; |
1843 | case BNA_RXP_HDS: |
1844 | rxp->rxq.hds.data = q0; |
1845 | rxp->rxq.hds.hdr = q1; |
1846 | break; |
1847 | default: |
1848 | break; |
1849 | } |
1850 | } |
1851 | |
1852 | static void |
1853 | bna_rxq_qpt_setup(struct bna_rxq *rxq, |
1854 | struct bna_rxp *rxp, |
1855 | u32 page_count, |
1856 | u32 page_size, |
1857 | struct bna_mem_descr *qpt_mem, |
1858 | struct bna_mem_descr *swqpt_mem, |
1859 | struct bna_mem_descr *page_mem) |
1860 | { |
1861 | u8 *kva; |
1862 | u64 dma; |
1863 | struct bna_dma_addr bna_dma; |
1864 | int i; |
1865 | |
1866 | rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; |
1867 | rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; |
1868 | rxq->qpt.kv_qpt_ptr = qpt_mem->kva; |
1869 | rxq->qpt.page_count = page_count; |
1870 | rxq->qpt.page_size = page_size; |
1871 | |
1872 | rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; |
1873 | rxq->rcb->sw_q = page_mem->kva; |
1874 | |
1875 | kva = page_mem->kva; |
1876 | BNA_GET_DMA_ADDR(&page_mem->dma, dma); |
1877 | |
1878 | for (i = 0; i < rxq->qpt.page_count; i++) { |
1879 | rxq->rcb->sw_qpt[i] = kva; |
1880 | kva += PAGE_SIZE; |
1881 | |
1882 | BNA_SET_DMA_ADDR(dma, &bna_dma); |
1883 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = |
1884 | bna_dma.lsb; |
1885 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = |
1886 | bna_dma.msb; |
1887 | dma += PAGE_SIZE; |
1888 | } |
1889 | } |
1890 | |
1891 | static void |
1892 | bna_rxp_cqpt_setup(struct bna_rxp *rxp, |
1893 | u32 page_count, |
1894 | u32 page_size, |
1895 | struct bna_mem_descr *qpt_mem, |
1896 | struct bna_mem_descr *swqpt_mem, |
1897 | struct bna_mem_descr *page_mem) |
1898 | { |
1899 | u8 *kva; |
1900 | u64 dma; |
1901 | struct bna_dma_addr bna_dma; |
1902 | int i; |
1903 | |
1904 | rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; |
1905 | rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; |
1906 | rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; |
1907 | rxp->cq.qpt.page_count = page_count; |
1908 | rxp->cq.qpt.page_size = page_size; |
1909 | |
1910 | rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; |
1911 | rxp->cq.ccb->sw_q = page_mem->kva; |
1912 | |
1913 | kva = page_mem->kva; |
1914 | BNA_GET_DMA_ADDR(&page_mem->dma, dma); |
1915 | |
1916 | for (i = 0; i < rxp->cq.qpt.page_count; i++) { |
1917 | rxp->cq.ccb->sw_qpt[i] = kva; |
1918 | kva += PAGE_SIZE; |
1919 | |
1920 | BNA_SET_DMA_ADDR(dma, &bna_dma); |
1921 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = |
1922 | bna_dma.lsb; |
1923 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = |
1924 | bna_dma.msb; |
1925 | dma += PAGE_SIZE; |
1926 | } |
1927 | } |
1928 | |
1929 | static void |
1930 | bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) |
1931 | { |
1932 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; |
1933 | |
1934 | bfa_wc_down(wc: &rx_mod->rx_stop_wc); |
1935 | } |
1936 | |
1937 | static void |
1938 | bna_rx_mod_cb_rx_stopped_all(void *arg) |
1939 | { |
1940 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; |
1941 | |
1942 | if (rx_mod->stop_cbfn) |
1943 | rx_mod->stop_cbfn(&rx_mod->bna->enet); |
1944 | rx_mod->stop_cbfn = NULL; |
1945 | } |
1946 | |
1947 | static void |
1948 | bna_rx_start(struct bna_rx *rx) |
1949 | { |
1950 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; |
1951 | if (rx->rx_flags & BNA_RX_F_ENABLED) |
1952 | bfa_fsm_send_event(rx, RX_E_START); |
1953 | } |
1954 | |
1955 | static void |
1956 | bna_rx_stop(struct bna_rx *rx) |
1957 | { |
1958 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; |
1959 | if (rx->fsm == bna_rx_sm_stopped) |
1960 | bna_rx_mod_cb_rx_stopped(arg: &rx->bna->rx_mod, rx); |
1961 | else { |
1962 | rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; |
1963 | rx->stop_cbarg = &rx->bna->rx_mod; |
1964 | bfa_fsm_send_event(rx, RX_E_STOP); |
1965 | } |
1966 | } |
1967 | |
1968 | static void |
1969 | bna_rx_fail(struct bna_rx *rx) |
1970 | { |
1971 | /* Indicate Enet is not enabled, and failed */ |
1972 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; |
1973 | bfa_fsm_send_event(rx, RX_E_FAIL); |
1974 | } |
1975 | |
1976 | void |
1977 | bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) |
1978 | { |
1979 | struct bna_rx *rx; |
1980 | |
1981 | rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; |
1982 | if (type == BNA_RX_T_LOOPBACK) |
1983 | rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; |
1984 | |
1985 | list_for_each_entry(rx, &rx_mod->rx_active_q, qe) |
1986 | if (rx->type == type) |
1987 | bna_rx_start(rx); |
1988 | } |
1989 | |
1990 | void |
1991 | bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) |
1992 | { |
1993 | struct bna_rx *rx; |
1994 | |
1995 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; |
1996 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; |
1997 | |
1998 | rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; |
1999 | |
2000 | bfa_wc_init(wc: &rx_mod->rx_stop_wc, wc_resume: bna_rx_mod_cb_rx_stopped_all, wc_cbarg: rx_mod); |
2001 | |
2002 | list_for_each_entry(rx, &rx_mod->rx_active_q, qe) |
2003 | if (rx->type == type) { |
2004 | bfa_wc_up(wc: &rx_mod->rx_stop_wc); |
2005 | bna_rx_stop(rx); |
2006 | } |
2007 | |
2008 | bfa_wc_wait(wc: &rx_mod->rx_stop_wc); |
2009 | } |
2010 | |
2011 | void |
2012 | bna_rx_mod_fail(struct bna_rx_mod *rx_mod) |
2013 | { |
2014 | struct bna_rx *rx; |
2015 | |
2016 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; |
2017 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; |
2018 | |
2019 | list_for_each_entry(rx, &rx_mod->rx_active_q, qe) |
2020 | bna_rx_fail(rx); |
2021 | } |
2022 | |
2023 | void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, |
2024 | struct bna_res_info *res_info) |
2025 | { |
2026 | int index; |
2027 | struct bna_rx *rx_ptr; |
2028 | struct bna_rxp *rxp_ptr; |
2029 | struct bna_rxq *rxq_ptr; |
2030 | |
2031 | rx_mod->bna = bna; |
2032 | rx_mod->flags = 0; |
2033 | |
2034 | rx_mod->rx = (struct bna_rx *) |
2035 | res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; |
2036 | rx_mod->rxp = (struct bna_rxp *) |
2037 | res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; |
2038 | rx_mod->rxq = (struct bna_rxq *) |
2039 | res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; |
2040 | |
2041 | /* Initialize the queues */ |
2042 | INIT_LIST_HEAD(list: &rx_mod->rx_free_q); |
2043 | rx_mod->rx_free_count = 0; |
2044 | INIT_LIST_HEAD(list: &rx_mod->rxq_free_q); |
2045 | rx_mod->rxq_free_count = 0; |
2046 | INIT_LIST_HEAD(list: &rx_mod->rxp_free_q); |
2047 | rx_mod->rxp_free_count = 0; |
2048 | INIT_LIST_HEAD(list: &rx_mod->rx_active_q); |
2049 | |
2050 | /* Build RX queues */ |
2051 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { |
2052 | rx_ptr = &rx_mod->rx[index]; |
2053 | |
2054 | INIT_LIST_HEAD(list: &rx_ptr->rxp_q); |
2055 | rx_ptr->bna = NULL; |
2056 | rx_ptr->rid = index; |
2057 | rx_ptr->stop_cbfn = NULL; |
2058 | rx_ptr->stop_cbarg = NULL; |
2059 | |
2060 | list_add_tail(new: &rx_ptr->qe, head: &rx_mod->rx_free_q); |
2061 | rx_mod->rx_free_count++; |
2062 | } |
2063 | |
2064 | /* build RX-path queue */ |
2065 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { |
2066 | rxp_ptr = &rx_mod->rxp[index]; |
2067 | list_add_tail(new: &rxp_ptr->qe, head: &rx_mod->rxp_free_q); |
2068 | rx_mod->rxp_free_count++; |
2069 | } |
2070 | |
2071 | /* build RXQ queue */ |
2072 | for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { |
2073 | rxq_ptr = &rx_mod->rxq[index]; |
2074 | list_add_tail(new: &rxq_ptr->qe, head: &rx_mod->rxq_free_q); |
2075 | rx_mod->rxq_free_count++; |
2076 | } |
2077 | } |
2078 | |
2079 | void |
2080 | bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) |
2081 | { |
2082 | rx_mod->bna = NULL; |
2083 | } |
2084 | |
2085 | void |
2086 | bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) |
2087 | { |
2088 | struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; |
2089 | struct bna_rxp *rxp = NULL; |
2090 | struct bna_rxq *q0 = NULL, *q1 = NULL; |
2091 | int i; |
2092 | |
2093 | bfa_msgq_rsp_copy(msgq: &rx->bna->msgq, buf: (u8 *)cfg_rsp, |
2094 | buf_len: sizeof(struct bfi_enet_rx_cfg_rsp)); |
2095 | |
2096 | rx->hw_id = cfg_rsp->hw_id; |
2097 | |
2098 | for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); |
2099 | i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) { |
2100 | GET_RXQS(rxp, q0, q1); |
2101 | |
2102 | /* Setup doorbells */ |
2103 | rxp->cq.ccb->i_dbell->doorbell_addr = |
2104 | rx->bna->pcidev.pci_bar_kva |
2105 | + ntohl(cfg_rsp->q_handles[i].i_dbell); |
2106 | rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; |
2107 | q0->rcb->q_dbell = |
2108 | rx->bna->pcidev.pci_bar_kva |
2109 | + ntohl(cfg_rsp->q_handles[i].ql_dbell); |
2110 | q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; |
2111 | if (q1) { |
2112 | q1->rcb->q_dbell = |
2113 | rx->bna->pcidev.pci_bar_kva |
2114 | + ntohl(cfg_rsp->q_handles[i].qs_dbell); |
2115 | q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; |
2116 | } |
2117 | |
2118 | /* Initialize producer/consumer indexes */ |
2119 | (*rxp->cq.ccb->hw_producer_index) = 0; |
2120 | rxp->cq.ccb->producer_index = 0; |
2121 | q0->rcb->producer_index = q0->rcb->consumer_index = 0; |
2122 | if (q1) |
2123 | q1->rcb->producer_index = q1->rcb->consumer_index = 0; |
2124 | } |
2125 | |
2126 | bfa_fsm_send_event(rx, RX_E_STARTED); |
2127 | } |
2128 | |
2129 | void |
2130 | bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) |
2131 | { |
2132 | bfa_fsm_send_event(rx, RX_E_STOPPED); |
2133 | } |
2134 | |
2135 | void |
2136 | bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) |
2137 | { |
2138 | u32 cq_size, hq_size, dq_size; |
2139 | u32 cpage_count, hpage_count, dpage_count; |
2140 | struct bna_mem_info *mem_info; |
2141 | u32 cq_depth; |
2142 | u32 hq_depth; |
2143 | u32 dq_depth; |
2144 | |
2145 | dq_depth = q_cfg->q0_depth; |
2146 | hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); |
2147 | cq_depth = roundup_pow_of_two(dq_depth + hq_depth); |
2148 | |
2149 | cq_size = cq_depth * BFI_CQ_WI_SIZE; |
2150 | cq_size = ALIGN(cq_size, PAGE_SIZE); |
2151 | cpage_count = SIZE_TO_PAGES(cq_size); |
2152 | |
2153 | dq_depth = roundup_pow_of_two(dq_depth); |
2154 | dq_size = dq_depth * BFI_RXQ_WI_SIZE; |
2155 | dq_size = ALIGN(dq_size, PAGE_SIZE); |
2156 | dpage_count = SIZE_TO_PAGES(dq_size); |
2157 | |
2158 | if (BNA_RXP_SINGLE != q_cfg->rxp_type) { |
2159 | hq_depth = roundup_pow_of_two(hq_depth); |
2160 | hq_size = hq_depth * BFI_RXQ_WI_SIZE; |
2161 | hq_size = ALIGN(hq_size, PAGE_SIZE); |
2162 | hpage_count = SIZE_TO_PAGES(hq_size); |
2163 | } else |
2164 | hpage_count = 0; |
2165 | |
2166 | res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; |
2167 | mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; |
2168 | mem_info->mem_type = BNA_MEM_T_KVA; |
2169 | mem_info->len = sizeof(struct bna_ccb); |
2170 | mem_info->num = q_cfg->num_paths; |
2171 | |
2172 | res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; |
2173 | mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; |
2174 | mem_info->mem_type = BNA_MEM_T_KVA; |
2175 | mem_info->len = sizeof(struct bna_rcb); |
2176 | mem_info->num = BNA_GET_RXQS(q_cfg); |
2177 | |
2178 | res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; |
2179 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; |
2180 | mem_info->mem_type = BNA_MEM_T_DMA; |
2181 | mem_info->len = cpage_count * sizeof(struct bna_dma_addr); |
2182 | mem_info->num = q_cfg->num_paths; |
2183 | |
2184 | res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; |
2185 | mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; |
2186 | mem_info->mem_type = BNA_MEM_T_KVA; |
2187 | mem_info->len = cpage_count * sizeof(void *); |
2188 | mem_info->num = q_cfg->num_paths; |
2189 | |
2190 | res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; |
2191 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; |
2192 | mem_info->mem_type = BNA_MEM_T_DMA; |
2193 | mem_info->len = PAGE_SIZE * cpage_count; |
2194 | mem_info->num = q_cfg->num_paths; |
2195 | |
2196 | res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; |
2197 | mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; |
2198 | mem_info->mem_type = BNA_MEM_T_DMA; |
2199 | mem_info->len = dpage_count * sizeof(struct bna_dma_addr); |
2200 | mem_info->num = q_cfg->num_paths; |
2201 | |
2202 | res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; |
2203 | mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; |
2204 | mem_info->mem_type = BNA_MEM_T_KVA; |
2205 | mem_info->len = dpage_count * sizeof(void *); |
2206 | mem_info->num = q_cfg->num_paths; |
2207 | |
2208 | res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; |
2209 | mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; |
2210 | mem_info->mem_type = BNA_MEM_T_DMA; |
2211 | mem_info->len = PAGE_SIZE * dpage_count; |
2212 | mem_info->num = q_cfg->num_paths; |
2213 | |
2214 | res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; |
2215 | mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; |
2216 | mem_info->mem_type = BNA_MEM_T_DMA; |
2217 | mem_info->len = hpage_count * sizeof(struct bna_dma_addr); |
2218 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); |
2219 | |
2220 | res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; |
2221 | mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; |
2222 | mem_info->mem_type = BNA_MEM_T_KVA; |
2223 | mem_info->len = hpage_count * sizeof(void *); |
2224 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); |
2225 | |
2226 | res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; |
2227 | mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; |
2228 | mem_info->mem_type = BNA_MEM_T_DMA; |
2229 | mem_info->len = PAGE_SIZE * hpage_count; |
2230 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); |
2231 | |
2232 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; |
2233 | mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; |
2234 | mem_info->mem_type = BNA_MEM_T_DMA; |
2235 | mem_info->len = BFI_IBIDX_SIZE; |
2236 | mem_info->num = q_cfg->num_paths; |
2237 | |
2238 | res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; |
2239 | mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; |
2240 | mem_info->mem_type = BNA_MEM_T_KVA; |
2241 | mem_info->len = BFI_ENET_RSS_RIT_MAX; |
2242 | mem_info->num = 1; |
2243 | |
2244 | res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; |
2245 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; |
2246 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; |
2247 | } |
2248 | |
2249 | struct bna_rx * |
2250 | bna_rx_create(struct bna *bna, struct bnad *bnad, |
2251 | struct bna_rx_config *rx_cfg, |
2252 | const struct bna_rx_event_cbfn *rx_cbfn, |
2253 | struct bna_res_info *res_info, |
2254 | void *priv) |
2255 | { |
2256 | struct bna_rx_mod *rx_mod = &bna->rx_mod; |
2257 | struct bna_rx *rx; |
2258 | struct bna_rxp *rxp; |
2259 | struct bna_rxq *q0; |
2260 | struct bna_rxq *q1; |
2261 | struct bna_intr_info *intr_info; |
2262 | struct bna_mem_descr *hqunmap_mem; |
2263 | struct bna_mem_descr *dqunmap_mem; |
2264 | struct bna_mem_descr *ccb_mem; |
2265 | struct bna_mem_descr *rcb_mem; |
2266 | struct bna_mem_descr *cqpt_mem; |
2267 | struct bna_mem_descr *cswqpt_mem; |
2268 | struct bna_mem_descr *cpage_mem; |
2269 | struct bna_mem_descr *hqpt_mem; |
2270 | struct bna_mem_descr *dqpt_mem; |
2271 | struct bna_mem_descr *hsqpt_mem; |
2272 | struct bna_mem_descr *dsqpt_mem; |
2273 | struct bna_mem_descr *hpage_mem; |
2274 | struct bna_mem_descr *dpage_mem; |
2275 | u32 dpage_count, hpage_count; |
2276 | u32 hq_idx, dq_idx, rcb_idx; |
2277 | u32 cq_depth, i; |
2278 | u32 page_count; |
2279 | |
2280 | if (!bna_rx_res_check(rx_mod, rx_cfg)) |
2281 | return NULL; |
2282 | |
2283 | intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; |
2284 | ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; |
2285 | rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; |
2286 | dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; |
2287 | hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; |
2288 | cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; |
2289 | cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; |
2290 | cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; |
2291 | hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; |
2292 | dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; |
2293 | hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; |
2294 | dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; |
2295 | hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; |
2296 | dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; |
2297 | |
2298 | page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / |
2299 | PAGE_SIZE; |
2300 | |
2301 | dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / |
2302 | PAGE_SIZE; |
2303 | |
2304 | hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / |
2305 | PAGE_SIZE; |
2306 | |
2307 | rx = bna_rx_get(rx_mod, type: rx_cfg->rx_type); |
2308 | rx->bna = bna; |
2309 | rx->rx_flags = 0; |
2310 | INIT_LIST_HEAD(list: &rx->rxp_q); |
2311 | rx->stop_cbfn = NULL; |
2312 | rx->stop_cbarg = NULL; |
2313 | rx->priv = priv; |
2314 | |
2315 | rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; |
2316 | rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; |
2317 | rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; |
2318 | rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; |
2319 | rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; |
2320 | /* Following callbacks are mandatory */ |
2321 | rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; |
2322 | rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; |
2323 | |
2324 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { |
2325 | switch (rx->type) { |
2326 | case BNA_RX_T_REGULAR: |
2327 | if (!(rx->bna->rx_mod.flags & |
2328 | BNA_RX_MOD_F_ENET_LOOPBACK)) |
2329 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; |
2330 | break; |
2331 | case BNA_RX_T_LOOPBACK: |
2332 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) |
2333 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; |
2334 | break; |
2335 | } |
2336 | } |
2337 | |
2338 | rx->num_paths = rx_cfg->num_paths; |
2339 | for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; |
2340 | i < rx->num_paths; i++) { |
2341 | rxp = bna_rxp_get(rx_mod); |
2342 | list_add_tail(new: &rxp->qe, head: &rx->rxp_q); |
2343 | rxp->type = rx_cfg->rxp_type; |
2344 | rxp->rx = rx; |
2345 | rxp->cq.rx = rx; |
2346 | |
2347 | q0 = bna_rxq_get(rx_mod); |
2348 | if (BNA_RXP_SINGLE == rx_cfg->rxp_type) |
2349 | q1 = NULL; |
2350 | else |
2351 | q1 = bna_rxq_get(rx_mod); |
2352 | |
2353 | if (1 == intr_info->num) |
2354 | rxp->vector = intr_info->idl[0].vector; |
2355 | else |
2356 | rxp->vector = intr_info->idl[i].vector; |
2357 | |
2358 | /* Setup IB */ |
2359 | |
2360 | rxp->cq.ib.ib_seg_host_addr.lsb = |
2361 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; |
2362 | rxp->cq.ib.ib_seg_host_addr.msb = |
2363 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; |
2364 | rxp->cq.ib.ib_seg_host_addr_kva = |
2365 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; |
2366 | rxp->cq.ib.intr_type = intr_info->intr_type; |
2367 | if (intr_info->intr_type == BNA_INTR_T_MSIX) |
2368 | rxp->cq.ib.intr_vector = rxp->vector; |
2369 | else |
2370 | rxp->cq.ib.intr_vector = BIT(rxp->vector); |
2371 | rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; |
2372 | rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; |
2373 | rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; |
2374 | |
2375 | bna_rxp_add_rxqs(rxp, q0, q1); |
2376 | |
2377 | /* Setup large Q */ |
2378 | |
2379 | q0->rx = rx; |
2380 | q0->rxp = rxp; |
2381 | |
2382 | q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; |
2383 | q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; |
2384 | rcb_idx++; dq_idx++; |
2385 | q0->rcb->q_depth = rx_cfg->q0_depth; |
2386 | q0->q_depth = rx_cfg->q0_depth; |
2387 | q0->multi_buffer = rx_cfg->q0_multi_buf; |
2388 | q0->buffer_size = rx_cfg->q0_buf_size; |
2389 | q0->num_vecs = rx_cfg->q0_num_vecs; |
2390 | q0->rcb->rxq = q0; |
2391 | q0->rcb->bnad = bna->bnad; |
2392 | q0->rcb->id = 0; |
2393 | q0->rx_packets = q0->rx_bytes = 0; |
2394 | q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; |
2395 | q0->rxbuf_map_failed = 0; |
2396 | |
2397 | bna_rxq_qpt_setup(rxq: q0, rxp, page_count: dpage_count, PAGE_SIZE, |
2398 | qpt_mem: &dqpt_mem[i], swqpt_mem: &dsqpt_mem[i], page_mem: &dpage_mem[i]); |
2399 | |
2400 | if (rx->rcb_setup_cbfn) |
2401 | rx->rcb_setup_cbfn(bnad, q0->rcb); |
2402 | |
2403 | /* Setup small Q */ |
2404 | |
2405 | if (q1) { |
2406 | q1->rx = rx; |
2407 | q1->rxp = rxp; |
2408 | |
2409 | q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; |
2410 | q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; |
2411 | rcb_idx++; hq_idx++; |
2412 | q1->rcb->q_depth = rx_cfg->q1_depth; |
2413 | q1->q_depth = rx_cfg->q1_depth; |
2414 | q1->multi_buffer = BNA_STATUS_T_DISABLED; |
2415 | q1->num_vecs = 1; |
2416 | q1->rcb->rxq = q1; |
2417 | q1->rcb->bnad = bna->bnad; |
2418 | q1->rcb->id = 1; |
2419 | q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? |
2420 | rx_cfg->hds_config.forced_offset |
2421 | : rx_cfg->q1_buf_size; |
2422 | q1->rx_packets = q1->rx_bytes = 0; |
2423 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; |
2424 | q1->rxbuf_map_failed = 0; |
2425 | |
2426 | bna_rxq_qpt_setup(rxq: q1, rxp, page_count: hpage_count, PAGE_SIZE, |
2427 | qpt_mem: &hqpt_mem[i], swqpt_mem: &hsqpt_mem[i], |
2428 | page_mem: &hpage_mem[i]); |
2429 | |
2430 | if (rx->rcb_setup_cbfn) |
2431 | rx->rcb_setup_cbfn(bnad, q1->rcb); |
2432 | } |
2433 | |
2434 | /* Setup CQ */ |
2435 | |
2436 | rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; |
2437 | cq_depth = rx_cfg->q0_depth + |
2438 | ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? |
2439 | 0 : rx_cfg->q1_depth); |
2440 | /* if multi-buffer is enabled sum of q0_depth |
2441 | * and q1_depth need not be a power of 2 |
2442 | */ |
2443 | cq_depth = roundup_pow_of_two(cq_depth); |
2444 | rxp->cq.ccb->q_depth = cq_depth; |
2445 | rxp->cq.ccb->cq = &rxp->cq; |
2446 | rxp->cq.ccb->rcb[0] = q0->rcb; |
2447 | q0->rcb->ccb = rxp->cq.ccb; |
2448 | if (q1) { |
2449 | rxp->cq.ccb->rcb[1] = q1->rcb; |
2450 | q1->rcb->ccb = rxp->cq.ccb; |
2451 | } |
2452 | rxp->cq.ccb->hw_producer_index = |
2453 | (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; |
2454 | rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; |
2455 | rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; |
2456 | rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; |
2457 | rxp->cq.ccb->rx_coalescing_timeo = |
2458 | rxp->cq.ib.coalescing_timeo; |
2459 | rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; |
2460 | rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; |
2461 | rxp->cq.ccb->bnad = bna->bnad; |
2462 | rxp->cq.ccb->id = i; |
2463 | |
2464 | bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, |
2465 | qpt_mem: &cqpt_mem[i], swqpt_mem: &cswqpt_mem[i], page_mem: &cpage_mem[i]); |
2466 | |
2467 | if (rx->ccb_setup_cbfn) |
2468 | rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); |
2469 | } |
2470 | |
2471 | rx->hds_cfg = rx_cfg->hds_config; |
2472 | |
2473 | bna_rxf_init(rxf: &rx->rxf, rx, q_config: rx_cfg, res_info); |
2474 | |
2475 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
2476 | |
2477 | rx_mod->rid_mask |= BIT(rx->rid); |
2478 | |
2479 | return rx; |
2480 | } |
2481 | |
2482 | void |
2483 | bna_rx_destroy(struct bna_rx *rx) |
2484 | { |
2485 | struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; |
2486 | struct bna_rxq *q0 = NULL; |
2487 | struct bna_rxq *q1 = NULL; |
2488 | struct bna_rxp *rxp; |
2489 | struct list_head *qe; |
2490 | |
2491 | bna_rxf_uninit(rxf: &rx->rxf); |
2492 | |
2493 | while (!list_empty(head: &rx->rxp_q)) { |
2494 | rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); |
2495 | list_del(entry: &rxp->qe); |
2496 | GET_RXQS(rxp, q0, q1); |
2497 | if (rx->rcb_destroy_cbfn) |
2498 | rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); |
2499 | q0->rcb = NULL; |
2500 | q0->rxp = NULL; |
2501 | q0->rx = NULL; |
2502 | bna_rxq_put(rx_mod, rxq: q0); |
2503 | |
2504 | if (q1) { |
2505 | if (rx->rcb_destroy_cbfn) |
2506 | rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); |
2507 | q1->rcb = NULL; |
2508 | q1->rxp = NULL; |
2509 | q1->rx = NULL; |
2510 | bna_rxq_put(rx_mod, rxq: q1); |
2511 | } |
2512 | rxp->rxq.slr.large = NULL; |
2513 | rxp->rxq.slr.small = NULL; |
2514 | |
2515 | if (rx->ccb_destroy_cbfn) |
2516 | rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); |
2517 | rxp->cq.ccb = NULL; |
2518 | rxp->rx = NULL; |
2519 | bna_rxp_put(rx_mod, rxp); |
2520 | } |
2521 | |
2522 | list_for_each(qe, &rx_mod->rx_active_q) |
2523 | if (qe == &rx->qe) { |
2524 | list_del(entry: &rx->qe); |
2525 | break; |
2526 | } |
2527 | |
2528 | rx_mod->rid_mask &= ~BIT(rx->rid); |
2529 | |
2530 | rx->bna = NULL; |
2531 | rx->priv = NULL; |
2532 | bna_rx_put(rx_mod, rx); |
2533 | } |
2534 | |
2535 | void |
2536 | bna_rx_enable(struct bna_rx *rx) |
2537 | { |
2538 | if (rx->fsm != bna_rx_sm_stopped) |
2539 | return; |
2540 | |
2541 | rx->rx_flags |= BNA_RX_F_ENABLED; |
2542 | if (rx->rx_flags & BNA_RX_F_ENET_STARTED) |
2543 | bfa_fsm_send_event(rx, RX_E_START); |
2544 | } |
2545 | |
2546 | void |
2547 | bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, |
2548 | void (*cbfn)(void *, struct bna_rx *)) |
2549 | { |
2550 | if (type == BNA_SOFT_CLEANUP) { |
2551 | /* h/w should not be accessed. Treat we're stopped */ |
2552 | (*cbfn)(rx->bna->bnad, rx); |
2553 | } else { |
2554 | rx->stop_cbfn = cbfn; |
2555 | rx->stop_cbarg = rx->bna->bnad; |
2556 | |
2557 | rx->rx_flags &= ~BNA_RX_F_ENABLED; |
2558 | |
2559 | bfa_fsm_send_event(rx, RX_E_STOP); |
2560 | } |
2561 | } |
2562 | |
2563 | void |
2564 | bna_rx_cleanup_complete(struct bna_rx *rx) |
2565 | { |
2566 | bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); |
2567 | } |
2568 | |
2569 | void |
2570 | bna_rx_vlan_strip_enable(struct bna_rx *rx) |
2571 | { |
2572 | struct bna_rxf *rxf = &rx->rxf; |
2573 | |
2574 | if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { |
2575 | rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; |
2576 | rxf->vlan_strip_pending = true; |
2577 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
2578 | } |
2579 | } |
2580 | |
2581 | void |
2582 | bna_rx_vlan_strip_disable(struct bna_rx *rx) |
2583 | { |
2584 | struct bna_rxf *rxf = &rx->rxf; |
2585 | |
2586 | if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { |
2587 | rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; |
2588 | rxf->vlan_strip_pending = true; |
2589 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
2590 | } |
2591 | } |
2592 | |
2593 | enum bna_cb_status |
2594 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, |
2595 | enum bna_rxmode bitmask) |
2596 | { |
2597 | struct bna_rxf *rxf = &rx->rxf; |
2598 | int need_hw_config = 0; |
2599 | |
2600 | /* Error checks */ |
2601 | |
2602 | if (is_promisc_enable(new_mode, bitmask)) { |
2603 | /* If promisc mode is already enabled elsewhere in the system */ |
2604 | if ((rx->bna->promisc_rid != BFI_INVALID_RID) && |
2605 | (rx->bna->promisc_rid != rxf->rx->rid)) |
2606 | goto err_return; |
2607 | |
2608 | /* If default mode is already enabled in the system */ |
2609 | if (rx->bna->default_mode_rid != BFI_INVALID_RID) |
2610 | goto err_return; |
2611 | |
2612 | /* Trying to enable promiscuous and default mode together */ |
2613 | if (is_default_enable(new_mode, bitmask)) |
2614 | goto err_return; |
2615 | } |
2616 | |
2617 | if (is_default_enable(new_mode, bitmask)) { |
2618 | /* If default mode is already enabled elsewhere in the system */ |
2619 | if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && |
2620 | (rx->bna->default_mode_rid != rxf->rx->rid)) { |
2621 | goto err_return; |
2622 | } |
2623 | |
2624 | /* If promiscuous mode is already enabled in the system */ |
2625 | if (rx->bna->promisc_rid != BFI_INVALID_RID) |
2626 | goto err_return; |
2627 | } |
2628 | |
2629 | /* Process the commands */ |
2630 | |
2631 | if (is_promisc_enable(new_mode, bitmask)) { |
2632 | if (bna_rxf_promisc_enable(rxf)) |
2633 | need_hw_config = 1; |
2634 | } else if (is_promisc_disable(new_mode, bitmask)) { |
2635 | if (bna_rxf_promisc_disable(rxf)) |
2636 | need_hw_config = 1; |
2637 | } |
2638 | |
2639 | if (is_allmulti_enable(new_mode, bitmask)) { |
2640 | if (bna_rxf_allmulti_enable(rxf)) |
2641 | need_hw_config = 1; |
2642 | } else if (is_allmulti_disable(new_mode, bitmask)) { |
2643 | if (bna_rxf_allmulti_disable(rxf)) |
2644 | need_hw_config = 1; |
2645 | } |
2646 | |
2647 | /* Trigger h/w if needed */ |
2648 | |
2649 | if (need_hw_config) { |
2650 | rxf->cam_fltr_cbfn = NULL; |
2651 | rxf->cam_fltr_cbarg = rx->bna->bnad; |
2652 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
2653 | } |
2654 | |
2655 | return BNA_CB_SUCCESS; |
2656 | |
2657 | err_return: |
2658 | return BNA_CB_FAIL; |
2659 | } |
2660 | |
2661 | void |
2662 | bna_rx_vlanfilter_enable(struct bna_rx *rx) |
2663 | { |
2664 | struct bna_rxf *rxf = &rx->rxf; |
2665 | |
2666 | if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { |
2667 | rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; |
2668 | rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; |
2669 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); |
2670 | } |
2671 | } |
2672 | |
2673 | void |
2674 | bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) |
2675 | { |
2676 | struct bna_rxp *rxp; |
2677 | |
2678 | list_for_each_entry(rxp, &rx->rxp_q, qe) { |
2679 | rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; |
2680 | bna_ib_coalescing_timeo_set(ib: &rxp->cq.ib, coalescing_timeo); |
2681 | } |
2682 | } |
2683 | |
2684 | void |
2685 | bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) |
2686 | { |
2687 | int i, j; |
2688 | |
2689 | for (i = 0; i < BNA_LOAD_T_MAX; i++) |
2690 | for (j = 0; j < BNA_BIAS_T_MAX; j++) |
2691 | bna->rx_mod.dim_vector[i][j] = vector[i][j]; |
2692 | } |
2693 | |
2694 | void |
2695 | bna_rx_dim_update(struct bna_ccb *ccb) |
2696 | { |
2697 | struct bna *bna = ccb->cq->rx->bna; |
2698 | u32 load, bias; |
2699 | u32 pkt_rt, small_rt, large_rt; |
2700 | u8 coalescing_timeo; |
2701 | |
2702 | if ((ccb->pkt_rate.small_pkt_cnt == 0) && |
2703 | (ccb->pkt_rate.large_pkt_cnt == 0)) |
2704 | return; |
2705 | |
2706 | /* Arrive at preconfigured coalescing timeo value based on pkt rate */ |
2707 | |
2708 | small_rt = ccb->pkt_rate.small_pkt_cnt; |
2709 | large_rt = ccb->pkt_rate.large_pkt_cnt; |
2710 | |
2711 | pkt_rt = small_rt + large_rt; |
2712 | |
2713 | if (pkt_rt < BNA_PKT_RATE_10K) |
2714 | load = BNA_LOAD_T_LOW_4; |
2715 | else if (pkt_rt < BNA_PKT_RATE_20K) |
2716 | load = BNA_LOAD_T_LOW_3; |
2717 | else if (pkt_rt < BNA_PKT_RATE_30K) |
2718 | load = BNA_LOAD_T_LOW_2; |
2719 | else if (pkt_rt < BNA_PKT_RATE_40K) |
2720 | load = BNA_LOAD_T_LOW_1; |
2721 | else if (pkt_rt < BNA_PKT_RATE_50K) |
2722 | load = BNA_LOAD_T_HIGH_1; |
2723 | else if (pkt_rt < BNA_PKT_RATE_60K) |
2724 | load = BNA_LOAD_T_HIGH_2; |
2725 | else if (pkt_rt < BNA_PKT_RATE_80K) |
2726 | load = BNA_LOAD_T_HIGH_3; |
2727 | else |
2728 | load = BNA_LOAD_T_HIGH_4; |
2729 | |
2730 | if (small_rt > (large_rt << 1)) |
2731 | bias = 0; |
2732 | else |
2733 | bias = 1; |
2734 | |
2735 | ccb->pkt_rate.small_pkt_cnt = 0; |
2736 | ccb->pkt_rate.large_pkt_cnt = 0; |
2737 | |
2738 | coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; |
2739 | ccb->rx_coalescing_timeo = coalescing_timeo; |
2740 | |
2741 | /* Set it to IB */ |
2742 | bna_ib_coalescing_timeo_set(ib: &ccb->cq->ib, coalescing_timeo); |
2743 | } |
2744 | |
2745 | const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { |
2746 | {12, 12}, |
2747 | {6, 10}, |
2748 | {5, 10}, |
2749 | {4, 8}, |
2750 | {3, 6}, |
2751 | {3, 6}, |
2752 | {2, 4}, |
2753 | {1, 2}, |
2754 | }; |
2755 | |
2756 | /* TX */ |
2757 | |
2758 | #define call_tx_stop_cbfn(tx) \ |
2759 | do { \ |
2760 | if ((tx)->stop_cbfn) { \ |
2761 | void (*cbfn)(void *, struct bna_tx *); \ |
2762 | void *cbarg; \ |
2763 | cbfn = (tx)->stop_cbfn; \ |
2764 | cbarg = (tx)->stop_cbarg; \ |
2765 | (tx)->stop_cbfn = NULL; \ |
2766 | (tx)->stop_cbarg = NULL; \ |
2767 | cbfn(cbarg, (tx)); \ |
2768 | } \ |
2769 | } while (0) |
2770 | |
2771 | static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); |
2772 | static void bna_bfi_tx_enet_start(struct bna_tx *tx); |
2773 | static void bna_tx_enet_stop(struct bna_tx *tx); |
2774 | |
2775 | enum bna_tx_event { |
2776 | TX_E_START = 1, |
2777 | TX_E_STOP = 2, |
2778 | TX_E_FAIL = 3, |
2779 | TX_E_STARTED = 4, |
2780 | TX_E_STOPPED = 5, |
2781 | TX_E_CLEANUP_DONE = 7, |
2782 | TX_E_BW_UPDATE = 8, |
2783 | }; |
2784 | |
2785 | bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); |
2786 | bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); |
2787 | bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); |
2788 | bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); |
2789 | bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, |
2790 | enum bna_tx_event); |
2791 | bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, |
2792 | enum bna_tx_event); |
2793 | bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, |
2794 | enum bna_tx_event); |
2795 | bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); |
2796 | bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, |
2797 | enum bna_tx_event); |
2798 | |
2799 | static void |
2800 | bna_tx_sm_stopped_entry(struct bna_tx *tx) |
2801 | { |
2802 | call_tx_stop_cbfn(tx); |
2803 | } |
2804 | |
2805 | static void |
2806 | bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) |
2807 | { |
2808 | switch (event) { |
2809 | case TX_E_START: |
2810 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); |
2811 | break; |
2812 | |
2813 | case TX_E_STOP: |
2814 | call_tx_stop_cbfn(tx); |
2815 | break; |
2816 | |
2817 | case TX_E_FAIL: |
2818 | /* No-op */ |
2819 | break; |
2820 | |
2821 | case TX_E_BW_UPDATE: |
2822 | /* No-op */ |
2823 | break; |
2824 | |
2825 | default: |
2826 | bfa_sm_fault(event); |
2827 | } |
2828 | } |
2829 | |
2830 | static void |
2831 | bna_tx_sm_start_wait_entry(struct bna_tx *tx) |
2832 | { |
2833 | bna_bfi_tx_enet_start(tx); |
2834 | } |
2835 | |
2836 | static void |
2837 | bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) |
2838 | { |
2839 | switch (event) { |
2840 | case TX_E_STOP: |
2841 | tx->flags &= ~BNA_TX_F_BW_UPDATED; |
2842 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); |
2843 | break; |
2844 | |
2845 | case TX_E_FAIL: |
2846 | tx->flags &= ~BNA_TX_F_BW_UPDATED; |
2847 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); |
2848 | break; |
2849 | |
2850 | case TX_E_STARTED: |
2851 | if (tx->flags & BNA_TX_F_BW_UPDATED) { |
2852 | tx->flags &= ~BNA_TX_F_BW_UPDATED; |
2853 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); |
2854 | } else |
2855 | bfa_fsm_set_state(tx, bna_tx_sm_started); |
2856 | break; |
2857 | |
2858 | case TX_E_BW_UPDATE: |
2859 | tx->flags |= BNA_TX_F_BW_UPDATED; |
2860 | break; |
2861 | |
2862 | default: |
2863 | bfa_sm_fault(event); |
2864 | } |
2865 | } |
2866 | |
2867 | static void |
2868 | bna_tx_sm_started_entry(struct bna_tx *tx) |
2869 | { |
2870 | struct bna_txq *txq; |
2871 | int is_regular = (tx->type == BNA_TX_T_REGULAR); |
2872 | |
2873 | list_for_each_entry(txq, &tx->txq_q, qe) { |
2874 | txq->tcb->priority = txq->priority; |
2875 | /* Start IB */ |
2876 | bna_ib_start(tx->bna, &txq->ib, is_regular); |
2877 | } |
2878 | tx->tx_resume_cbfn(tx->bna->bnad, tx); |
2879 | } |
2880 | |
2881 | static void |
2882 | bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) |
2883 | { |
2884 | switch (event) { |
2885 | case TX_E_STOP: |
2886 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); |
2887 | tx->tx_stall_cbfn(tx->bna->bnad, tx); |
2888 | bna_tx_enet_stop(tx); |
2889 | break; |
2890 | |
2891 | case TX_E_FAIL: |
2892 | bfa_fsm_set_state(tx, bna_tx_sm_failed); |
2893 | tx->tx_stall_cbfn(tx->bna->bnad, tx); |
2894 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); |
2895 | break; |
2896 | |
2897 | case TX_E_BW_UPDATE: |
2898 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); |
2899 | break; |
2900 | |
2901 | default: |
2902 | bfa_sm_fault(event); |
2903 | } |
2904 | } |
2905 | |
2906 | static void |
2907 | bna_tx_sm_stop_wait_entry(struct bna_tx *tx) |
2908 | { |
2909 | } |
2910 | |
2911 | static void |
2912 | bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) |
2913 | { |
2914 | switch (event) { |
2915 | case TX_E_FAIL: |
2916 | case TX_E_STOPPED: |
2917 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); |
2918 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); |
2919 | break; |
2920 | |
2921 | case TX_E_STARTED: |
2922 | /** |
2923 | * We are here due to start_wait -> stop_wait transition on |
2924 | * TX_E_STOP event |
2925 | */ |
2926 | bna_tx_enet_stop(tx); |
2927 | break; |
2928 | |
2929 | case TX_E_BW_UPDATE: |
2930 | /* No-op */ |
2931 | break; |
2932 | |
2933 | default: |
2934 | bfa_sm_fault(event); |
2935 | } |
2936 | } |
2937 | |
2938 | static void |
2939 | bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) |
2940 | { |
2941 | } |
2942 | |
2943 | static void |
2944 | bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) |
2945 | { |
2946 | switch (event) { |
2947 | case TX_E_FAIL: |
2948 | case TX_E_BW_UPDATE: |
2949 | /* No-op */ |
2950 | break; |
2951 | |
2952 | case TX_E_CLEANUP_DONE: |
2953 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); |
2954 | break; |
2955 | |
2956 | default: |
2957 | bfa_sm_fault(event); |
2958 | } |
2959 | } |
2960 | |
2961 | static void |
2962 | bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) |
2963 | { |
2964 | tx->tx_stall_cbfn(tx->bna->bnad, tx); |
2965 | bna_tx_enet_stop(tx); |
2966 | } |
2967 | |
2968 | static void |
2969 | bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) |
2970 | { |
2971 | switch (event) { |
2972 | case TX_E_STOP: |
2973 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); |
2974 | break; |
2975 | |
2976 | case TX_E_FAIL: |
2977 | bfa_fsm_set_state(tx, bna_tx_sm_failed); |
2978 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); |
2979 | break; |
2980 | |
2981 | case TX_E_STOPPED: |
2982 | bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); |
2983 | break; |
2984 | |
2985 | case TX_E_BW_UPDATE: |
2986 | /* No-op */ |
2987 | break; |
2988 | |
2989 | default: |
2990 | bfa_sm_fault(event); |
2991 | } |
2992 | } |
2993 | |
2994 | static void |
2995 | bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) |
2996 | { |
2997 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); |
2998 | } |
2999 | |
3000 | static void |
3001 | bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) |
3002 | { |
3003 | switch (event) { |
3004 | case TX_E_STOP: |
3005 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); |
3006 | break; |
3007 | |
3008 | case TX_E_FAIL: |
3009 | bfa_fsm_set_state(tx, bna_tx_sm_failed); |
3010 | break; |
3011 | |
3012 | case TX_E_BW_UPDATE: |
3013 | /* No-op */ |
3014 | break; |
3015 | |
3016 | case TX_E_CLEANUP_DONE: |
3017 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); |
3018 | break; |
3019 | |
3020 | default: |
3021 | bfa_sm_fault(event); |
3022 | } |
3023 | } |
3024 | |
3025 | static void |
3026 | bna_tx_sm_failed_entry(struct bna_tx *tx) |
3027 | { |
3028 | } |
3029 | |
3030 | static void |
3031 | bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) |
3032 | { |
3033 | switch (event) { |
3034 | case TX_E_START: |
3035 | bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); |
3036 | break; |
3037 | |
3038 | case TX_E_STOP: |
3039 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); |
3040 | break; |
3041 | |
3042 | case TX_E_FAIL: |
3043 | /* No-op */ |
3044 | break; |
3045 | |
3046 | case TX_E_CLEANUP_DONE: |
3047 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); |
3048 | break; |
3049 | |
3050 | default: |
3051 | bfa_sm_fault(event); |
3052 | } |
3053 | } |
3054 | |
3055 | static void |
3056 | bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) |
3057 | { |
3058 | } |
3059 | |
3060 | static void |
3061 | bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) |
3062 | { |
3063 | switch (event) { |
3064 | case TX_E_STOP: |
3065 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); |
3066 | break; |
3067 | |
3068 | case TX_E_FAIL: |
3069 | bfa_fsm_set_state(tx, bna_tx_sm_failed); |
3070 | break; |
3071 | |
3072 | case TX_E_CLEANUP_DONE: |
3073 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); |
3074 | break; |
3075 | |
3076 | case TX_E_BW_UPDATE: |
3077 | /* No-op */ |
3078 | break; |
3079 | |
3080 | default: |
3081 | bfa_sm_fault(event); |
3082 | } |
3083 | } |
3084 | |
3085 | static void |
3086 | bna_bfi_tx_enet_start(struct bna_tx *tx) |
3087 | { |
3088 | struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; |
3089 | struct bna_txq *txq = NULL; |
3090 | int i; |
3091 | |
3092 | bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, |
3093 | BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); |
3094 | cfg_req->mh.num_entries = htons( |
3095 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); |
3096 | |
3097 | cfg_req->num_queues = tx->num_txq; |
3098 | for (i = 0; i < tx->num_txq; i++) { |
3099 | txq = txq ? list_next_entry(txq, qe) |
3100 | : list_first_entry(&tx->txq_q, struct bna_txq, qe); |
3101 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); |
3102 | cfg_req->q_cfg[i].q.priority = txq->priority; |
3103 | |
3104 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = |
3105 | txq->ib.ib_seg_host_addr.lsb; |
3106 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = |
3107 | txq->ib.ib_seg_host_addr.msb; |
3108 | cfg_req->q_cfg[i].ib.intr.msix_index = |
3109 | htons((u16)txq->ib.intr_vector); |
3110 | } |
3111 | |
3112 | cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; |
3113 | cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; |
3114 | cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; |
3115 | cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; |
3116 | cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) |
3117 | ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; |
3118 | cfg_req->ib_cfg.coalescing_timeout = |
3119 | htonl((u32)txq->ib.coalescing_timeo); |
3120 | cfg_req->ib_cfg.inter_pkt_timeout = |
3121 | htonl((u32)txq->ib.interpkt_timeo); |
3122 | cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; |
3123 | |
3124 | cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; |
3125 | cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); |
3126 | cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; |
3127 | cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; |
3128 | |
3129 | bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, |
3130 | sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); |
3131 | bfa_msgq_cmd_post(msgq: &tx->bna->msgq, cmd: &tx->msgq_cmd); |
3132 | } |
3133 | |
3134 | static void |
3135 | bna_bfi_tx_enet_stop(struct bna_tx *tx) |
3136 | { |
3137 | struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; |
3138 | |
3139 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, |
3140 | BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); |
3141 | req->mh.num_entries = htons( |
3142 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); |
3143 | bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), |
3144 | &req->mh); |
3145 | bfa_msgq_cmd_post(msgq: &tx->bna->msgq, cmd: &tx->msgq_cmd); |
3146 | } |
3147 | |
3148 | static void |
3149 | bna_tx_enet_stop(struct bna_tx *tx) |
3150 | { |
3151 | struct bna_txq *txq; |
3152 | |
3153 | /* Stop IB */ |
3154 | list_for_each_entry(txq, &tx->txq_q, qe) |
3155 | bna_ib_stop(tx->bna, &txq->ib); |
3156 | |
3157 | bna_bfi_tx_enet_stop(tx); |
3158 | } |
3159 | |
3160 | static void |
3161 | bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, |
3162 | struct bna_mem_descr *qpt_mem, |
3163 | struct bna_mem_descr *swqpt_mem, |
3164 | struct bna_mem_descr *page_mem) |
3165 | { |
3166 | u8 *kva; |
3167 | u64 dma; |
3168 | struct bna_dma_addr bna_dma; |
3169 | int i; |
3170 | |
3171 | txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; |
3172 | txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; |
3173 | txq->qpt.kv_qpt_ptr = qpt_mem->kva; |
3174 | txq->qpt.page_count = page_count; |
3175 | txq->qpt.page_size = page_size; |
3176 | |
3177 | txq->tcb->sw_qpt = (void **) swqpt_mem->kva; |
3178 | txq->tcb->sw_q = page_mem->kva; |
3179 | |
3180 | kva = page_mem->kva; |
3181 | BNA_GET_DMA_ADDR(&page_mem->dma, dma); |
3182 | |
3183 | for (i = 0; i < page_count; i++) { |
3184 | txq->tcb->sw_qpt[i] = kva; |
3185 | kva += PAGE_SIZE; |
3186 | |
3187 | BNA_SET_DMA_ADDR(dma, &bna_dma); |
3188 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = |
3189 | bna_dma.lsb; |
3190 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = |
3191 | bna_dma.msb; |
3192 | dma += PAGE_SIZE; |
3193 | } |
3194 | } |
3195 | |
3196 | static struct bna_tx * |
3197 | bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) |
3198 | { |
3199 | struct bna_tx *tx = NULL; |
3200 | |
3201 | if (list_empty(head: &tx_mod->tx_free_q)) |
3202 | return NULL; |
3203 | if (type == BNA_TX_T_REGULAR) |
3204 | tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe); |
3205 | else |
3206 | tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe); |
3207 | list_del(entry: &tx->qe); |
3208 | tx->type = type; |
3209 | |
3210 | return tx; |
3211 | } |
3212 | |
3213 | static void |
3214 | bna_tx_free(struct bna_tx *tx) |
3215 | { |
3216 | struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; |
3217 | struct bna_txq *txq; |
3218 | struct list_head *qe; |
3219 | |
3220 | while (!list_empty(head: &tx->txq_q)) { |
3221 | txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); |
3222 | txq->tcb = NULL; |
3223 | txq->tx = NULL; |
3224 | list_move_tail(list: &txq->qe, head: &tx_mod->txq_free_q); |
3225 | } |
3226 | |
3227 | list_for_each(qe, &tx_mod->tx_active_q) { |
3228 | if (qe == &tx->qe) { |
3229 | list_del(entry: &tx->qe); |
3230 | break; |
3231 | } |
3232 | } |
3233 | |
3234 | tx->bna = NULL; |
3235 | tx->priv = NULL; |
3236 | |
3237 | list_for_each_prev(qe, &tx_mod->tx_free_q) |
3238 | if (((struct bna_tx *)qe)->rid < tx->rid) |
3239 | break; |
3240 | |
3241 | list_add(new: &tx->qe, head: qe); |
3242 | } |
3243 | |
3244 | static void |
3245 | bna_tx_start(struct bna_tx *tx) |
3246 | { |
3247 | tx->flags |= BNA_TX_F_ENET_STARTED; |
3248 | if (tx->flags & BNA_TX_F_ENABLED) |
3249 | bfa_fsm_send_event(tx, TX_E_START); |
3250 | } |
3251 | |
3252 | static void |
3253 | bna_tx_stop(struct bna_tx *tx) |
3254 | { |
3255 | tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; |
3256 | tx->stop_cbarg = &tx->bna->tx_mod; |
3257 | |
3258 | tx->flags &= ~BNA_TX_F_ENET_STARTED; |
3259 | bfa_fsm_send_event(tx, TX_E_STOP); |
3260 | } |
3261 | |
3262 | static void |
3263 | bna_tx_fail(struct bna_tx *tx) |
3264 | { |
3265 | tx->flags &= ~BNA_TX_F_ENET_STARTED; |
3266 | bfa_fsm_send_event(tx, TX_E_FAIL); |
3267 | } |
3268 | |
3269 | void |
3270 | bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) |
3271 | { |
3272 | struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; |
3273 | struct bna_txq *txq = NULL; |
3274 | int i; |
3275 | |
3276 | bfa_msgq_rsp_copy(msgq: &tx->bna->msgq, buf: (u8 *)cfg_rsp, |
3277 | buf_len: sizeof(struct bfi_enet_tx_cfg_rsp)); |
3278 | |
3279 | tx->hw_id = cfg_rsp->hw_id; |
3280 | |
3281 | for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); |
3282 | i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) { |
3283 | /* Setup doorbells */ |
3284 | txq->tcb->i_dbell->doorbell_addr = |
3285 | tx->bna->pcidev.pci_bar_kva |
3286 | + ntohl(cfg_rsp->q_handles[i].i_dbell); |
3287 | txq->tcb->q_dbell = |
3288 | tx->bna->pcidev.pci_bar_kva |
3289 | + ntohl(cfg_rsp->q_handles[i].q_dbell); |
3290 | txq->hw_id = cfg_rsp->q_handles[i].hw_qid; |
3291 | |
3292 | /* Initialize producer/consumer indexes */ |
3293 | (*txq->tcb->hw_consumer_index) = 0; |
3294 | txq->tcb->producer_index = txq->tcb->consumer_index = 0; |
3295 | } |
3296 | |
3297 | bfa_fsm_send_event(tx, TX_E_STARTED); |
3298 | } |
3299 | |
3300 | void |
3301 | bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) |
3302 | { |
3303 | bfa_fsm_send_event(tx, TX_E_STOPPED); |
3304 | } |
3305 | |
3306 | void |
3307 | bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) |
3308 | { |
3309 | struct bna_tx *tx; |
3310 | |
3311 | list_for_each_entry(tx, &tx_mod->tx_active_q, qe) |
3312 | bfa_fsm_send_event(tx, TX_E_BW_UPDATE); |
3313 | } |
3314 | |
3315 | void |
3316 | bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) |
3317 | { |
3318 | u32 q_size; |
3319 | u32 page_count; |
3320 | struct bna_mem_info *mem_info; |
3321 | |
3322 | res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; |
3323 | mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; |
3324 | mem_info->mem_type = BNA_MEM_T_KVA; |
3325 | mem_info->len = sizeof(struct bna_tcb); |
3326 | mem_info->num = num_txq; |
3327 | |
3328 | q_size = txq_depth * BFI_TXQ_WI_SIZE; |
3329 | q_size = ALIGN(q_size, PAGE_SIZE); |
3330 | page_count = q_size >> PAGE_SHIFT; |
3331 | |
3332 | res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; |
3333 | mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; |
3334 | mem_info->mem_type = BNA_MEM_T_DMA; |
3335 | mem_info->len = page_count * sizeof(struct bna_dma_addr); |
3336 | mem_info->num = num_txq; |
3337 | |
3338 | res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; |
3339 | mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; |
3340 | mem_info->mem_type = BNA_MEM_T_KVA; |
3341 | mem_info->len = page_count * sizeof(void *); |
3342 | mem_info->num = num_txq; |
3343 | |
3344 | res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; |
3345 | mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; |
3346 | mem_info->mem_type = BNA_MEM_T_DMA; |
3347 | mem_info->len = PAGE_SIZE * page_count; |
3348 | mem_info->num = num_txq; |
3349 | |
3350 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; |
3351 | mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; |
3352 | mem_info->mem_type = BNA_MEM_T_DMA; |
3353 | mem_info->len = BFI_IBIDX_SIZE; |
3354 | mem_info->num = num_txq; |
3355 | |
3356 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; |
3357 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = |
3358 | BNA_INTR_T_MSIX; |
3359 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; |
3360 | } |
3361 | |
3362 | struct bna_tx * |
3363 | bna_tx_create(struct bna *bna, struct bnad *bnad, |
3364 | struct bna_tx_config *tx_cfg, |
3365 | const struct bna_tx_event_cbfn *tx_cbfn, |
3366 | struct bna_res_info *res_info, void *priv) |
3367 | { |
3368 | struct bna_intr_info *intr_info; |
3369 | struct bna_tx_mod *tx_mod = &bna->tx_mod; |
3370 | struct bna_tx *tx; |
3371 | struct bna_txq *txq; |
3372 | int page_count; |
3373 | int i; |
3374 | |
3375 | intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; |
3376 | page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / |
3377 | PAGE_SIZE; |
3378 | |
3379 | /** |
3380 | * Get resources |
3381 | */ |
3382 | |
3383 | if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) |
3384 | return NULL; |
3385 | |
3386 | /* Tx */ |
3387 | |
3388 | tx = bna_tx_get(tx_mod, type: tx_cfg->tx_type); |
3389 | if (!tx) |
3390 | return NULL; |
3391 | tx->bna = bna; |
3392 | tx->priv = priv; |
3393 | |
3394 | /* TxQs */ |
3395 | |
3396 | INIT_LIST_HEAD(list: &tx->txq_q); |
3397 | for (i = 0; i < tx_cfg->num_txq; i++) { |
3398 | if (list_empty(head: &tx_mod->txq_free_q)) |
3399 | goto err_return; |
3400 | |
3401 | txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe); |
3402 | list_move_tail(list: &txq->qe, head: &tx->txq_q); |
3403 | txq->tx = tx; |
3404 | } |
3405 | |
3406 | /* |
3407 | * Initialize |
3408 | */ |
3409 | |
3410 | /* Tx */ |
3411 | |
3412 | tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; |
3413 | tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; |
3414 | /* Following callbacks are mandatory */ |
3415 | tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; |
3416 | tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; |
3417 | tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; |
3418 | |
3419 | list_add_tail(new: &tx->qe, head: &tx_mod->tx_active_q); |
3420 | |
3421 | tx->num_txq = tx_cfg->num_txq; |
3422 | |
3423 | tx->flags = 0; |
3424 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { |
3425 | switch (tx->type) { |
3426 | case BNA_TX_T_REGULAR: |
3427 | if (!(tx->bna->tx_mod.flags & |
3428 | BNA_TX_MOD_F_ENET_LOOPBACK)) |
3429 | tx->flags |= BNA_TX_F_ENET_STARTED; |
3430 | break; |
3431 | case BNA_TX_T_LOOPBACK: |
3432 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) |
3433 | tx->flags |= BNA_TX_F_ENET_STARTED; |
3434 | break; |
3435 | } |
3436 | } |
3437 | |
3438 | /* TxQ */ |
3439 | |
3440 | i = 0; |
3441 | list_for_each_entry(txq, &tx->txq_q, qe) { |
3442 | txq->tcb = (struct bna_tcb *) |
3443 | res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; |
3444 | txq->tx_packets = 0; |
3445 | txq->tx_bytes = 0; |
3446 | |
3447 | /* IB */ |
3448 | txq->ib.ib_seg_host_addr.lsb = |
3449 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; |
3450 | txq->ib.ib_seg_host_addr.msb = |
3451 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; |
3452 | txq->ib.ib_seg_host_addr_kva = |
3453 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; |
3454 | txq->ib.intr_type = intr_info->intr_type; |
3455 | txq->ib.intr_vector = (intr_info->num == 1) ? |
3456 | intr_info->idl[0].vector : |
3457 | intr_info->idl[i].vector; |
3458 | if (intr_info->intr_type == BNA_INTR_T_INTX) |
3459 | txq->ib.intr_vector = BIT(txq->ib.intr_vector); |
3460 | txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; |
3461 | txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; |
3462 | txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; |
3463 | |
3464 | /* TCB */ |
3465 | |
3466 | txq->tcb->q_depth = tx_cfg->txq_depth; |
3467 | txq->tcb->unmap_q = (void *) |
3468 | res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; |
3469 | txq->tcb->hw_consumer_index = |
3470 | (u32 *)txq->ib.ib_seg_host_addr_kva; |
3471 | txq->tcb->i_dbell = &txq->ib.door_bell; |
3472 | txq->tcb->intr_type = txq->ib.intr_type; |
3473 | txq->tcb->intr_vector = txq->ib.intr_vector; |
3474 | txq->tcb->txq = txq; |
3475 | txq->tcb->bnad = bnad; |
3476 | txq->tcb->id = i; |
3477 | |
3478 | /* QPT, SWQPT, Pages */ |
3479 | bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, |
3480 | qpt_mem: &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], |
3481 | swqpt_mem: &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], |
3482 | page_mem: &res_info[BNA_TX_RES_MEM_T_PAGE]. |
3483 | res_u.mem_info.mdl[i]); |
3484 | |
3485 | /* Callback to bnad for setting up TCB */ |
3486 | if (tx->tcb_setup_cbfn) |
3487 | (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); |
3488 | |
3489 | if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) |
3490 | txq->priority = txq->tcb->id; |
3491 | else |
3492 | txq->priority = tx_mod->default_prio; |
3493 | |
3494 | i++; |
3495 | } |
3496 | |
3497 | tx->txf_vlan_id = 0; |
3498 | |
3499 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); |
3500 | |
3501 | tx_mod->rid_mask |= BIT(tx->rid); |
3502 | |
3503 | return tx; |
3504 | |
3505 | err_return: |
3506 | bna_tx_free(tx); |
3507 | return NULL; |
3508 | } |
3509 | |
3510 | void |
3511 | bna_tx_destroy(struct bna_tx *tx) |
3512 | { |
3513 | struct bna_txq *txq; |
3514 | |
3515 | list_for_each_entry(txq, &tx->txq_q, qe) |
3516 | if (tx->tcb_destroy_cbfn) |
3517 | (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); |
3518 | |
3519 | tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid); |
3520 | bna_tx_free(tx); |
3521 | } |
3522 | |
3523 | void |
3524 | bna_tx_enable(struct bna_tx *tx) |
3525 | { |
3526 | if (tx->fsm != bna_tx_sm_stopped) |
3527 | return; |
3528 | |
3529 | tx->flags |= BNA_TX_F_ENABLED; |
3530 | |
3531 | if (tx->flags & BNA_TX_F_ENET_STARTED) |
3532 | bfa_fsm_send_event(tx, TX_E_START); |
3533 | } |
3534 | |
3535 | void |
3536 | bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, |
3537 | void (*cbfn)(void *, struct bna_tx *)) |
3538 | { |
3539 | if (type == BNA_SOFT_CLEANUP) { |
3540 | (*cbfn)(tx->bna->bnad, tx); |
3541 | return; |
3542 | } |
3543 | |
3544 | tx->stop_cbfn = cbfn; |
3545 | tx->stop_cbarg = tx->bna->bnad; |
3546 | |
3547 | tx->flags &= ~BNA_TX_F_ENABLED; |
3548 | |
3549 | bfa_fsm_send_event(tx, TX_E_STOP); |
3550 | } |
3551 | |
3552 | void |
3553 | bna_tx_cleanup_complete(struct bna_tx *tx) |
3554 | { |
3555 | bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); |
3556 | } |
3557 | |
3558 | static void |
3559 | bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) |
3560 | { |
3561 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; |
3562 | |
3563 | bfa_wc_down(wc: &tx_mod->tx_stop_wc); |
3564 | } |
3565 | |
3566 | static void |
3567 | bna_tx_mod_cb_tx_stopped_all(void *arg) |
3568 | { |
3569 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; |
3570 | |
3571 | if (tx_mod->stop_cbfn) |
3572 | tx_mod->stop_cbfn(&tx_mod->bna->enet); |
3573 | tx_mod->stop_cbfn = NULL; |
3574 | } |
3575 | |
3576 | void |
3577 | bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, |
3578 | struct bna_res_info *res_info) |
3579 | { |
3580 | int i; |
3581 | |
3582 | tx_mod->bna = bna; |
3583 | tx_mod->flags = 0; |
3584 | |
3585 | tx_mod->tx = (struct bna_tx *) |
3586 | res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; |
3587 | tx_mod->txq = (struct bna_txq *) |
3588 | res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; |
3589 | |
3590 | INIT_LIST_HEAD(list: &tx_mod->tx_free_q); |
3591 | INIT_LIST_HEAD(list: &tx_mod->tx_active_q); |
3592 | |
3593 | INIT_LIST_HEAD(list: &tx_mod->txq_free_q); |
3594 | |
3595 | for (i = 0; i < bna->ioceth.attr.num_txq; i++) { |
3596 | tx_mod->tx[i].rid = i; |
3597 | list_add_tail(new: &tx_mod->tx[i].qe, head: &tx_mod->tx_free_q); |
3598 | list_add_tail(new: &tx_mod->txq[i].qe, head: &tx_mod->txq_free_q); |
3599 | } |
3600 | |
3601 | tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; |
3602 | tx_mod->default_prio = 0; |
3603 | tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; |
3604 | tx_mod->iscsi_prio = -1; |
3605 | } |
3606 | |
3607 | void |
3608 | bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) |
3609 | { |
3610 | tx_mod->bna = NULL; |
3611 | } |
3612 | |
3613 | void |
3614 | bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) |
3615 | { |
3616 | struct bna_tx *tx; |
3617 | |
3618 | tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; |
3619 | if (type == BNA_TX_T_LOOPBACK) |
3620 | tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; |
3621 | |
3622 | list_for_each_entry(tx, &tx_mod->tx_active_q, qe) |
3623 | if (tx->type == type) |
3624 | bna_tx_start(tx); |
3625 | } |
3626 | |
3627 | void |
3628 | bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) |
3629 | { |
3630 | struct bna_tx *tx; |
3631 | |
3632 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; |
3633 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; |
3634 | |
3635 | tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; |
3636 | |
3637 | bfa_wc_init(wc: &tx_mod->tx_stop_wc, wc_resume: bna_tx_mod_cb_tx_stopped_all, wc_cbarg: tx_mod); |
3638 | |
3639 | list_for_each_entry(tx, &tx_mod->tx_active_q, qe) |
3640 | if (tx->type == type) { |
3641 | bfa_wc_up(wc: &tx_mod->tx_stop_wc); |
3642 | bna_tx_stop(tx); |
3643 | } |
3644 | |
3645 | bfa_wc_wait(wc: &tx_mod->tx_stop_wc); |
3646 | } |
3647 | |
3648 | void |
3649 | bna_tx_mod_fail(struct bna_tx_mod *tx_mod) |
3650 | { |
3651 | struct bna_tx *tx; |
3652 | |
3653 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; |
3654 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; |
3655 | |
3656 | list_for_each_entry(tx, &tx_mod->tx_active_q, qe) |
3657 | bna_tx_fail(tx); |
3658 | } |
3659 | |
3660 | void |
3661 | bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) |
3662 | { |
3663 | struct bna_txq *txq; |
3664 | |
3665 | list_for_each_entry(txq, &tx->txq_q, qe) |
3666 | bna_ib_coalescing_timeo_set(ib: &txq->ib, coalescing_timeo); |
3667 | } |
3668 | |