1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
4 */
5/*
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
8 * All rights reserved
9 * www.qlogic.com
10 */
11#include <linux/bitops.h>
12#include <linux/netdevice.h>
13#include <linux/skbuff.h>
14#include <linux/etherdevice.h>
15#include <linux/in.h>
16#include <linux/ethtool.h>
17#include <linux/if_vlan.h>
18#include <linux/if_ether.h>
19#include <linux/ip.h>
20#include <linux/prefetch.h>
21#include <linux/module.h>
22
23#include "bnad.h"
24#include "bna.h"
25#include "cna.h"
26
27static DEFINE_MUTEX(bnad_fwimg_mutex);
28
29/*
30 * Module params
31 */
32static uint bnad_msix_disable;
33module_param(bnad_msix_disable, uint, 0444);
34MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
35
36static uint bnad_ioc_auto_recover = 1;
37module_param(bnad_ioc_auto_recover, uint, 0444);
38MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
39
40static uint bna_debugfs_enable = 1;
41module_param(bna_debugfs_enable, uint, 0644);
42MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
43 " Range[false:0|true:1]");
44
45/*
46 * Global variables
47 */
48static u32 bnad_rxqs_per_cq = 2;
49static atomic_t bna_id;
50static const u8 bnad_bcast_addr[] __aligned(2) =
51 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
52
53/*
54 * Local MACROS
55 */
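/*
 * The mailbox IRQ is the dedicated MSI-X vector when MSI-X is enabled,
 * otherwise the function's legacy INTx line.
 */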
56#define BNAD_GET_MBOX_IRQ(_bnad) \
57 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
58 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
59 ((_bnad)->pcidev->irq))
60
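/*
 * Fill a bna_res_info entry with a kernel-VA memory request for
 * _num unmap queues of _size bytes each.
 */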
61#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
62do { \
63 (_res_info)->res_type = BNA_RES_T_MEM; \
64 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
65 (_res_info)->res_u.mem_info.num = (_num); \
66 (_res_info)->res_u.mem_info.len = (_size); \
67} while (0)
68
69/*
70 * Reinitialize completions in CQ, once Rx is taken down
71 */
72static void
73bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
74{
75 struct bna_cq_entry *cmpl;
76 int i;
77
78 for (i = 0; i < ccb->q_depth; i++) {
79 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
80 cmpl->valid = 0;
81 }
82}
83
84/* Tx Datapath functions */
85
86
87/* Caller should ensure that the entry at unmap_q[index] is valid */
88static u32
89bnad_tx_buff_unmap(struct bnad *bnad,
90 struct bnad_tx_unmap *unmap_q,
91 u32 q_depth, u32 index)
92{
93 struct bnad_tx_unmap *unmap;
94 struct sk_buff *skb;
95 int vector, nvecs;
96
97 unmap = &unmap_q[index];
98 nvecs = unmap->nvecs;
99
100 skb = unmap->skb;
101 unmap->skb = NULL;
102 unmap->nvecs = 0;
103 dma_unmap_single(&bnad->pcidev->dev,
104 dma_unmap_addr(&unmap->vectors[0], dma_addr),
105 skb_headlen(skb), DMA_TO_DEVICE);
106 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
107 nvecs--;
108
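	/* Any remaining fragments may span additional work items in the queue */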
109 vector = 0;
110 while (nvecs) {
111 vector++;
112 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
113 vector = 0;
114 BNA_QE_INDX_INC(index, q_depth);
115 unmap = &unmap_q[index];
116 }
117
118 dma_unmap_page(&bnad->pcidev->dev,
119 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
120 dma_unmap_len(&unmap->vectors[vector], dma_len),
121 DMA_TO_DEVICE);
122 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
123 nvecs--;
124 }
125
126 BNA_QE_INDX_INC(index, q_depth);
127
128 return index;
129}
130
131/*
132 * Frees all pending Tx Bufs
133 * At this point no activity is expected on the Q,
134 * so DMA unmap & freeing is fine.
135 */
136static void
137bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
138{
139 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
140 struct sk_buff *skb;
141 int i;
142
143 for (i = 0; i < tcb->q_depth; i++) {
144 skb = unmap_q[i].skb;
145 if (!skb)
146 continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
148
149 dev_kfree_skb_any(skb);
150 }
151}
152
153/*
154 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
155 * Can be called in a) Interrupt context
156 * b) Sending context
157 */
158static u32
159bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
160{
161 u32 sent_packets = 0, sent_bytes = 0;
162 u32 wis, unmap_wis, hw_cons, cons, q_depth;
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164 struct bnad_tx_unmap *unmap;
165 struct sk_buff *skb;
166
167 /* Just return if TX is stopped */
168 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
169 return 0;
170
171 hw_cons = *(tcb->hw_consumer_index);
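	/* Ensure hw_consumer_index is read before the unmap queue entries below */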
172 rmb();
173 cons = tcb->consumer_index;
174 q_depth = tcb->q_depth;
175
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
177 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
178
179 while (wis) {
180 unmap = &unmap_q[cons];
181
182 skb = unmap->skb;
183
184 sent_packets++;
185 sent_bytes += skb->len;
186
187 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
188 wis -= unmap_wis;
189
		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
191 dev_kfree_skb_any(skb);
192 }
193
194 /* Update consumer pointers. */
195 tcb->consumer_index = hw_cons;
196
197 tcb->txq->tx_packets += sent_packets;
198 tcb->txq->tx_bytes += sent_bytes;
199
200 return sent_packets;
201}
202
203static u32
204bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
205{
206 struct net_device *netdev = bnad->netdev;
207 u32 sent = 0;
208
	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
210 return 0;
211
212 sent = bnad_txcmpl_process(bnad, tcb);
213 if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
221 }
222 }
223 }
224
225 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
226 bna_ib_ack(tcb->i_dbell, sent);
227
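	/* Order the completion processing above against clearing the flag */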
228 smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
230
231 return sent;
232}
233
234/* MSIX Tx Completion Handler */
235static irqreturn_t
236bnad_msix_tx(int irq, void *data)
237{
238 struct bna_tcb *tcb = (struct bna_tcb *)data;
239 struct bnad *bnad = tcb->bnad;
240
241 bnad_tx_complete(bnad, tcb);
242
243 return IRQ_HANDLED;
244}
245
246static inline void
247bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
248{
249 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
250
251 unmap_q->reuse_pi = -1;
252 unmap_q->alloc_order = -1;
253 unmap_q->map_size = 0;
254 unmap_q->type = BNAD_RXBUF_NONE;
255}
256
257/* Default is page-based allocation. Multi-buffer support - TBD */
258static int
259bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
260{
261 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
262 int order;
263
264 bnad_rxq_alloc_uninit(bnad, rcb);
265
	order = get_order(rcb->rxq->buffer_size);
267
268 unmap_q->type = BNAD_RXBUF_PAGE;
269
270 if (bna_is_small_rxq(rcb->id)) {
271 unmap_q->alloc_order = 0;
272 unmap_q->map_size = rcb->rxq->buffer_size;
273 } else {
274 if (rcb->rxq->multi_buffer) {
275 unmap_q->alloc_order = 0;
276 unmap_q->map_size = rcb->rxq->buffer_size;
277 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
278 } else {
279 unmap_q->alloc_order = order;
280 unmap_q->map_size =
281 (rcb->rxq->buffer_size > 2048) ?
282 PAGE_SIZE << order : 2048;
283 }
284 }
285
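	/* The page allocation must be an exact multiple of map_size so that
	 * partially used pages can be reused at the next offset.
	 */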
286 BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
287
288 return 0;
289}
290
291static inline void
292bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
293{
294 if (!unmap->page)
295 return;
296
297 dma_unmap_page(&bnad->pcidev->dev,
298 dma_unmap_addr(&unmap->vector, dma_addr),
299 unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
301 unmap->page = NULL;
302 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
303 unmap->vector.len = 0;
304}
305
306static inline void
307bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
308{
309 if (!unmap->skb)
310 return;
311
312 dma_unmap_single(&bnad->pcidev->dev,
313 dma_unmap_addr(&unmap->vector, dma_addr),
314 unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
316 unmap->skb = NULL;
317 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
318 unmap->vector.len = 0;
319}
320
321static void
322bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
323{
324 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
325 int i;
326
327 for (i = 0; i < rcb->q_depth; i++) {
328 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
329
330 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
331 bnad_rxq_cleanup_skb(bnad, unmap);
332 else
333 bnad_rxq_cleanup_page(bnad, unmap);
334 }
335 bnad_rxq_alloc_uninit(bnad, rcb);
336}
337
338static u32
339bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
340{
341 u32 alloced, prod, q_depth;
342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_rx_unmap *unmap, *prev;
344 struct bna_rxq_entry *rxent;
345 struct page *page;
346 u32 page_offset, alloc_size;
347 dma_addr_t dma_addr;
348
349 prod = rcb->producer_index;
350 q_depth = rcb->q_depth;
351
352 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
353 alloced = 0;
354
355 while (nalloc--) {
356 unmap = &unmap_q->unmap[prod];
357
358 if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
361 page_offset = 0;
362 } else {
363 prev = &unmap_q->unmap[unmap_q->reuse_pi];
364 page = prev->page;
365 page_offset = prev->page_offset + unmap_q->map_size;
366 get_page(page);
367 }
368
369 if (unlikely(!page)) {
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
371 rcb->rxq->rxbuf_alloc_failed++;
372 goto finishing;
373 }
374
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
376 unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
378 put_page(page);
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
380 rcb->rxq->rxbuf_map_failed++;
381 goto finishing;
382 }
383
384 unmap->page = page;
385 unmap->page_offset = page_offset;
386 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
387 unmap->vector.len = unmap_q->map_size;
388 page_offset += unmap_q->map_size;
389
390 if (page_offset < alloc_size)
391 unmap_q->reuse_pi = prod;
392 else
393 unmap_q->reuse_pi = -1;
394
395 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
396 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
397 BNA_QE_INDX_INC(prod, q_depth);
398 alloced++;
399 }
400
401finishing:
402 if (likely(alloced)) {
403 rcb->producer_index = prod;
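		/* Publish the new producer index before ringing the doorbell */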
404 smp_mb();
405 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
406 bna_rxq_prod_indx_doorbell(rcb);
407 }
408
409 return alloced;
410}
411
412static u32
413bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
414{
415 u32 alloced, prod, q_depth, buff_sz;
416 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
417 struct bnad_rx_unmap *unmap;
418 struct bna_rxq_entry *rxent;
419 struct sk_buff *skb;
420 dma_addr_t dma_addr;
421
422 buff_sz = rcb->rxq->buffer_size;
423 prod = rcb->producer_index;
424 q_depth = rcb->q_depth;
425
426 alloced = 0;
427 while (nalloc--) {
428 unmap = &unmap_q->unmap[prod];
429
		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
431
432 if (unlikely(!skb)) {
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
434 rcb->rxq->rxbuf_alloc_failed++;
435 goto finishing;
436 }
437
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
439 buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
441 dev_kfree_skb_any(skb);
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
443 rcb->rxq->rxbuf_map_failed++;
444 goto finishing;
445 }
446
447 unmap->skb = skb;
448 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
449 unmap->vector.len = buff_sz;
450
451 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
452 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
453 BNA_QE_INDX_INC(prod, q_depth);
454 alloced++;
455 }
456
457finishing:
458 if (likely(alloced)) {
459 rcb->producer_index = prod;
460 smp_mb();
461 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
462 bna_rxq_prod_indx_doorbell(rcb);
463 }
464
465 return alloced;
466}
467
468static inline void
469bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
470{
471 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
472 u32 to_alloc;
473
474 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
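	/* Refill only once enough entries are free to make a batch worthwhile */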
475 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
476 return;
477
	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
482}
483
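/*
 * Completion flag combinations that indicate the adapter verified the
 * L3/L4 checksums for the frame; used to set CHECKSUM_UNNECESSARY below.
 */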
484#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
485 BNA_CQ_EF_IPV6 | \
486 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
487 BNA_CQ_EF_L4_CKSUM_OK)
488
489#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
490 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
491#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
492 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
493#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
494 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
495#define flags_udp6 (BNA_CQ_EF_IPV6 | \
496 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
497
498static void
499bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
500 u32 sop_ci, u32 nvecs)
501{
502 struct bnad_rx_unmap_q *unmap_q;
503 struct bnad_rx_unmap *unmap;
504 u32 ci, vec;
505
506 unmap_q = rcb->unmap_q;
507 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
508 unmap = &unmap_q->unmap[ci];
509 BNA_QE_INDX_INC(ci, rcb->q_depth);
510
511 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
512 bnad_rxq_cleanup_skb(bnad, unmap);
513 else
514 bnad_rxq_cleanup_page(bnad, unmap);
515 }
516}
517
518static void
519bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
520{
521 struct bna_rcb *rcb;
522 struct bnad *bnad;
523 struct bnad_rx_unmap_q *unmap_q;
524 struct bna_cq_entry *cq, *cmpl;
525 u32 ci, pi, totlen = 0;
526
527 cq = ccb->sw_q;
528 pi = ccb->producer_index;
529 cmpl = &cq[pi];
530
531 rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
532 unmap_q = rcb->unmap_q;
533 bnad = rcb->bnad;
534 ci = rcb->consumer_index;
535
536 /* prefetch header */
537 prefetch(page_address(unmap_q->unmap[ci].page) +
538 unmap_q->unmap[ci].page_offset);
539
540 while (nvecs--) {
541 struct bnad_rx_unmap *unmap;
542 u32 len;
543
544 unmap = &unmap_q->unmap[ci];
545 BNA_QE_INDX_INC(ci, rcb->q_depth);
546
547 dma_unmap_page(&bnad->pcidev->dev,
548 dma_unmap_addr(&unmap->vector, dma_addr),
549 unmap->vector.len, DMA_FROM_DEVICE);
550
551 len = ntohs(cmpl->length);
552 skb->truesize += unmap->vector.len;
553 totlen += len;
554
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);
557
558 unmap->page = NULL;
559 unmap->vector.len = 0;
560
561 BNA_QE_INDX_INC(pi, ccb->q_depth);
562 cmpl = &cq[pi];
563 }
564
565 skb->len += totlen;
566 skb->data_len += totlen;
567}
568
569static inline void
570bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571 struct bnad_rx_unmap *unmap, u32 len)
572{
573 prefetch(skb->data);
574
575 dma_unmap_single(&bnad->pcidev->dev,
576 dma_unmap_addr(&unmap->vector, dma_addr),
577 unmap->vector.len, DMA_FROM_DEVICE);
578
579 skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);
581
582 unmap->skb = NULL;
583 unmap->vector.len = 0;
584}
585
586static u32
587bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
588{
589 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590 struct bna_rcb *rcb = NULL;
591 struct bnad_rx_unmap_q *unmap_q;
592 struct bnad_rx_unmap *unmap = NULL;
593 struct sk_buff *skb = NULL;
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596 u32 packets = 0, len = 0, totlen = 0;
597 u32 pi, vec, sop_ci = 0, nvecs = 0;
598 u32 flags, masked_flags;
599
600 prefetch(bnad->netdev);
601
602 cq = ccb->sw_q;
603
604 while (packets < budget) {
605 cmpl = &cq[ccb->producer_index];
606 if (!cmpl->valid)
607 break;
608 /* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
613 * in reading stale values in completion entry.
614 */
615 rmb();
616
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
618
619 if (bna_is_small_rxq(cmpl->rxq_id))
620 rcb = ccb->rcb[1];
621 else
622 rcb = ccb->rcb[0];
623
624 unmap_q = rcb->unmap_q;
625
626 /* start of packet ci */
627 sop_ci = rcb->consumer_index;
628
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630 unmap = &unmap_q->unmap[sop_ci];
631 skb = unmap->skb;
632 } else {
			skb = napi_get_frags(&rx_ctrl->napi);
634 if (unlikely(!skb))
635 break;
636 }
637 prefetch(skb);
638
639 flags = ntohl(cmpl->flags);
640 len = ntohs(cmpl->length);
641 totlen = len;
642 nvecs = 1;
643
644 /* Check all the completions for this frame.
645 * busy-wait doesn't help much, break here.
646 */
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648 (flags & BNA_CQ_EF_EOP) == 0) {
649 pi = ccb->producer_index;
650 do {
651 BNA_QE_INDX_INC(pi, ccb->q_depth);
652 next_cmpl = &cq[pi];
653
654 if (!next_cmpl->valid)
655 break;
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
663 * stale values in completion entry.
664 */
665 rmb();
666
667 len = ntohs(next_cmpl->length);
668 flags = ntohl(next_cmpl->flags);
669
670 nvecs++;
671 totlen += len;
672 } while ((flags & BNA_CQ_EF_EOP) == 0);
673
674 if (!next_cmpl->valid)
675 break;
676 }
677 packets++;
678
679 /* TODO: BNA_CQ_EF_LOCAL ? */
680 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
681 BNA_CQ_EF_FCS_ERROR |
682 BNA_CQ_EF_TOO_LONG))) {
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
684 rcb->rxq->rx_packets_with_error++;
685
686 goto next;
687 }
688
689 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
690 bnad_cq_setup_skb(bnad, skb, unmap, len);
691 else
692 bnad_cq_setup_skb_frags(ccb, skb, nvecs);
693
694 rcb->rxq->rx_packets++;
695 rcb->rxq->rx_bytes += totlen;
696 ccb->bytes_per_intr += totlen;
697
698 masked_flags = flags & flags_cksum_prot_mask;
699
700 if (likely
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702 ((masked_flags == flags_tcp4) ||
703 (masked_flags == flags_udp4) ||
704 (masked_flags == flags_tcp6) ||
705 (masked_flags == flags_udp6))))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 skb_checksum_none_assert(skb);
709
710 if ((flags & BNA_CQ_EF_VLAN) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
713
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715 netif_receive_skb(skb);
716 else
			napi_gro_frags(&rx_ctrl->napi);
718
719next:
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
721 for (vec = 0; vec < nvecs; vec++) {
722 cmpl = &cq[ccb->producer_index];
723 cmpl->valid = 0;
724 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
725 }
726 }
727
	napi_gro_flush(&rx_ctrl->napi, false);
729 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
730 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
731
	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);
735
736 return packets;
737}
738
739static void
740bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
741{
742 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
743 struct napi_struct *napi = &rx_ctrl->napi;
744
745 if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
747 rx_ctrl->rx_schedule++;
748 }
749}
750
751/* MSIX Rx Path Handler */
752static irqreturn_t
753bnad_msix_rx(int irq, void *data)
754{
755 struct bna_ccb *ccb = (struct bna_ccb *)data;
756
757 if (ccb) {
758 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
760 }
761
762 return IRQ_HANDLED;
763}
764
765/* Interrupt handlers */
766
767/* Mbox Interrupt Handlers */
768static irqreturn_t
769bnad_msix_mbox_handler(int irq, void *data)
770{
771 u32 intr_status;
772 unsigned long flags;
773 struct bnad *bnad = (struct bnad *)data;
774
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
778 return IRQ_HANDLED;
779 }
780
781 bna_intr_status_get(&bnad->bna, intr_status);
782
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
787
788 return IRQ_HANDLED;
789}
790
791static irqreturn_t
792bnad_isr(int irq, void *data)
793{
794 int i, j;
795 u32 intr_status;
796 unsigned long flags;
797 struct bnad *bnad = (struct bnad *)data;
798 struct bnad_rx_info *rx_info;
799 struct bnad_rx_ctrl *rx_ctrl;
800 struct bna_tcb *tcb = NULL;
801
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
805 return IRQ_NONE;
806 }
807
808 bna_intr_status_get(&bnad->bna, intr_status);
809
810 if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
812 return IRQ_NONE;
813 }
814
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
819
820 if (!BNA_IS_INTX_DATA_INTR(intr_status))
821 return IRQ_HANDLED;
822
823 /* Process data interrupts */
824 /* Tx processing */
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
828 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
830 }
831 }
832 /* Rx processing */
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
835 if (!rx_info->rx)
836 continue;
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
838 rx_ctrl = &rx_info->rx_ctrl[j];
839 if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
842 }
843 }
844 return IRQ_HANDLED;
845}
846
847/*
848 * Called in interrupt / callback context
849 * with bna_lock held, so cfg_flags access is OK
850 */
851static void
852bnad_enable_mbox_irq(struct bnad *bnad)
853{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
855
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
857}
858
859/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
862 */
863static void
864bnad_disable_mbox_irq(struct bnad *bnad)
865{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
867
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
869}
870
871static void
872bnad_set_netdev_perm_addr(struct bnad *bnad)
873{
874 struct net_device *netdev = bnad->netdev;
875
	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		eth_hw_addr_set(netdev, bnad->perm_addr);
879}
880
881/* Control Path Handlers */
882
883/* Callbacks */
884void
885bnad_cb_mbox_intr_enable(struct bnad *bnad)
886{
887 bnad_enable_mbox_irq(bnad);
888}
889
890void
891bnad_cb_mbox_intr_disable(struct bnad *bnad)
892{
893 bnad_disable_mbox_irq(bnad);
894}
895
896void
897bnad_cb_ioceth_ready(struct bnad *bnad)
898{
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
901}
902
903void
904bnad_cb_ioceth_failed(struct bnad *bnad)
905{
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
908}
909
910void
911bnad_cb_ioceth_disabled(struct bnad *bnad)
912{
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
915}
916
917static void
918bnad_cb_enet_disabled(void *arg)
919{
920 struct bnad *bnad = (struct bnad *)arg;
921
	netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
924}
925
926void
927bnad_cb_ethport_link_status(struct bnad *bnad,
928 enum bna_link_status link_status)
929{
930 bool link_up = false;
931
932 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
933
934 if (link_status == BNA_CEE_UP) {
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
938 } else {
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
942 }
943
944 if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
952 tcb_id++) {
953 struct bna_tcb *tcb =
954 bnad->tx_info[tx_id].tcb[tcb_id];
955 u32 txq_id;
956 if (!tcb)
957 continue;
958
959 txq_id = tcb->id;
960
961 if (test_bit(BNAD_TXQ_TX_STARTED,
962 &tcb->flags)) {
963 /*
964 * Force an immediate
965 * Transmit Schedule */
					netif_wake_subqueue(
						bnad->netdev,
						txq_id);
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				} else {
					netif_stop_subqueue(
						bnad->netdev,
						txq_id);
					BNAD_UPDATE_CTR(bnad,
						netif_queue_stop);
977 }
978 }
979 }
980 }
981 } else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
985 BNAD_UPDATE_CTR(bnad, link_toggle);
986 }
987 }
988}
989
990static void
991bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
992{
993 struct bnad *bnad = (struct bnad *)arg;
994
995 complete(&bnad->bnad_completions.tx_comp);
996}
997
998static void
999bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1000{
1001 struct bnad_tx_info *tx_info =
1002 (struct bnad_tx_info *)tcb->txq->tx->priv;
1003
1004 tcb->priv = tcb;
1005 tx_info->tcb[tcb->id] = tcb;
1006}
1007
1008static void
1009bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1010{
1011 struct bnad_tx_info *tx_info =
1012 (struct bnad_tx_info *)tcb->txq->tx->priv;
1013
1014 tx_info->tcb[tcb->id] = NULL;
1015 tcb->priv = NULL;
1016}
1017
1018static void
1019bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1020{
1021 struct bnad_rx_info *rx_info =
1022 (struct bnad_rx_info *)ccb->cq->rx->priv;
1023
1024 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1026}
1027
1028static void
1029bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1030{
1031 struct bnad_rx_info *rx_info =
1032 (struct bnad_rx_info *)ccb->cq->rx->priv;
1033
1034 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1035}
1036
1037static void
1038bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039{
1040 struct bnad_tx_info *tx_info = tx->priv;
1041 struct bna_tcb *tcb;
1042 u32 txq_id;
1043 int i;
1044
1045 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1046 tcb = tx_info->tcb[i];
1047 if (!tcb)
1048 continue;
1049 txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
1052 }
1053}
1054
1055static void
1056bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1057{
1058 struct bnad_tx_info *tx_info = tx->priv;
1059 struct bna_tcb *tcb;
1060 u32 txq_id;
1061 int i;
1062
1063 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1064 tcb = tx_info->tcb[i];
1065 if (!tcb)
1066 continue;
1067 txq_id = tcb->id;
1068
1069 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1071 BUG_ON(*(tcb->hw_consumer_index) != 0);
1072
		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
1075 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1076 }
1077 }
1078
	/*
	 * Workaround for the first ioceth enable failure, which can leave
	 * us with a zero MAC address. Try to get the MAC address again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
1087 }
1088}
1089
1090/*
1091 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1092 */
1093static void
1094bnad_tx_cleanup(struct work_struct *work)
1095{
1096 struct bnad_tx_info *tx_info =
1097 container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
1098 struct bnad *bnad = NULL;
1099 struct bna_tcb *tcb;
1100 unsigned long flags;
1101 u32 i, pending = 0;
1102
1103 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1104 tcb = tx_info->tcb[i];
1105 if (!tcb)
1106 continue;
1107
1108 bnad = tcb->bnad;
1109
		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1111 pending++;
1112 continue;
1113 }
1114
1115 bnad_txq_cleanup(bnad, tcb);
1116
1117 smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1119 }
1120
1121 if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
				   msecs_to_jiffies(1));
1124 return;
1125 }
1126
1127 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130}
1131
1132static void
1133bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1134{
1135 struct bnad_tx_info *tx_info = tx->priv;
1136 struct bna_tcb *tcb;
1137 int i;
1138
1139 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1140 tcb = tx_info->tcb[i];
1141 if (!tcb)
1142 continue;
1143 }
1144
	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1146}
1147
1148static void
1149bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1150{
1151 struct bnad_rx_info *rx_info = rx->priv;
1152 struct bna_ccb *ccb;
1153 struct bnad_rx_ctrl *rx_ctrl;
1154 int i;
1155
1156 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1157 rx_ctrl = &rx_info->rx_ctrl[i];
1158 ccb = rx_ctrl->ccb;
1159 if (!ccb)
1160 continue;
1161
		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1166 }
1167}
1168
1169/*
1170 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1171 */
1172static void
1173bnad_rx_cleanup(struct work_struct *work)
1174{
1175 struct bnad_rx_info *rx_info =
1176 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1177 struct bnad_rx_ctrl *rx_ctrl;
1178 struct bnad *bnad = NULL;
1179 unsigned long flags;
1180 u32 i;
1181
1182 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1183 rx_ctrl = &rx_info->rx_ctrl[i];
1184
1185 if (!rx_ctrl->ccb)
1186 continue;
1187
1188 bnad = rx_ctrl->ccb->bnad;
1189
1190 /*
1191 * Wait till the poll handler has exited
1192 * and nothing can be scheduled anymore
1193 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1200 }
1201
1202 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1205}
1206
1207static void
1208bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1209{
1210 struct bnad_rx_info *rx_info = rx->priv;
1211 struct bna_ccb *ccb;
1212 struct bnad_rx_ctrl *rx_ctrl;
1213 int i;
1214
1215 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1216 rx_ctrl = &rx_info->rx_ctrl[i];
1217 ccb = rx_ctrl->ccb;
1218 if (!ccb)
1219 continue;
1220
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1225 }
1226
	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1228}
1229
1230static void
1231bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1232{
1233 struct bnad_rx_info *rx_info = rx->priv;
1234 struct bna_ccb *ccb;
1235 struct bna_rcb *rcb;
1236 struct bnad_rx_ctrl *rx_ctrl;
1237 int i, j;
1238
1239 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1240 rx_ctrl = &rx_info->rx_ctrl[i];
1241 ccb = rx_ctrl->ccb;
1242 if (!ccb)
1243 continue;
1244
		napi_enable(&rx_ctrl->napi);
1246
1247 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1248 rcb = ccb->rcb[j];
1249 if (!rcb)
1250 continue;
1251
1252 bnad_rxq_alloc_init(bnad, rcb);
			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1255 bnad_rxq_post(bnad, rcb);
1256 }
1257 }
1258}
1259
1260static void
1261bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1262{
1263 struct bnad *bnad = (struct bnad *)arg;
1264
1265 complete(&bnad->bnad_completions.rx_comp);
1266}
1267
1268static void
1269bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1270{
1271 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1272 complete(&bnad->bnad_completions.mcast_comp);
1273}
1274
1275void
1276bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1277 struct bna_stats *stats)
1278{
1279 if (status == BNA_CB_SUCCESS)
1280 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1281
	if (!netif_running(bnad->netdev) ||
1283 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1284 return;
1285
	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1288}
1289
1290static void
1291bnad_cb_enet_mtu_set(struct bnad *bnad)
1292{
1293 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1294 complete(&bnad->bnad_completions.mtu_comp);
1295}
1296
1297void
1298bnad_cb_completion(void *arg, enum bfa_status status)
1299{
1300 struct bnad_iocmd_comp *iocmd_comp =
1301 (struct bnad_iocmd_comp *)arg;
1302
1303 iocmd_comp->comp_status = (u32) status;
1304 complete(&iocmd_comp->comp);
1305}
1306
1307/* Resource allocation, free functions */
1308
1309static void
1310bnad_mem_free(struct bnad *bnad,
1311 struct bna_mem_info *mem_info)
1312{
1313 int i;
1314 dma_addr_t dma_pa;
1315
1316 if (mem_info->mdl == NULL)
1317 return;
1318
1319 for (i = 0; i < mem_info->num; i++) {
1320 if (mem_info->mdl[i].kva != NULL) {
1321 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1322 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1323 dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
1329 }
1330 }
	kfree(mem_info->mdl);
1332 mem_info->mdl = NULL;
1333}
1334
1335static int
1336bnad_mem_alloc(struct bnad *bnad,
1337 struct bna_mem_info *mem_info)
1338{
1339 int i;
1340 dma_addr_t dma_pa;
1341
1342 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1343 mem_info->mdl = NULL;
1344 return 0;
1345 }
1346
	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1348 GFP_KERNEL);
1349 if (mem_info->mdl == NULL)
1350 return -ENOMEM;
1351
1352 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1353 for (i = 0; i < mem_info->num; i++) {
1354 mem_info->mdl[i].len = mem_info->len;
1355 mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
1358 GFP_KERNEL);
1359 if (mem_info->mdl[i].kva == NULL)
1360 goto err_return;
1361
1362 BNA_SET_DMA_ADDR(dma_pa,
1363 &(mem_info->mdl[i].dma));
1364 }
1365 } else {
1366 for (i = 0; i < mem_info->num; i++) {
1367 mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1369 GFP_KERNEL);
1370 if (mem_info->mdl[i].kva == NULL)
1371 goto err_return;
1372 }
1373 }
1374
1375 return 0;
1376
1377err_return:
1378 bnad_mem_free(bnad, mem_info);
1379 return -ENOMEM;
1380}
1381
1382/* Free IRQ for Mailbox */
1383static void
1384bnad_mbox_irq_free(struct bnad *bnad)
1385{
1386 int irq;
1387 unsigned long flags;
1388
1389 spin_lock_irqsave(&bnad->bna_lock, flags);
1390 bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1392
1393 irq = BNAD_GET_MBOX_IRQ(bnad);
1394 free_irq(irq, bnad);
1395}
1396
1397/*
1398 * Allocates IRQ for Mailbox, but keep it disabled
1399 * This will be enabled once we get the mbox enable callback
1400 * from bna
1401 */
1402static int
1403bnad_mbox_irq_alloc(struct bnad *bnad)
1404{
1405 int err = 0;
1406 unsigned long irq_flags, flags;
1407 u32 irq;
1408 irq_handler_t irq_handler;
1409
1410 spin_lock_irqsave(&bnad->bna_lock, flags);
1411 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1412 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1413 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1414 irq_flags = 0;
1415 } else {
1416 irq_handler = (irq_handler_t)bnad_isr;
1417 irq = bnad->pcidev->irq;
1418 irq_flags = IRQF_SHARED;
1419 }
1420
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1423
1424 /*
1425 * Set the Mbox IRQ disable flag, so that the IRQ handler
1426 * called from request_irq() for SHARED IRQs do not execute
1427 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1429
1430 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1431
	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);
1434
1435 return err;
1436}
1437
1438static void
1439bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1440{
	kfree(intr_info->idl);
1442 intr_info->idl = NULL;
1443}
1444
1445/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1446static int
1447bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1448 u32 txrx_id, struct bna_intr_info *intr_info)
1449{
1450 int i, vector_start = 0;
1451 u32 cfg_flags;
1452 unsigned long flags;
1453
1454 spin_lock_irqsave(&bnad->bna_lock, flags);
1455 cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1457
1458 if (cfg_flags & BNAD_CF_MSIX) {
1459 intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
1462 GFP_KERNEL);
1463 if (!intr_info->idl)
1464 return -ENOMEM;
1465
1466 switch (src) {
1467 case BNAD_INTR_TX:
1468 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1469 break;
1470
1471 case BNAD_INTR_RX:
1472 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1473 (bnad->num_tx * bnad->num_txq_per_tx) +
1474 txrx_id;
1475 break;
1476
1477 default:
1478 BUG();
1479 }
1480
1481 for (i = 0; i < intr_info->num; i++)
1482 intr_info->idl[i].vector = vector_start + i;
1483 } else {
1484 intr_info->intr_type = BNA_INTR_T_INTX;
1485 intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
1488 GFP_KERNEL);
1489 if (!intr_info->idl)
1490 return -ENOMEM;
1491
1492 switch (src) {
1493 case BNAD_INTR_TX:
1494 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1495 break;
1496
1497 case BNAD_INTR_RX:
1498 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1499 break;
1500 }
1501 }
1502 return 0;
1503}
1504
1505/* NOTE: Should be called for MSIX only
1506 * Unregisters Tx MSIX vector(s) from the kernel
1507 */
1508static void
1509bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1510 int num_txqs)
1511{
1512 int i;
1513 int vector_num;
1514
1515 for (i = 0; i < num_txqs; i++) {
1516 if (tx_info->tcb[i] == NULL)
1517 continue;
1518
1519 vector_num = tx_info->tcb[i]->intr_vector;
1520 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1521 }
1522}
1523
1524/* NOTE: Should be called for MSIX only
1525 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1526 */
1527static int
1528bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1529 u32 tx_id, int num_txqs)
1530{
1531 int i;
1532 int err;
1533 int vector_num;
1534
1535 for (i = 0; i < num_txqs; i++) {
1536 vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
1543 if (err)
1544 goto err_return;
1545 }
1546
1547 return 0;
1548
1549err_return:
1550 if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1552 return -1;
1553}
1554
1555/* NOTE: Should be called for MSIX only
1556 * Unregisters Rx MSIX vector(s) from the kernel
1557 */
1558static void
1559bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1560 int num_rxps)
1561{
1562 int i;
1563 int vector_num;
1564
1565 for (i = 0; i < num_rxps; i++) {
1566 if (rx_info->rx_ctrl[i].ccb == NULL)
1567 continue;
1568
1569 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1570 free_irq(bnad->msix_table[vector_num].vector,
1571 rx_info->rx_ctrl[i].ccb);
1572 }
1573}
1574
1575/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1577 */
1578static int
1579bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1580 u32 rx_id, int num_rxps)
1581{
1582 int i;
1583 int err;
1584 int vector_num;
1585
1586 for (i = 0; i < num_rxps; i++) {
1587 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
1595 if (err)
1596 goto err_return;
1597 }
1598
1599 return 0;
1600
1601err_return:
1602 if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1604 return -1;
1605}
1606
1607/* Free Tx object Resources */
1608static void
1609bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1610{
1611 int i;
1612
1613 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1614 if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1618 }
1619}
1620
1621/* Allocates memory and interrupt resources for Tx object */
1622static int
1623bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1624 u32 tx_id)
1625{
1626 int i, err = 0;
1627
1628 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1629 if (res_info[i].res_type == BNA_RES_T_MEM)
1630 err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
						  &res_info[i].res_u.intr_info);
1635 if (err)
1636 goto err_return;
1637 }
1638 return 0;
1639
1640err_return:
1641 bnad_tx_res_free(bnad, res_info);
1642 return err;
1643}
1644
1645/* Free Rx object Resources */
1646static void
1647bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1648{
1649 int i;
1650
1651 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1652 if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1656 }
1657}
1658
1659/* Allocates memory and interrupt resources for Rx object */
1660static int
1661bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1662 uint rx_id)
1663{
1664 int i, err = 0;
1665
1666 /* All memory needs to be allocated before setup_ccbs */
1667 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1668 if (res_info[i].res_type == BNA_RES_T_MEM)
1669 err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
						  &res_info[i].res_u.intr_info);
1674 if (err)
1675 goto err_return;
1676 }
1677 return 0;
1678
1679err_return:
1680 bnad_rx_res_free(bnad, res_info);
1681 return err;
1682}
1683
1684/* Timer callbacks */
1685/* a) IOC timer */
1686static void
1687bnad_ioc_timeout(struct timer_list *t)
1688{
1689 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1690 unsigned long flags;
1691
1692 spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1695}
1696
1697static void
1698bnad_ioc_hb_check(struct timer_list *t)
1699{
1700 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1701 unsigned long flags;
1702
1703 spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706}
1707
1708static void
1709bnad_iocpf_timeout(struct timer_list *t)
1710{
1711 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1712 unsigned long flags;
1713
1714 spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717}
1718
1719static void
1720bnad_iocpf_sem_timeout(struct timer_list *t)
1721{
1722 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1723 unsigned long flags;
1724
1725 spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1728}
1729
1730/*
1731 * All timer routines use bnad->bna_lock to protect against
1732 * the following race, which may occur in case of no locking:
1733 * Time CPU m CPU n
1734 * 0 1 = test_bit
1735 * 1 clear_bit
1736 * 2 del_timer_sync
1737 * 3 mod_timer
1738 */
1739
1740/* b) Dynamic Interrupt Moderation Timer */
1741static void
1742bnad_dim_timeout(struct timer_list *t)
1743{
1744 struct bnad *bnad = from_timer(bnad, t, dim_timer);
1745 struct bnad_rx_info *rx_info;
1746 struct bnad_rx_ctrl *rx_ctrl;
1747 int i, j;
1748 unsigned long flags;
1749
	if (!netif_carrier_ok(bnad->netdev))
1751 return;
1752
1753 spin_lock_irqsave(&bnad->bna_lock, flags);
1754 for (i = 0; i < bnad->num_rx; i++) {
1755 rx_info = &bnad->rx_info[i];
1756 if (!rx_info->rx)
1757 continue;
1758 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1759 rx_ctrl = &rx_info->rx_ctrl[j];
1760 if (!rx_ctrl->ccb)
1761 continue;
			bna_rx_dim_update(rx_ctrl->ccb);
1763 }
1764 }
1765
1766 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1767 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1771}
1772
1773/* c) Statistics Timer */
1774static void
1775bnad_stats_timeout(struct timer_list *t)
1776{
1777 struct bnad *bnad = from_timer(bnad, t, stats_timer);
1778 unsigned long flags;
1779
	if (!netif_running(bnad->netdev) ||
1781 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1782 return;
1783
1784 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1787}
1788
1789/*
1790 * Set up timer for DIM
1791 * Called with bnad->bna_lock held
1792 */
1793void
1794bnad_dim_timer_start(struct bnad *bnad)
1795{
1796 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1797 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1798 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1802 }
1803}
1804
1805/*
1806 * Set up timer for statistics
1807 * Called with mutex_lock(&bnad->conf_mutex) held
1808 */
1809static void
1810bnad_stats_timer_start(struct bnad *bnad)
1811{
1812 unsigned long flags;
1813
1814 spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1819 }
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1821}
1822
1823/*
1824 * Stops the stats timer
1825 * Called with mutex_lock(&bnad->conf_mutex) held
1826 */
1827static void
1828bnad_stats_timer_stop(struct bnad *bnad)
1829{
1830 int to_del = 0;
1831 unsigned long flags;
1832
1833 spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1835 to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
1839}
1840
1841/* Utilities */
1842
1843static void
1844bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1845{
1846 int i = 1; /* Index 0 has broadcast address */
1847 struct netdev_hw_addr *mc_addr;
1848
1849 netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1851 i++;
1852 }
1853}
1854
1855static int
1856bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1857{
1858 struct bnad_rx_ctrl *rx_ctrl =
1859 container_of(napi, struct bnad_rx_ctrl, napi);
1860 struct bnad *bnad = rx_ctrl->bnad;
1861 int rcvd = 0;
1862
1863 rx_ctrl->rx_poll_ctr++;
1864
	if (!netif_carrier_ok(bnad->netdev))
1866 goto poll_exit;
1867
	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1869 if (rcvd >= budget)
1870 return rcvd;
1871
1872poll_exit:
	napi_complete_done(napi, rcvd);
1874
1875 rx_ctrl->rx_complete++;
1876
1877 if (rx_ctrl->ccb)
1878 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1879
1880 return rcvd;
1881}
1882
1883static void
1884bnad_napi_add(struct bnad *bnad, u32 rx_id)
1885{
1886 struct bnad_rx_ctrl *rx_ctrl;
1887 int i;
1888
1889 /* Initialize & enable NAPI */
1890 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1891 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx);
1894 }
1895}
1896
1897static void
1898bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1899{
1900 int i;
1901
1902 /* First disable and then clean up */
1903 for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1905}
1906
/* Should be called with conf_lock held */
1908void
1909bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1910{
1911 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1912 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1913 unsigned long flags;
1914
1915 if (!tx_info->tx)
1916 return;
1917
	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1922 wait_for_completion(&bnad->bnad_completions.tx_comp);
1923
1924 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
					bnad->num_txq_per_tx);
1927
1928 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1931
1932 tx_info->tx = NULL;
1933 tx_info->tx_id = 0;
1934
1935 bnad_tx_res_free(bnad, res_info);
1936}
1937
/* Should be called with conf_lock held */
1939int
1940bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1941{
1942 int err;
1943 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1944 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1945 struct bna_intr_info *intr_info =
1946 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1947 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1948 static const struct bna_tx_event_cbfn tx_cbfn = {
1949 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1950 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1951 .tx_stall_cbfn = bnad_cb_tx_stall,
1952 .tx_resume_cbfn = bnad_cb_tx_resume,
1953 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1954 };
1955
1956 struct bna_tx *tx;
1957 unsigned long flags;
1958
1959 tx_info->tx_id = tx_id;
1960
1961 /* Initialize the Tx object configuration */
1962 tx_config->num_txq = bnad->num_txq_per_tx;
1963 tx_config->txq_depth = bnad->txq_depth;
1964 tx_config->tx_type = BNA_TX_T_REGULAR;
1965 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1966
1967 /* Get BNA's resource requirement for one tx object */
1968 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		       bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1972
1973 /* Fill Unmap Q memory requirements */
1974 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1975 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1976 bnad->txq_depth));
1977
1978 /* Allocate resources */
1979 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1980 if (err)
1981 return err;
1982
1983 /* Ask BNA to create one Tx object, supplying required resources */
1984 spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			   tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1988 if (!tx) {
1989 err = -ENOMEM;
1990 goto err_return;
1991 }
1992 tx_info->tx = tx;
1993
1994 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
1995
1996 /* Register ISR for the Tx object */
1997 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
					    tx_id, bnad->num_txq_per_tx);
2000 if (err)
2001 goto cleanup_tx;
2002 }
2003
2004 spin_lock_irqsave(&bnad->bna_lock, flags);
2005 bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2007
2008 return 0;
2009
2010cleanup_tx:
2011 spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2014 tx_info->tx = NULL;
2015 tx_info->tx_id = 0;
2016err_return:
2017 bnad_tx_res_free(bnad, res_info);
2018 return err;
2019}
2020
2021/* Setup the rx config for bna_rx_create */
2022/* bnad decides the configuration */
2023static void
2024bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2025{
2026 memset(rx_config, 0, sizeof(*rx_config));
2027 rx_config->rx_type = BNA_RX_T_REGULAR;
2028 rx_config->num_paths = bnad->num_rxp_per_rx;
2029 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2030
2031 if (bnad->num_rxp_per_rx > 1) {
2032 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2033 rx_config->rss_config.hash_type =
2034 (BFI_ENET_RSS_IPV6 |
2035 BFI_ENET_RSS_IPV6_TCP |
2036 BFI_ENET_RSS_IPV4 |
2037 BFI_ENET_RSS_IPV4_TCP);
2038 rx_config->rss_config.hash_mask =
2039 bnad->num_rxp_per_rx - 1;
		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
				    sizeof(rx_config->rss_config.toeplitz_hash_key));
2042 } else {
2043 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2044 memset(&rx_config->rss_config, 0,
2045 sizeof(rx_config->rss_config));
2046 }
2047
2048 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2049 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2050
2051 /* BNA_RXP_SINGLE - one data-buffer queue
2052 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2053 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2054 */
2055 /* TODO: configurable param for queue type */
2056 rx_config->rxp_type = BNA_RXP_SLR;
2057
2058 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2059 rx_config->frame_size > 4096) {
2060 /* though size_routing_enable is set in SLR,
2061 * small packets may get routed to same rxq.
2062 * set buf_size to 2048 instead of PAGE_SIZE.
2063 */
2064 rx_config->q0_buf_size = 2048;
2065 /* this should be in multiples of 2 */
2066 rx_config->q0_num_vecs = 4;
2067 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2068 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2069 } else {
2070 rx_config->q0_buf_size = rx_config->frame_size;
2071 rx_config->q0_num_vecs = 1;
2072 rx_config->q0_depth = bnad->rxq_depth;
2073 }
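/*
 * In the multi-buffer case each received frame can be scattered across up
 * to q0_num_vecs buffers of q0_buf_size bytes, so q0_depth is scaled by
 * the same factor and the queue still holds roughly rxq_depth packets.
 */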
2074
2075 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2076 if (rx_config->rxp_type == BNA_RXP_SLR) {
2077 rx_config->q1_depth = bnad->rxq_depth;
2078 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2079 }
2080
2081 rx_config->vlan_strip_status =
2082 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2083 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2084}
2085
2086static void
2087bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2088{
2089 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2090 int i;
2091
2092 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2093 rx_info->rx_ctrl[i].bnad = bnad;
2094}
2095
2096/* Called with mutex_lock(&bnad->conf_mutex) held */
2097static u32
2098bnad_reinit_rx(struct bnad *bnad)
2099{
2100 struct net_device *netdev = bnad->netdev;
2101 u32 err = 0, current_err = 0;
2102 u32 rx_id = 0, count = 0;
2103 unsigned long flags;
2104
2105 /* destroy and create new rx objects */
2106 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2107 if (!bnad->rx_info[rx_id].rx)
2108 continue;
2109 bnad_destroy_rx(bnad, rx_id);
2110 }
2111
2112 spin_lock_irqsave(&bnad->bna_lock, flags);
2113 bna_enet_mtu_set(&bnad->bna.enet,
2114 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2115 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2116
2117 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2118 count++;
2119 current_err = bnad_setup_rx(bnad, rx_id);
2120 if (current_err && !err) {
2121 err = current_err;
2122 netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2123 }
2124 }
2125
2126 /* restore rx configuration */
2127 if (bnad->rx_info[0].rx && !err) {
2128 bnad_restore_vlans(bnad, 0);
2129 bnad_enable_default_bcast(bnad);
2130 spin_lock_irqsave(&bnad->bna_lock, flags);
2131 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2132 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2133 bnad_set_rx_mode(netdev);
2134 }
2135
2136 return count;
2137}
2138
2139/* Called with bnad_conf_lock() held */
2140void
2141bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2142{
2143 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2144 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2145 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2146 unsigned long flags;
2147 int to_del = 0;
2148
2149 if (!rx_info->rx)
2150 return;
2151
2152 if (0 == rx_id) {
2153 spin_lock_irqsave(&bnad->bna_lock, flags);
2154 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2155 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2156 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2157 to_del = 1;
2158 }
2159 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2160 if (to_del)
2161 del_timer_sync(&bnad->dim_timer);
2162 }
2163
2164 init_completion(&bnad->bnad_completions.rx_comp);
2165 spin_lock_irqsave(&bnad->bna_lock, flags);
2166 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2167 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2168 wait_for_completion(&bnad->bnad_completions.rx_comp);
2169
2170 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2171 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2172
2173 bnad_napi_delete(bnad, rx_id);
2174
2175 spin_lock_irqsave(&bnad->bna_lock, flags);
2176 bna_rx_destroy(rx_info->rx);
2177
2178 rx_info->rx = NULL;
2179 rx_info->rx_id = 0;
2180 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2181
2182 bnad_rx_res_free(bnad, res_info);
2183}
2184
2185/* Called with mutex_lock(&bnad->conf_mutex) held */
2186int
2187bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2188{
2189 int err;
2190 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2191 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2192 struct bna_intr_info *intr_info =
2193 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2194 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2195 static const struct bna_rx_event_cbfn rx_cbfn = {
2196 .rcb_setup_cbfn = NULL,
2197 .rcb_destroy_cbfn = NULL,
2198 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2199 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2200 .rx_stall_cbfn = bnad_cb_rx_stall,
2201 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2202 .rx_post_cbfn = bnad_cb_rx_post,
2203 };
2204 struct bna_rx *rx;
2205 unsigned long flags;
2206
2207 rx_info->rx_id = rx_id;
2208
2209 /* Initialize the Rx object configuration */
2210 bnad_init_rx_config(bnad, rx_config);
2211
2212 /* Get BNA's resource requirement for one Rx object */
2213 spin_lock_irqsave(&bnad->bna_lock, flags);
2214 bna_rx_res_req(rx_config, res_info);
2215 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2216
2217 /* Fill Unmap Q memory requirements */
2218 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2219 rx_config->num_paths,
2220 (rx_config->q0_depth *
2221 sizeof(struct bnad_rx_unmap)) +
2222 sizeof(struct bnad_rx_unmap_q));
2223
2224 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2225 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2226 rx_config->num_paths,
2227 (rx_config->q1_depth *
2228 sizeof(struct bnad_rx_unmap) +
2229 sizeof(struct bnad_rx_unmap_q)));
2230 }
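/*
 * Each Rx unmap queue is laid out as a struct bnad_rx_unmap_q header
 * followed by one struct bnad_rx_unmap entry per queue element, which is
 * what the size expressions above account for.
 */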
2231 /* Allocate resource */
2232 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2233 if (err)
2234 return err;
2235
2236 bnad_rx_ctrl_init(bnad, rx_id);
2237
2238 /* Ask BNA to create one Rx object, supplying required resources */
2239 spin_lock_irqsave(&bnad->bna_lock, flags);
2240 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2241 rx_info);
2242 if (!rx) {
2243 err = -ENOMEM;
2244 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2245 goto err_return;
2246 }
2247 rx_info->rx = rx;
2248 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2249
2250 INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
2251
2252 /*
2253 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2254 * and the IRQ handler cannot schedule NAPI at this point.
2255 */
2256 bnad_napi_add(bnad, rx_id);
2257
2258 /* Register ISR for the Rx object */
2259 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2260 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2261 rx_config->num_paths);
2262 if (err)
2263 goto err_return;
2264 }
2265
2266 spin_lock_irqsave(&bnad->bna_lock, flags);
2267 if (0 == rx_id) {
2268 /* Set up Dynamic Interrupt Moderation Vector */
2269 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2270 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2271
2272 /* Enable VLAN filtering only on the default Rx */
2273 bna_rx_vlanfilter_enable(rx);
2274
2275 /* Start the DIM timer */
2276 bnad_dim_timer_start(bnad);
2277 }
2278
2279 bna_rx_enable(rx);
2280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2281
2282 return 0;
2283
2284err_return:
2285 bnad_destroy_rx(bnad, rx_id);
2286 return err;
2287}
2288
2289/* Called with conf_lock & bnad->bna_lock held */
2290void
2291bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2292{
2293 struct bnad_tx_info *tx_info;
2294
2295 tx_info = &bnad->tx_info[0];
2296 if (!tx_info->tx)
2297 return;
2298
2299 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2300}
2301
2302/* Called with conf_lock & bnad->bna_lock held */
2303void
2304bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2305{
2306 struct bnad_rx_info *rx_info;
2307 int i;
2308
2309 for (i = 0; i < bnad->num_rx; i++) {
2310 rx_info = &bnad->rx_info[i];
2311 if (!rx_info->rx)
2312 continue;
2313 bna_rx_coalescing_timeo_set(rx_info->rx,
2314 bnad->rx_coalescing_timeo);
2315 }
2316}
2317
2318/*
2319 * Called with bnad->bna_lock held
2320 */
2321int
2322bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2323{
2324 int ret;
2325
2326 if (!is_valid_ether_addr(mac_addr))
2327 return -EADDRNOTAVAIL;
2328
2329 /* If datapath is down, pretend everything went through */
2330 if (!bnad->rx_info[0].rx)
2331 return 0;
2332
2333 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2334 if (ret != BNA_CB_SUCCESS)
2335 return -EADDRNOTAVAIL;
2336
2337 return 0;
2338}
2339
2340/* Should be called with conf_lock held */
2341int
2342bnad_enable_default_bcast(struct bnad *bnad)
2343{
2344 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2345 int ret;
2346 unsigned long flags;
2347
2348 init_completion(&bnad->bnad_completions.mcast_comp);
2349
2350 spin_lock_irqsave(&bnad->bna_lock, flags);
2351 ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2352 bnad_cb_rx_mcast_add);
2353 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2354
2355 if (ret == BNA_CB_SUCCESS)
2356 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2357 else
2358 return -ENODEV;
2359
2360 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2361 return -ENODEV;
2362
2363 return 0;
2364}
2365
2366/* Called with mutex_lock(&bnad->conf_mutex) held */
2367void
2368bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2369{
2370 u16 vid;
2371 unsigned long flags;
2372
2373 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2374 spin_lock_irqsave(&bnad->bna_lock, flags);
2375 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2376 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2377 }
2378}
2379
2380/* Statistics utilities */
2381void
2382bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2383{
2384 int i, j;
2385
2386 for (i = 0; i < bnad->num_rx; i++) {
2387 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2388 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2389 stats->rx_packets += bnad->rx_info[i].
2390 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2391 stats->rx_bytes += bnad->rx_info[i].
2392 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2393 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2394 bnad->rx_info[i].rx_ctrl[j].ccb->
2395 rcb[1]->rxq) {
2396 stats->rx_packets +=
2397 bnad->rx_info[i].rx_ctrl[j].
2398 ccb->rcb[1]->rxq->rx_packets;
2399 stats->rx_bytes +=
2400 bnad->rx_info[i].rx_ctrl[j].
2401 ccb->rcb[1]->rxq->rx_bytes;
2402 }
2403 }
2404 }
2405 }
2406 for (i = 0; i < bnad->num_tx; i++) {
2407 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2408 if (bnad->tx_info[i].tcb[j]) {
2409 stats->tx_packets +=
2410 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2411 stats->tx_bytes +=
2412 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2413 }
2414 }
2415 }
2416}
2417
2418/*
2419 * Must be called with the bna_lock held.
2420 */
2421void
2422bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2423{
2424 struct bfi_enet_stats_mac *mac_stats;
2425 u32 bmap;
2426 int i;
2427
2428 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2429 stats->rx_errors =
2430 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2431 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2432 mac_stats->rx_undersize;
2433 stats->tx_errors = mac_stats->tx_fcs_error +
2434 mac_stats->tx_undersize;
2435 stats->rx_dropped = mac_stats->rx_drop;
2436 stats->tx_dropped = mac_stats->tx_drop;
2437 stats->multicast = mac_stats->rx_multicast;
2438 stats->collisions = mac_stats->tx_total_collision;
2439
2440 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2441
2442 /* receive ring buffer overflow ?? */
2443
2444 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2445 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2446 /* recv'r fifo overrun */
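/*
 * Note: the loop below folds in the frame_drops counter of only the first
 * active RxF; it breaks out after the first set bit in the mask.
 */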
2447 bmap = bna_rx_rid_mask(&bnad->bna);
2448 for (i = 0; bmap; i++) {
2449 if (bmap & 1) {
2450 stats->rx_fifo_errors +=
2451 bnad->stats.bna_stats->
2452 hw_stats.rxf_stats[i].frame_drops;
2453 break;
2454 }
2455 bmap >>= 1;
2456 }
2457}
2458
2459static void
2460bnad_mbox_irq_sync(struct bnad *bnad)
2461{
2462 u32 irq;
2463 unsigned long flags;
2464
2465 spin_lock_irqsave(&bnad->bna_lock, flags);
2466 if (bnad->cfg_flags & BNAD_CF_MSIX)
2467 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2468 else
2469 irq = bnad->pcidev->irq;
2470 spin_unlock_irqrestore(lock: &bnad->bna_lock, flags);
2471
2472 synchronize_irq(irq);
2473}
2474
2475/* Utility used by bnad_start_xmit, for doing TSO */
2476static int
2477bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2478{
2479 int err;
2480
2481 err = skb_cow_head(skb, 0);
2482 if (err < 0) {
2483 BNAD_UPDATE_CTR(bnad, tso_err);
2484 return err;
2485 }
2486
2487 /*
2488 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2489 * excluding the length field.
2490 */
2491 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2492 struct iphdr *iph = ip_hdr(skb);
2493
2494 /* Do we really need these? */
2495 iph->tot_len = 0;
2496 iph->check = 0;
2497
2498 tcp_hdr(skb)->check =
2499 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2500 IPPROTO_TCP, 0);
2501 BNAD_UPDATE_CTR(bnad, tso4);
2502 } else {
2503 tcp_v6_gso_csum_prep(skb);
2504 BNAD_UPDATE_CTR(bnad, tso6);
2505 }
2506
2507 return 0;
2508}
2509
2510/*
2511 * Initialize Q numbers depending on Rx Paths
2512 * Called with bnad->bna_lock held, because of cfg_flags
2513 * access.
2514 */
2515static void
2516bnad_q_num_init(struct bnad *bnad)
2517{
2518 int rxps;
2519
2520 rxps = min((uint)num_online_cpus(),
2521 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2522
2523 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2524 rxps = 1; /* INTx */
2525
2526 bnad->num_rx = 1;
2527 bnad->num_tx = 1;
2528 bnad->num_rxp_per_rx = rxps;
2529 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2530}
2531
2532/*
2533 * Adjusts the Q numbers, given a number of msix vectors
2534 * Give preference to RSS as opposed to Tx priority Queues,
2535 * in such a case, just use 1 Tx Q
2536 * Called with bnad->bna_lock held because of cfg_flags access
2537 */
2538static void
2539bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2540{
2541 bnad->num_txq_per_tx = 1;
2542 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2543 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2544 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2545 bnad->num_rxp_per_rx = msix_vectors -
2546 (bnad->num_tx * bnad->num_txq_per_tx) -
2547 BNAD_MAILBOX_MSIX_VECTORS;
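/*
 * Example, assuming BNAD_MAILBOX_MSIX_VECTORS is 1: with 8 MSI-X vectors
 * and a single TxQ this leaves 8 - 1 - 1 = 6 Rx paths.
 */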
2548 } else
2549 bnad->num_rxp_per_rx = 1;
2550}
2551
2552/* Enable / disable ioceth */
2553static int
2554bnad_ioceth_disable(struct bnad *bnad)
2555{
2556 unsigned long flags;
2557 int err = 0;
2558
2559 spin_lock_irqsave(&bnad->bna_lock, flags);
2560 init_completion(&bnad->bnad_completions.ioc_comp);
2561 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2562 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2563
2564 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2565 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2566
2567 err = bnad->bnad_completions.ioc_comp_status;
2568 return err;
2569}
2570
2571static int
2572bnad_ioceth_enable(struct bnad *bnad)
2573{
2574 int err = 0;
2575 unsigned long flags;
2576
2577 spin_lock_irqsave(&bnad->bna_lock, flags);
2578 init_completion(&bnad->bnad_completions.ioc_comp);
2579 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2580 bna_ioceth_enable(&bnad->bna.ioceth);
2581 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2582
2583 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2584 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2585
2586 err = bnad->bnad_completions.ioc_comp_status;
2587
2588 return err;
2589}
2590
2591/* Free BNA resources */
2592static void
2593bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2594 u32 res_val_max)
2595{
2596 int i;
2597
2598 for (i = 0; i < res_val_max; i++)
2599 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2600}
2601
2602/* Allocates memory and interrupt resources for BNA */
2603static int
2604bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2605 u32 res_val_max)
2606{
2607 int i, err;
2608
2609 for (i = 0; i < res_val_max; i++) {
2610 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2611 if (err)
2612 goto err_return;
2613 }
2614 return 0;
2615
2616err_return:
2617 bnad_res_free(bnad, res_info, res_val_max);
2618 return err;
2619}
2620
2621/* Interrupt enable / disable */
2622static void
2623bnad_enable_msix(struct bnad *bnad)
2624{
2625 int i, ret;
2626 unsigned long flags;
2627
2628 spin_lock_irqsave(&bnad->bna_lock, flags);
2629 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2630 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2631 return;
2632 }
2633 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2634
2635 if (bnad->msix_table)
2636 return;
2637
2638 bnad->msix_table =
2639 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2640
2641 if (!bnad->msix_table)
2642 goto intx_mode;
2643
2644 for (i = 0; i < bnad->msix_num; i++)
2645 bnad->msix_table[i].entry = i;
2646
2647 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2648 1, bnad->msix_num);
2649 if (ret < 0) {
2650 goto intx_mode;
2651 } else if (ret < bnad->msix_num) {
2652 dev_warn(&bnad->pcidev->dev,
2653 "%d MSI-X vectors allocated < %d requested\n",
2654 ret, bnad->msix_num);
2655
2656 spin_lock_irqsave(&bnad->bna_lock, flags);
2657 /* ret = #of vectors that we got */
2658 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2659 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2660 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2661
2662 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2663 BNAD_MAILBOX_MSIX_VECTORS;
2664
2665 if (bnad->msix_num > ret) {
2666 pci_disable_msix(bnad->pcidev);
2667 goto intx_mode;
2668 }
2669 }
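/*
 * Reaching this point means either the full MSI-X request was granted, or
 * the queue counts were shrunk to fit the vectors actually allocated; any
 * other outcome has already jumped to the INTx fallback below.
 */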
2670
2671 pci_intx(bnad->pcidev, 0);
2672
2673 return;
2674
2675intx_mode:
2676 dev_warn(&bnad->pcidev->dev,
2677 "MSI-X enable failed - operating in INTx mode\n");
2678
2679 kfree(bnad->msix_table);
2680 bnad->msix_table = NULL;
2681 bnad->msix_num = 0;
2682 spin_lock_irqsave(&bnad->bna_lock, flags);
2683 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2684 bnad_q_num_init(bnad);
2685 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2686}
2687
2688static void
2689bnad_disable_msix(struct bnad *bnad)
2690{
2691 u32 cfg_flags;
2692 unsigned long flags;
2693
2694 spin_lock_irqsave(&bnad->bna_lock, flags);
2695 cfg_flags = bnad->cfg_flags;
2696 if (bnad->cfg_flags & BNAD_CF_MSIX)
2697 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2698 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2699
2700 if (cfg_flags & BNAD_CF_MSIX) {
2701 pci_disable_msix(bnad->pcidev);
2702 kfree(bnad->msix_table);
2703 bnad->msix_table = NULL;
2704 }
2705}
2706
2707/* Netdev entry points */
2708static int
2709bnad_open(struct net_device *netdev)
2710{
2711 int err;
2712 struct bnad *bnad = netdev_priv(netdev);
2713 struct bna_pause_config pause_config;
2714 unsigned long flags;
2715
2716 mutex_lock(&bnad->conf_mutex);
2717
2718 /* Tx */
2719 err = bnad_setup_tx(bnad, 0);
2720 if (err)
2721 goto err_return;
2722
2723 /* Rx */
2724 err = bnad_setup_rx(bnad, 0);
2725 if (err)
2726 goto cleanup_tx;
2727
2728 /* Port */
2729 pause_config.tx_pause = 0;
2730 pause_config.rx_pause = 0;
2731
2732 spin_lock_irqsave(&bnad->bna_lock, flags);
2733 bna_enet_mtu_set(&bnad->bna.enet,
2734 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2735 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2736 bna_enet_enable(&bnad->bna.enet);
2737 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2738
2739 /* Enable broadcast */
2740 bnad_enable_default_bcast(bnad);
2741
2742 /* Restore VLANs, if any */
2743 bnad_restore_vlans(bnad, 0);
2744
2745 /* Set the UCAST address */
2746 spin_lock_irqsave(&bnad->bna_lock, flags);
2747 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2748 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2749
2750 /* Start the stats timer */
2751 bnad_stats_timer_start(bnad);
2752
2753 mutex_unlock(&bnad->conf_mutex);
2754
2755 return 0;
2756
2757cleanup_tx:
2758 bnad_destroy_tx(bnad, 0);
2759
2760err_return:
2761 mutex_unlock(&bnad->conf_mutex);
2762 return err;
2763}
2764
2765static int
2766bnad_stop(struct net_device *netdev)
2767{
2768 struct bnad *bnad = netdev_priv(netdev);
2769 unsigned long flags;
2770
2771 mutex_lock(&bnad->conf_mutex);
2772
2773 /* Stop the stats timer */
2774 bnad_stats_timer_stop(bnad);
2775
2776 init_completion(&bnad->bnad_completions.enet_comp);
2777
2778 spin_lock_irqsave(&bnad->bna_lock, flags);
2779 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2780 bnad_cb_enet_disabled);
2781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2782
2783 wait_for_completion(&bnad->bnad_completions.enet_comp);
2784
2785 bnad_destroy_tx(bnad, 0);
2786 bnad_destroy_rx(bnad, 0);
2787
2788 /* Synchronize mailbox IRQ */
2789 bnad_mbox_irq_sync(bnad);
2790
2791 mutex_unlock(&bnad->conf_mutex);
2792
2793 return 0;
2794}
2795
2796/* TX */
2797/* Returns 0 for success */
2798static int
2799bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2800 struct sk_buff *skb, struct bna_txq_entry *txqent)
2801{
2802 u16 flags = 0;
2803 u32 gso_size;
2804 u16 vlan_tag = 0;
2805
2806 if (skb_vlan_tag_present(skb)) {
2807 vlan_tag = (u16)skb_vlan_tag_get(skb);
2808 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2809 }
2810 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2811 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2812 | (vlan_tag & 0x1fff);
2813 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2814 }
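/*
 * When CEE (DCB) is running, the 802.1p priority bits of the tag are
 * overridden with the priority assigned to this Tx object, while the low
 * 13 bits (VLAN ID and DEI) are preserved.
 */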
2815 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2816
2817 if (skb_is_gso(skb)) {
2818 gso_size = skb_shinfo(skb)->gso_size;
2819 if (unlikely(gso_size > bnad->netdev->mtu)) {
2820 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2821 return -EINVAL;
2822 }
2823 if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
2824 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2825 txqent->hdr.wi.lso_mss = 0;
2826 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2827 } else {
2828 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2829 txqent->hdr.wi.lso_mss = htons(gso_size);
2830 }
2831
2832 if (bnad_tso_prepare(bnad, skb)) {
2833 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2834 return -EINVAL;
2835 }
2836
2837 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2838 txqent->hdr.wi.l4_hdr_size_n_offset =
2839 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2840 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2841 } else {
2842 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2843 txqent->hdr.wi.lso_mss = 0;
2844
2845 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2846 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2847 return -EINVAL;
2848 }
2849
2850 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2851 __be16 net_proto = vlan_get_protocol(skb);
2852 u8 proto = 0;
2853
2854 if (net_proto == htons(ETH_P_IP))
2855 proto = ip_hdr(skb)->protocol;
2856#ifdef NETIF_F_IPV6_CSUM
2857 else if (net_proto == htons(ETH_P_IPV6)) {
2858 /* nexthdr may not be TCP immediately. */
2859 proto = ipv6_hdr(skb)->nexthdr;
2860 }
2861#endif
2862 if (proto == IPPROTO_TCP) {
2863 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2864 txqent->hdr.wi.l4_hdr_size_n_offset =
2865 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2866 (0, skb_transport_offset(skb)));
2867
2868 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2869
2870 if (unlikely(skb_headlen(skb) <
2871 skb_tcp_all_headers(skb))) {
2872 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2873 return -EINVAL;
2874 }
2875 } else if (proto == IPPROTO_UDP) {
2876 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2877 txqent->hdr.wi.l4_hdr_size_n_offset =
2878 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2879 (0, skb_transport_offset(skb)));
2880
2881 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2882 if (unlikely(skb_headlen(skb) <
2883 skb_transport_offset(skb) +
2884 sizeof(struct udphdr))) {
2885 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2886 return -EINVAL;
2887 }
2888 } else {
2889
2890 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2891 return -EINVAL;
2892 }
2893 } else
2894 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2895 }
2896
2897 txqent->hdr.wi.flags = htons(flags);
2898 txqent->hdr.wi.frame_length = htonl(skb->len);
2899
2900 return 0;
2901}
2902
2903/*
2904 * bnad_start_xmit : Netdev entry point for Transmit
2905 * Called under lock held by net_device
2906 */
2907static netdev_tx_t
2908bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2909{
2910 struct bnad *bnad = netdev_priv(netdev);
2911 u32 txq_id = 0;
2912 struct bna_tcb *tcb = NULL;
2913 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2914 u32 prod, q_depth, vect_id;
2915 u32 wis, vectors, len;
2916 int i;
2917 dma_addr_t dma_addr;
2918 struct bna_txq_entry *txqent;
2919
2920 len = skb_headlen(skb);
2921
2922 /* Sanity checks for the skb */
2923
2924 if (unlikely(skb->len <= ETH_HLEN)) {
2925 dev_kfree_skb_any(skb);
2926 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2927 return NETDEV_TX_OK;
2928 }
2929 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2930 dev_kfree_skb_any(skb);
2931 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2932 return NETDEV_TX_OK;
2933 }
2934 if (unlikely(len == 0)) {
2935 dev_kfree_skb_any(skb);
2936 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2937 return NETDEV_TX_OK;
2938 }
2939
2940 tcb = bnad->tx_info[0].tcb[txq_id];
2941
2942 /*
2943 * Takes care of the Tx that is scheduled between clearing the flag
2944 * and the netif_tx_stop_all_queues() call.
2945 */
2946 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2947 dev_kfree_skb_any(skb);
2948 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2949 return NETDEV_TX_OK;
2950 }
2951
2952 q_depth = tcb->q_depth;
2953 prod = tcb->producer_index;
2954 unmap_q = tcb->unmap_q;
2955
2956 vectors = 1 + skb_shinfo(skb)->nr_frags;
2957 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
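/* e.g. a linear head plus 6 page frags is 7 vectors, i.e. 2 WIs */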
2958
2959 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2960 dev_kfree_skb_any(skb);
2961 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2962 return NETDEV_TX_OK;
2963 }
2964
2965 /* Check for available TxQ resources */
2966 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2967 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2968 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2969 u32 sent;
2970 sent = bnad_txcmpl_process(bnad, tcb);
2971 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2972 bna_ib_ack(tcb->i_dbell, sent);
2973 smp_mb__before_atomic();
2974 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2975 } else {
2976 netif_stop_queue(netdev);
2977 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2978 }
2979
2980 smp_mb();
2981 /*
2982 * Check again to deal with race condition between
2983 * netif_stop_queue here, and netif_wake_queue in
2984 * interrupt handler which is not inside netif tx lock.
2985 */
2986 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2987 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2988 return NETDEV_TX_BUSY;
2989 } else {
2990 netif_wake_queue(netdev);
2991 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2992 }
2993 }
2994
2995 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2996 head_unmap = &unmap_q[prod];
2997
2998 /* Program the opcode, flags, frame_len, num_vectors in WI */
2999 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3000 dev_kfree_skb_any(skb);
3001 return NETDEV_TX_OK;
3002 }
3003 txqent->hdr.wi.reserved = 0;
3004 txqent->hdr.wi.num_vectors = vectors;
3005
3006 head_unmap->skb = skb;
3007 head_unmap->nvecs = 0;
3008
3009 /* Program the vectors */
3010 unmap = head_unmap;
3011 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3012 len, DMA_TO_DEVICE);
3013 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3014 dev_kfree_skb_any(skb);
3015 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3016 return NETDEV_TX_OK;
3017 }
3018 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3019 txqent->vector[0].length = htons(len);
3020 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3021 head_unmap->nvecs++;
3022
3023 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3024 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3025 u32 size = skb_frag_size(frag);
3026
3027 if (unlikely(size == 0)) {
3028 /* Undo the changes starting at tcb->producer_index */
3029 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3030 tcb->producer_index);
3031 dev_kfree_skb_any(skb);
3032 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3033 return NETDEV_TX_OK;
3034 }
3035
3036 len += size;
3037
3038 vect_id++;
3039 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3040 vect_id = 0;
3041 BNA_QE_INDX_INC(prod, q_depth);
3042 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3043 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3044 unmap = &unmap_q[prod];
3045 }
3046
3047 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3048 0, size, DMA_TO_DEVICE);
3049 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3050 /* Undo the changes starting at tcb->producer_index */
3051 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3052 tcb->producer_index);
3053 dev_kfree_skb_any(skb);
3054 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3055 return NETDEV_TX_OK;
3056 }
3057
3058 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3059 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3060 txqent->vector[vect_id].length = htons(size);
3061 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3062 dma_addr);
3063 head_unmap->nvecs++;
3064 }
3065
3066 if (unlikely(len != skb->len)) {
3067 /* Undo the changes starting at tcb->producer_index */
3068 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3069 dev_kfree_skb_any(skb);
3070 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3071 return NETDEV_TX_OK;
3072 }
3073
3074 BNA_QE_INDX_INC(prod, q_depth);
3075 tcb->producer_index = prod;
3076
3077 wmb();
3078
3079 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3080 return NETDEV_TX_OK;
3081
3082 skb_tx_timestamp(skb);
3083
3084 bna_txq_prod_indx_doorbell(tcb);
3085
3086 return NETDEV_TX_OK;
3087}
3088
3089/*
3090 * Used spin_lock to synchronize reading of stats structures, which
3091 * are written by BNA under the same lock.
3092 */
3093static void
3094bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3095{
3096 struct bnad *bnad = netdev_priv(netdev);
3097 unsigned long flags;
3098
3099 spin_lock_irqsave(&bnad->bna_lock, flags);
3100
3101 bnad_netdev_qstats_fill(bnad, stats);
3102 bnad_netdev_hwstats_fill(bnad, stats);
3103
3104 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3105}
3106
3107static void
3108bnad_set_rx_ucast_fltr(struct bnad *bnad)
3109{
3110 struct net_device *netdev = bnad->netdev;
3111 int uc_count = netdev_uc_count(netdev);
3112 enum bna_cb_status ret;
3113 u8 *mac_list;
3114 struct netdev_hw_addr *ha;
3115 int entry;
3116
3117 if (netdev_uc_empty(bnad->netdev)) {
3118 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3119 return;
3120 }
3121
3122 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3123 goto mode_default;
3124
3125 mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3126 if (mac_list == NULL)
3127 goto mode_default;
3128
3129 entry = 0;
3130 netdev_for_each_uc_addr(ha, netdev) {
3131 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3132 entry++;
3133 }
3134
3135 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3136 kfree(mac_list);
3137
3138 if (ret != BNA_CB_SUCCESS)
3139 goto mode_default;
3140
3141 return;
3142
3143 /* ucast packets not in UCAM are routed to default function */
3144mode_default:
3145 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3146 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3147}
3148
3149static void
3150bnad_set_rx_mcast_fltr(struct bnad *bnad)
3151{
3152 struct net_device *netdev = bnad->netdev;
3153 int mc_count = netdev_mc_count(netdev);
3154 enum bna_cb_status ret;
3155 u8 *mac_list;
3156
3157 if (netdev->flags & IFF_ALLMULTI)
3158 goto mode_allmulti;
3159
3160 if (netdev_mc_empty(netdev))
3161 return;
3162
3163 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3164 goto mode_allmulti;
3165
3166 mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3167
3168 if (mac_list == NULL)
3169 goto mode_allmulti;
3170
3171 ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3172
3173 /* copy rest of the MCAST addresses */
3174 bnad_netdev_mc_list_get(netdev, mac_list);
3175 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3176 kfree(mac_list);
3177
3178 if (ret != BNA_CB_SUCCESS)
3179 goto mode_allmulti;
3180
3181 return;
3182
3183mode_allmulti:
3184 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3185 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3186}
3187
3188void
3189bnad_set_rx_mode(struct net_device *netdev)
3190{
3191 struct bnad *bnad = netdev_priv(netdev);
3192 enum bna_rxmode new_mode, mode_mask;
3193 unsigned long flags;
3194
3195 spin_lock_irqsave(&bnad->bna_lock, flags);
3196
3197 if (bnad->rx_info[0].rx == NULL) {
3198 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3199 return;
3200 }
3201
3202 /* clear bnad flags to update it with new settings */
3203 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3204 BNAD_CF_ALLMULTI);
3205
3206 new_mode = 0;
3207 if (netdev->flags & IFF_PROMISC) {
3208 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3209 bnad->cfg_flags |= BNAD_CF_PROMISC;
3210 } else {
3211 bnad_set_rx_mcast_fltr(bnad);
3212
3213 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3214 new_mode |= BNA_RXMODE_ALLMULTI;
3215
3216 bnad_set_rx_ucast_fltr(bnad);
3217
3218 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3219 new_mode |= BNA_RXMODE_DEFAULT;
3220 }
3221
3222 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3223 BNA_RXMODE_ALLMULTI;
3224 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3225
3226 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3227}
3228
3229/*
3230 * bna_lock is used to sync writes to netdev->addr
3231 * conf_lock cannot be used since this call may be made
3232 * in a non-blocking context.
3233 */
3234static int
3235bnad_set_mac_address(struct net_device *netdev, void *addr)
3236{
3237 int err;
3238 struct bnad *bnad = netdev_priv(netdev);
3239 struct sockaddr *sa = (struct sockaddr *)addr;
3240 unsigned long flags;
3241
3242 spin_lock_irqsave(&bnad->bna_lock, flags);
3243
3244 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3245 if (!err)
3246 eth_hw_addr_set(netdev, sa->sa_data);
3247
3248 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3249
3250 return err;
3251}
3252
3253static int
3254bnad_mtu_set(struct bnad *bnad, int frame_size)
3255{
3256 unsigned long flags;
3257
3258 init_completion(&bnad->bnad_completions.mtu_comp);
3259
3260 spin_lock_irqsave(&bnad->bna_lock, flags);
3261 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3262 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3263
3264 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3265
3266 return bnad->bnad_completions.mtu_comp_status;
3267}
3268
3269static int
3270bnad_change_mtu(struct net_device *netdev, int new_mtu)
3271{
3272 int err, mtu;
3273 struct bnad *bnad = netdev_priv(netdev);
3274 u32 frame, new_frame;
3275
3276 mutex_lock(&bnad->conf_mutex);
3277
3278 mtu = netdev->mtu;
3279 netdev->mtu = new_mtu;
3280
3281 frame = BNAD_FRAME_SIZE(mtu);
3282 new_frame = BNAD_FRAME_SIZE(new_mtu);
3283
3284 /* check if multi-buffer needs to be enabled */
3285 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3286 netif_running(bnad->netdev)) {
3287 /* only when transition is over 4K */
3288 if ((frame <= 4096 && new_frame > 4096) ||
3289 (frame > 4096 && new_frame <= 4096))
3290 bnad_reinit_rx(bnad);
3291 }
3292
3293 err = bnad_mtu_set(bnad, new_frame);
3294 if (err)
3295 err = -EBUSY;
3296
3297 mutex_unlock(&bnad->conf_mutex);
3298 return err;
3299}
3300
3301static int
3302bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3303{
3304 struct bnad *bnad = netdev_priv(netdev);
3305 unsigned long flags;
3306
3307 if (!bnad->rx_info[0].rx)
3308 return 0;
3309
3310 mutex_lock(&bnad->conf_mutex);
3311
3312 spin_lock_irqsave(&bnad->bna_lock, flags);
3313 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3314 set_bit(vid, bnad->active_vlans);
3315 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3316
3317 mutex_unlock(&bnad->conf_mutex);
3318
3319 return 0;
3320}
3321
3322static int
3323bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3324{
3325 struct bnad *bnad = netdev_priv(netdev);
3326 unsigned long flags;
3327
3328 if (!bnad->rx_info[0].rx)
3329 return 0;
3330
3331 mutex_lock(&bnad->conf_mutex);
3332
3333 spin_lock_irqsave(&bnad->bna_lock, flags);
3334 clear_bit(vid, bnad->active_vlans);
3335 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3336 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3337
3338 mutex_unlock(&bnad->conf_mutex);
3339
3340 return 0;
3341}
3342
3343static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3344{
3345 struct bnad *bnad = netdev_priv(dev);
3346 netdev_features_t changed = features ^ dev->features;
3347
3348 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3349 unsigned long flags;
3350
3351 spin_lock_irqsave(&bnad->bna_lock, flags);
3352
3353 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3354 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3355 else
3356 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3357
3358 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3359 }
3360
3361 return 0;
3362}
3363
3364#ifdef CONFIG_NET_POLL_CONTROLLER
3365static void
3366bnad_netpoll(struct net_device *netdev)
3367{
3368 struct bnad *bnad = netdev_priv(netdev);
3369 struct bnad_rx_info *rx_info;
3370 struct bnad_rx_ctrl *rx_ctrl;
3371 u32 curr_mask;
3372 int i, j;
3373
3374 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3375 bna_intx_disable(&bnad->bna, curr_mask);
3376 bnad_isr(bnad->pcidev->irq, netdev);
3377 bna_intx_enable(&bnad->bna, curr_mask);
3378 } else {
3379 /*
3380 * Tx processing may happen in sending context, so no need
3381 * to explicitly process completions here
3382 */
3383
3384 /* Rx processing */
3385 for (i = 0; i < bnad->num_rx; i++) {
3386 rx_info = &bnad->rx_info[i];
3387 if (!rx_info->rx)
3388 continue;
3389 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3390 rx_ctrl = &rx_info->rx_ctrl[j];
3391 if (rx_ctrl->ccb)
3392 bnad_netif_rx_schedule_poll(bnad,
3393 rx_ctrl->ccb);
3394 }
3395 }
3396 }
3397}
3398#endif
3399
3400static const struct net_device_ops bnad_netdev_ops = {
3401 .ndo_open = bnad_open,
3402 .ndo_stop = bnad_stop,
3403 .ndo_start_xmit = bnad_start_xmit,
3404 .ndo_get_stats64 = bnad_get_stats64,
3405 .ndo_set_rx_mode = bnad_set_rx_mode,
3406 .ndo_validate_addr = eth_validate_addr,
3407 .ndo_set_mac_address = bnad_set_mac_address,
3408 .ndo_change_mtu = bnad_change_mtu,
3409 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3410 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3411 .ndo_set_features = bnad_set_features,
3412#ifdef CONFIG_NET_POLL_CONTROLLER
3413 .ndo_poll_controller = bnad_netpoll
3414#endif
3415};
3416
3417static void
3418bnad_netdev_init(struct bnad *bnad)
3419{
3420 struct net_device *netdev = bnad->netdev;
3421
3422 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3423 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3424 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3425 NETIF_F_HW_VLAN_CTAG_RX;
3426
3427 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3428 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3429 NETIF_F_TSO | NETIF_F_TSO6;
3430
3431 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
3432 NETIF_F_HIGHDMA;
3433
3434 netdev->mem_start = bnad->mmio_start;
3435 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3436
3437 /* MTU range: 46 - 9000 */
3438 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3439 netdev->max_mtu = BNAD_JUMBO_MTU;
3440
3441 netdev->netdev_ops = &bnad_netdev_ops;
3442 bnad_set_ethtool_ops(netdev);
3443}
3444
3445/*
3446 * 1. Initialize the bnad structure
3447 * 2. Setup netdev pointer in pci_dev
3448 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3449 * 4. Initialize work queue.
3450 */
3451static int
3452bnad_init(struct bnad *bnad,
3453 struct pci_dev *pdev, struct net_device *netdev)
3454{
3455 unsigned long flags;
3456
3457 SET_NETDEV_DEV(netdev, &pdev->dev);
3458 pci_set_drvdata(pdev, netdev);
3459
3460 bnad->netdev = netdev;
3461 bnad->pcidev = pdev;
3462 bnad->mmio_start = pci_resource_start(pdev, 0);
3463 bnad->mmio_len = pci_resource_len(pdev, 0);
3464 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3465 if (!bnad->bar0) {
3466 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3467 return -ENOMEM;
3468 }
3469 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3470 (unsigned long long) bnad->mmio_len);
3471
3472 spin_lock_irqsave(&bnad->bna_lock, flags);
3473 if (!bnad_msix_disable)
3474 bnad->cfg_flags = BNAD_CF_MSIX;
3475
3476 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3477
3478 bnad_q_num_init(bnad);
3479 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3480
3481 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3482 (bnad->num_rx * bnad->num_rxp_per_rx) +
3483 BNAD_MAILBOX_MSIX_VECTORS;
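/* one MSI-X vector per TxQ, one per Rx path, plus the mailbox vector(s) */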
3484
3485 bnad->txq_depth = BNAD_TXQ_DEPTH;
3486 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3487
3488 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3489 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3490
3491 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3492 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3493 if (!bnad->work_q) {
3494 iounmap(bnad->bar0);
3495 return -ENOMEM;
3496 }
3497
3498 return 0;
3499}
3500
3501/*
3502 * Must be called after bnad_pci_uninit()
3503 * so that iounmap() and pci_set_drvdata(NULL)
3504 * happens only after PCI uninitialization.
3505 */
3506static void
3507bnad_uninit(struct bnad *bnad)
3508{
3509 if (bnad->work_q) {
3510 destroy_workqueue(bnad->work_q);
3511 bnad->work_q = NULL;
3512 }
3513
3514 if (bnad->bar0)
3515 iounmap(bnad->bar0);
3516}
3517
3518/*
3519 * Initialize locks
3520 a) Per-ioceth mutex used for serializing configuration
3521 changes from OS interface
3522 b) spin lock used to protect bna state machine
3523 */
3524static void
3525bnad_lock_init(struct bnad *bnad)
3526{
3527 spin_lock_init(&bnad->bna_lock);
3528 mutex_init(&bnad->conf_mutex);
3529}
3530
3531static void
3532bnad_lock_uninit(struct bnad *bnad)
3533{
3534 mutex_destroy(&bnad->conf_mutex);
3535}
3536
3537/* PCI Initialization */
3538static int
3539bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
3540{
3541 int err;
3542
3543 err = pci_enable_device(pdev);
3544 if (err)
3545 return err;
3546 err = pci_request_regions(pdev, BNAD_NAME);
3547 if (err)
3548 goto disable_device;
3549 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3550 if (err)
3551 goto release_regions;
3552 pci_set_master(pdev);
3553 return 0;
3554
3555release_regions:
3556 pci_release_regions(pdev);
3557disable_device:
3558 pci_disable_device(pdev);
3559
3560 return err;
3561}
3562
3563static void
3564bnad_pci_uninit(struct pci_dev *pdev)
3565{
3566 pci_release_regions(pdev);
3567 pci_disable_device(pdev);
3568}
3569
3570static int
3571bnad_pci_probe(struct pci_dev *pdev,
3572 const struct pci_device_id *pcidev_id)
3573{
3574 int err;
3575 struct bnad *bnad;
3576 struct bna *bna;
3577 struct net_device *netdev;
3578 struct bfa_pcidev pcidev_info;
3579 unsigned long flags;
3580
3581 mutex_lock(&bnad_fwimg_mutex);
3582 if (!cna_get_firmware_buf(pdev)) {
3583 mutex_unlock(&bnad_fwimg_mutex);
3584 dev_err(&pdev->dev, "failed to load firmware image!\n");
3585 return -ENODEV;
3586 }
3587 mutex_unlock(&bnad_fwimg_mutex);
3588
3589 /*
3590 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3591 * bnad = netdev->priv
3592 */
3593 netdev = alloc_etherdev(sizeof(struct bnad));
3594 if (!netdev) {
3595 err = -ENOMEM;
3596 return err;
3597 }
3598 bnad = netdev_priv(netdev);
3599 bnad_lock_init(bnad);
3600 bnad->id = atomic_inc_return(&bna_id) - 1;
3601
3602 mutex_lock(&bnad->conf_mutex);
3603 /* PCI initialization */
3604 err = bnad_pci_init(bnad, pdev);
3605 if (err)
3606 goto unlock_mutex;
3607
3608 /*
3609 * Initialize bnad structure
3610 * Setup relation between pci_dev & netdev
3611 */
3612 err = bnad_init(bnad, pdev, netdev);
3613 if (err)
3614 goto pci_uninit;
3615
3616 /* Initialize netdev structure, set up ethtool ops */
3617 bnad_netdev_init(bnad);
3618
3619 /* Set link to down state */
3620 netif_carrier_off(netdev);
3621
3622 /* Setup the debugfs node for this bfad */
3623 if (bna_debugfs_enable)
3624 bnad_debugfs_init(bnad);
3625
3626 /* Get resource requirement form bna */
3627 spin_lock_irqsave(&bnad->bna_lock, flags);
3628 bna_res_req(&bnad->res_info[0]);
3629 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3630
3631 /* Allocate resources from bna */
3632 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3633 if (err)
3634 goto drv_uninit;
3635
3636 bna = &bnad->bna;
3637
3638 /* Setup pcidev_info for bna_init() */
3639 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3640 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3641 pcidev_info.device_id = bnad->pcidev->device;
3642 pcidev_info.pci_bar_kva = bnad->bar0;
3643
3644 spin_lock_irqsave(&bnad->bna_lock, flags);
3645 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3646 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3647
3648 bnad->stats.bna_stats = &bna->stats;
3649
3650 bnad_enable_msix(bnad);
3651 err = bnad_mbox_irq_alloc(bnad);
3652 if (err)
3653 goto res_free;
3654
3655 /* Set up timers */
3656 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3657 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3658 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3659 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3660 0);
3661
3662 /*
3663 * Start the chip
3664 * If the callback comes back with an error, we bail out.
3665 * This is a catastrophic error.
3666 */
3667 err = bnad_ioceth_enable(bnad);
3668 if (err) {
3669 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3670 goto probe_success;
3671 }
3672
3673 spin_lock_irqsave(&bnad->bna_lock, flags);
3674 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3675 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3676 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3677 bna_attr(bna)->num_rxp - 1);
3678 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3679 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3680 err = -EIO;
3681 }
3682 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3683 if (err)
3684 goto disable_ioceth;
3685
3686 spin_lock_irqsave(&bnad->bna_lock, flags);
3687 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3689
3690 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3691 if (err) {
3692 err = -EIO;
3693 goto disable_ioceth;
3694 }
3695
3696 spin_lock_irqsave(&bnad->bna_lock, flags);
3697 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3698 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3699
3700 /* Get the burnt-in mac */
3701 spin_lock_irqsave(&bnad->bna_lock, flags);
3702 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3703 bnad_set_netdev_perm_addr(bnad);
3704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3705
3706 mutex_unlock(&bnad->conf_mutex);
3707
3708 /* Finally, register with net_device layer */
3709 err = register_netdev(netdev);
3710 if (err) {
3711 dev_err(&pdev->dev, "registering net device failed\n");
3712 goto probe_uninit;
3713 }
3714 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3715
3716 return 0;
3717
3718probe_success:
3719 mutex_unlock(&bnad->conf_mutex);
3720 return 0;
3721
3722probe_uninit:
3723 mutex_lock(&bnad->conf_mutex);
3724 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3725disable_ioceth:
3726 bnad_ioceth_disable(bnad);
3727 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3728 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3729 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3730 spin_lock_irqsave(&bnad->bna_lock, flags);
3731 bna_uninit(bna);
3732 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3733 bnad_mbox_irq_free(bnad);
3734 bnad_disable_msix(bnad);
3735res_free:
3736 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3737drv_uninit:
3738 /* Remove the debugfs node for this bnad */
3739 kfree(bnad->regdata);
3740 bnad_debugfs_uninit(bnad);
3741 bnad_uninit(bnad);
3742pci_uninit:
3743 bnad_pci_uninit(pdev);
3744unlock_mutex:
3745 mutex_unlock(&bnad->conf_mutex);
3746 bnad_lock_uninit(bnad);
3747 free_netdev(netdev);
3748 return err;
3749}
3750
3751static void
3752bnad_pci_remove(struct pci_dev *pdev)
3753{
3754 struct net_device *netdev = pci_get_drvdata(pdev);
3755 struct bnad *bnad;
3756 struct bna *bna;
3757 unsigned long flags;
3758
3759 if (!netdev)
3760 return;
3761
3762 bnad = netdev_priv(netdev);
3763 bna = &bnad->bna;
3764
3765 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3766 unregister_netdev(netdev);
3767
3768 mutex_lock(&bnad->conf_mutex);
3769 bnad_ioceth_disable(bnad);
3770 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3771 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3772 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3773 spin_lock_irqsave(&bnad->bna_lock, flags);
3774 bna_uninit(bna);
3775 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3776
3777 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3778 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3779 bnad_mbox_irq_free(bnad);
3780 bnad_disable_msix(bnad);
3781 bnad_pci_uninit(pdev);
3782 mutex_unlock(&bnad->conf_mutex);
3783 bnad_lock_uninit(bnad);
3784 /* Remove the debugfs node for this bnad */
3785 kfree(bnad->regdata);
3786 bnad_debugfs_uninit(bnad);
3787 bnad_uninit(bnad);
3788 free_netdev(netdev);
3789}
3790
3791static const struct pci_device_id bnad_pci_id_table[] = {
3792 {
3793 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3794 PCI_DEVICE_ID_BROCADE_CT),
3795 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3796 .class_mask = 0xffff00
3797 },
3798 {
3799 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3800 BFA_PCI_DEVICE_ID_CT2),
3801 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3802 .class_mask = 0xffff00
3803 },
3804 {0, },
3805};
3806
3807MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3808
3809static struct pci_driver bnad_pci_driver = {
3810 .name = BNAD_NAME,
3811 .id_table = bnad_pci_id_table,
3812 .probe = bnad_pci_probe,
3813 .remove = bnad_pci_remove,
3814};
3815
3816static int __init
3817bnad_module_init(void)
3818{
3819 int err;
3820
3821 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3822
3823 err = pci_register_driver(&bnad_pci_driver);
3824 if (err < 0) {
3825 pr_err("bna: PCI driver registration failed err=%d\n", err);
3826 return err;
3827 }
3828
3829 return 0;
3830}
3831
3832static void __exit
3833bnad_module_exit(void)
3834{
3835 pci_unregister_driver(&bnad_pci_driver);
3836 release_firmware(bfi_fw);
3837}
3838
3839module_init(bnad_module_init);
3840module_exit(bnad_module_exit);
3841
3842MODULE_AUTHOR("Brocade");
3843MODULE_LICENSE("GPL");
3844MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3845MODULE_FIRMWARE(CNA_FW_FILE_CT);
3846MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3847
