/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, sizeof(phy_fw_ver));
		/* This may become truncated. */
		scnprintf(buf, buf_len,
			  "%sbc %d.%d.%d%s%s",
			  bp->fw_ver,
			  (bp->common.bc_ver & 0xff0000) >> 16,
			  (bp->common.bc_ver & 0xff00) >> 8,
			  (bp->common.bc_ver & 0xff),
			  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
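			/* txdata is laid out CoS-major: the entry for
			 * (cos, eth queue i) lives at cos * num_eth + i,
			 * so recompute the index with the reduced count.
			 */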
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	/* Ensure subsequent loads occur after hw_cons */
	smp_rmb();

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

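	/* SUB_S16() does a signed 16-bit subtraction, so the comparison
	 * stays correct when the SGE index wraps around.
	 */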
	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If the ring is not full, include the element holding last_max
	 * in the scan below
	 */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
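		/* Integer division truncates: full_page is the largest
		 * multiple of the segment size that fits in SGE_PAGES.
		 */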
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: count of segments
 *
 * Approximates the MSS for this aggregation using its first packet,
 * and computes the number of aggregated segments and the gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

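	/* MSS approximation: the payload of the first packet, i.e. its
	 * length on the BD minus all header bytes.
	 */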
	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

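	/* If another SGE-sized chunk still fits in this page, take an extra
	 * reference so the page survives; otherwise hand our reference over
	 * to the ring and start with a fresh page on the next allocation.
	 */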
	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
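			/* Split the page into gro_size (MSS) sized frags so
			 * the stack sees one frag per segment; every frag
			 * after the first needs its own page reference.
			 */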
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static struct sk_buff *
bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
{
	struct sk_buff *skb;

	if (fp->rx_frag_size)
		skb = build_skb(data, fp->rx_frag_size);
	else
		skb = slab_build_skb(data);
	return skb;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return napi_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
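/* tcp_gro_complete() expects th->check to hold the pseudo-header checksum,
 * so the helpers below recompute it from the aggregated length before the
 * skb is handed to the stack.
 */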
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			netdev_WARN_ONCE(bp->dev,
					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
					 be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = bnx2x_build_skb(fp, data);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			bnx2x_frag_free(fp, new_data);
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier the TPA state-machine might
		 * enter an inconsistent state and the kernel stack might be
		 * provided with an incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring,
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = bnx2x_build_skb(fp, data);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_PERCENT_BW(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

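		/* With no ETH queues allocated no traffic can flow, so
		 * report the link as down in that case as well.
		 */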
		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	if (bp->force_link_down) {
		bp->link_vars.link_up = 0;
		return;
	}

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

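	/* The tail entries of each SGE ring page are reserved as a pointer
	 * chaining to the next page; the last page wraps back to the first.
	 */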
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

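	/* Vectors were requested in this order: slowpath (PF only), CNIC,
	 * then one per ETH queue - walk them back in the same order.
	 */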
1632 | if (nvecs == offset) |
1633 | return; |
1634 | |
1635 | /* VFs don't have a default SB */ |
1636 | if (IS_PF(bp)) { |
1637 | free_irq(bp->msix_table[offset].vector, bp->dev); |
1638 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n" , |
1639 | bp->msix_table[offset].vector); |
1640 | offset++; |
1641 | } |
1642 | |
1643 | if (CNIC_SUPPORT(bp)) { |
1644 | if (nvecs == offset) |
1645 | return; |
1646 | offset++; |
1647 | } |
1648 | |
1649 | for_each_eth_queue(bp, i) { |
1650 | if (nvecs == offset) |
1651 | return; |
1652 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n" , |
1653 | i, bp->msix_table[offset].vector); |
1654 | |
1655 | free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); |
1656 | } |
1657 | } |
1658 | |
1659 | void bnx2x_free_irq(struct bnx2x *bp) |
1660 | { |
1661 | if (bp->flags & USING_MSIX_FLAG && |
1662 | !(bp->flags & USING_SINGLE_MSIX_FLAG)) { |
1663 | int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); |
1664 | |
1665 | /* vfs don't have a default status block */ |
1666 | if (IS_PF(bp)) |
1667 | nvecs++; |
1668 | |
1669 | bnx2x_free_msix_irqs(bp, nvecs); |
1670 | } else { |
1671 | free_irq(bp->dev->irq, bp->dev); |
1672 | } |
1673 | } |
1674 | |
1675 | int bnx2x_enable_msix(struct bnx2x *bp) |
1676 | { |
1677 | int msix_vec = 0, i, rc; |
1678 | |
1679 | /* VFs don't have a default status block */ |
1680 | if (IS_PF(bp)) { |
1681 | bp->msix_table[msix_vec].entry = msix_vec; |
1682 | BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n" , |
1683 | bp->msix_table[0].entry); |
1684 | msix_vec++; |
1685 | } |
1686 | |
1687 | /* Cnic requires an msix vector for itself */ |
1688 | if (CNIC_SUPPORT(bp)) { |
1689 | bp->msix_table[msix_vec].entry = msix_vec; |
BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1691 | msix_vec, bp->msix_table[msix_vec].entry); |
1692 | msix_vec++; |
1693 | } |
1694 | |
1695 | /* We need separate vectors for ETH queues only (not FCoE) */ |
1696 | for_each_eth_queue(bp, i) { |
1697 | bp->msix_table[msix_vec].entry = msix_vec; |
BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1699 | msix_vec, msix_vec, i); |
1700 | msix_vec++; |
1701 | } |
1702 | |
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1704 | msix_vec); |
1705 | |
rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1708 | /* |
1709 | * reconfigure number of tx/rx queues according to available |
1710 | * MSI-X vectors |
1711 | */ |
1712 | if (rc == -ENOSPC) { |
/* Get by with single vector */
rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
}

BNX2X_DEV_INFO("Using single MSI-X vector\n");
bp->flags |= USING_SINGLE_MSIX_FLAG;

BNX2X_DEV_INFO("set number of queues to 1\n");
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
} else if (rc < msix_vec) {
/* how many fewer vectors will we have? */
int diff = msix_vec - rc;

BNX2X_DEV_INFO("Trying to use fewer MSI-X vectors: %d\n", rc);

/*
* decrease number of queues by number of unallocated entries
*/
bp->num_ethernet_queues -= diff;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
1744 | } |
1745 | |
1746 | bp->flags |= USING_MSIX_FLAG; |
1747 | |
1748 | return 0; |
1749 | |
1750 | no_msix: |
1751 | /* fall to INTx if not enough memory */ |
1752 | if (rc == -ENOMEM) |
1753 | bp->flags |= DISABLE_MSI_FLAG; |
1754 | |
1755 | return rc; |
1756 | } |
1757 | |
1758 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
1759 | { |
1760 | int i, rc, offset = 0; |
1761 | |
1762 | /* no default status block for vf */ |
1763 | if (IS_PF(bp)) { |
rc = request_irq(bp->msix_table[offset++].vector,
bnx2x_msix_sp_int, 0,
bp->dev->name, bp->dev);
1767 | if (rc) { |
BNX2X_ERR("request sp irq failed\n");
1769 | return -EBUSY; |
1770 | } |
1771 | } |
1772 | |
1773 | if (CNIC_SUPPORT(bp)) |
1774 | offset++; |
1775 | |
1776 | for_each_eth_queue(bp, i) { |
1777 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1779 | bp->dev->name, i); |
1780 | |
rc = request_irq(bp->msix_table[offset].vector,
bnx2x_msix_fp_int, 0, fp->name, fp);
if (rc) {
BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
bp->msix_table[offset].vector, rc);
bnx2x_free_msix_irqs(bp, offset);
1787 | return -EBUSY; |
1788 | } |
1789 | |
1790 | offset++; |
1791 | } |
1792 | |
1793 | i = BNX2X_NUM_ETH_QUEUES(bp); |
1794 | if (IS_PF(bp)) { |
1795 | offset = 1 + CNIC_SUPPORT(bp); |
netdev_info(bp->dev,
"using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1798 | bp->msix_table[0].vector, |
1799 | 0, bp->msix_table[offset].vector, |
1800 | i - 1, bp->msix_table[offset + i - 1].vector); |
1801 | } else { |
1802 | offset = CNIC_SUPPORT(bp); |
netdev_info(bp->dev,
"using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1805 | 0, bp->msix_table[offset].vector, |
1806 | i - 1, bp->msix_table[offset + i - 1].vector); |
1807 | } |
1808 | return 0; |
1809 | } |
1810 | |
1811 | int bnx2x_enable_msi(struct bnx2x *bp) |
1812 | { |
1813 | int rc; |
1814 | |
rc = pci_enable_msi(bp->pdev);
if (rc) {
BNX2X_DEV_INFO("MSI is not attainable\n");
1818 | return -1; |
1819 | } |
1820 | bp->flags |= USING_MSI_FLAG; |
1821 | |
1822 | return 0; |
1823 | } |
1824 | |
1825 | static int bnx2x_req_irq(struct bnx2x *bp) |
1826 | { |
1827 | unsigned long flags; |
1828 | unsigned int irq; |
1829 | |
1830 | if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) |
1831 | flags = 0; |
1832 | else |
1833 | flags = IRQF_SHARED; |
1834 | |
1835 | if (bp->flags & USING_MSIX_FLAG) |
1836 | irq = bp->msix_table[0].vector; |
1837 | else |
1838 | irq = bp->pdev->irq; |
1839 | |
return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1841 | } |
1842 | |
1843 | static int bnx2x_setup_irqs(struct bnx2x *bp) |
1844 | { |
int rc = 0;

if (bp->flags & USING_MSIX_FLAG &&
1847 | !(bp->flags & USING_SINGLE_MSIX_FLAG)) { |
1848 | rc = bnx2x_req_msix_irqs(bp); |
1849 | if (rc) |
1850 | return rc; |
1851 | } else { |
1852 | rc = bnx2x_req_irq(bp); |
1853 | if (rc) { |
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1855 | return rc; |
1856 | } |
1857 | if (bp->flags & USING_MSI_FLAG) { |
1858 | bp->dev->irq = bp->pdev->irq; |
netdev_info(bp->dev, "using MSI IRQ %d\n",
1860 | bp->dev->irq); |
1861 | } |
1862 | if (bp->flags & USING_MSIX_FLAG) { |
1863 | bp->dev->irq = bp->msix_table[0].vector; |
netdev_info(bp->dev, "using MSIX IRQ %d\n",
1865 | bp->dev->irq); |
1866 | } |
1867 | } |
1868 | |
1869 | return 0; |
1870 | } |
1871 | |
1872 | static void bnx2x_napi_enable_cnic(struct bnx2x *bp) |
1873 | { |
1874 | int i; |
1875 | |
1876 | for_each_rx_queue_cnic(bp, i) { |
napi_enable(&bnx2x_fp(bp, i, napi));
1878 | } |
1879 | } |
1880 | |
1881 | static void bnx2x_napi_enable(struct bnx2x *bp) |
1882 | { |
1883 | int i; |
1884 | |
1885 | for_each_eth_queue(bp, i) { |
napi_enable(&bnx2x_fp(bp, i, napi));
1887 | } |
1888 | } |
1889 | |
1890 | static void bnx2x_napi_disable_cnic(struct bnx2x *bp) |
1891 | { |
1892 | int i; |
1893 | |
1894 | for_each_rx_queue_cnic(bp, i) { |
napi_disable(&bnx2x_fp(bp, i, napi));
1896 | } |
1897 | } |
1898 | |
1899 | static void bnx2x_napi_disable(struct bnx2x *bp) |
1900 | { |
1901 | int i; |
1902 | |
1903 | for_each_eth_queue(bp, i) { |
napi_disable(&bnx2x_fp(bp, i, napi));
1905 | } |
1906 | } |
1907 | |
1908 | void bnx2x_netif_start(struct bnx2x *bp) |
1909 | { |
if (netif_running(bp->dev)) {
1911 | bnx2x_napi_enable(bp); |
1912 | if (CNIC_LOADED(bp)) |
1913 | bnx2x_napi_enable_cnic(bp); |
1914 | bnx2x_int_enable(bp); |
1915 | if (bp->state == BNX2X_STATE_OPEN) |
netif_tx_wake_all_queues(bp->dev);
1917 | } |
1918 | } |
1919 | |
1920 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) |
1921 | { |
1922 | bnx2x_int_disable_sync(bp, disable_hw); |
1923 | bnx2x_napi_disable(bp); |
1924 | if (CNIC_LOADED(bp)) |
1925 | bnx2x_napi_disable_cnic(bp); |
1926 | } |
1927 | |
1928 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
1929 | struct net_device *sb_dev) |
1930 | { |
1931 | struct bnx2x *bp = netdev_priv(dev); |
1932 | |
1933 | if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { |
1934 | struct ethhdr *hdr = (struct ethhdr *)skb->data; |
1935 | u16 ether_type = ntohs(hdr->h_proto); |
1936 | |
1937 | /* Skip VLAN tag if present */ |
1938 | if (ether_type == ETH_P_8021Q) { |
1939 | struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); |
1940 | |
1941 | ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); |
1942 | } |
1943 | |
1944 | /* If ethertype is FCoE or FIP - use FCoE ring */ |
1945 | if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) |
1946 | return bnx2x_fcoe_tx(bp, txq_index); |
1947 | } |
1948 | |
1949 | /* select a non-FCoE queue */ |
1950 | return netdev_pick_tx(dev, skb, NULL) % |
1951 | (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); |
1952 | } |
1953 | |
1954 | void bnx2x_set_num_queues(struct bnx2x *bp) |
1955 | { |
1956 | /* RSS queues */ |
1957 | bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); |
1958 | |
1959 | /* override in STORAGE SD modes */ |
1960 | if (IS_MF_STORAGE_ONLY(bp)) |
1961 | bp->num_ethernet_queues = 1; |
1962 | |
1963 | /* Add special queues */ |
1964 | bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ |
1965 | bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; |
1966 | |
BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1968 | } |
1969 | |
1970 | /** |
1971 | * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues |
1972 | * |
1973 | * @bp: Driver handle |
1974 | * @include_cnic: handle cnic case |
1975 | * |
* We currently support at most 16 Tx queues for each CoS, thus we will
* allocate a multiple of 16 for ETH L2 rings according to the value of
* bp->max_cos.
*
* If there is an FCoE L2 queue, the appropriate Tx queue will have the next
* index after all ETH L2 indices.
*
* If the actual number of Tx queues (for each CoS) is less than 16, there
* will be holes at the end of each group of 16 ETH L2 indices (0..15,
* 16..31, ...) with indices that are not coupled with any real Tx queue.
1986 | * |
1987 | * The proper configuration of skb->queue_mapping is handled by |
1988 | * bnx2x_select_queue() and __skb_tx_hash(). |
1989 | * |
1990 | * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() |
1991 | * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). |
1992 | */ |
1993 | static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) |
1994 | { |
1995 | int rc, tx, rx; |
1996 | |
1997 | tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; |
1998 | rx = BNX2X_NUM_ETH_QUEUES(bp); |
1999 | |
2000 | /* account for fcoe queue */ |
2001 | if (include_cnic && !NO_FCOE(bp)) { |
2002 | rx++; |
2003 | tx++; |
2004 | } |
2005 | |
rc = netif_set_real_num_tx_queues(bp->dev, tx);
if (rc) {
BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
return rc;
}
rc = netif_set_real_num_rx_queues(bp->dev, rx);
if (rc) {
BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
return rc;
}

DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2018 | tx, rx); |
2019 | |
2020 | return rc; |
2021 | } |
2022 | |
2023 | static void bnx2x_set_rx_buf_size(struct bnx2x *bp) |
2024 | { |
2025 | int i; |
2026 | |
2027 | for_each_queue(bp, i) { |
2028 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
2029 | u32 mtu; |
2030 | |
2031 | /* Always use a mini-jumbo MTU for the FCoE L2 ring */ |
2032 | if (IS_FCOE_IDX(i)) |
2033 | /* |
2034 | * Although there are no IP frames expected to arrive to |
2035 | * this ring we still want to add an |
2036 | * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer |
2037 | * overrun attack. |
2038 | */ |
2039 | mtu = BNX2X_FCOE_MINI_JUMBO_MTU; |
2040 | else |
2041 | mtu = bp->dev->mtu; |
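
/* Rough illustrative sizing (not exact byte counts): with a 1500-byte
 * MTU the buffer must hold FW start alignment + IP alignment padding +
 * L2 overhead + 1500 bytes of payload + FW end alignment, and the sum
 * is then rounded up by SKB_DATA_ALIGN() below.
 */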
2042 | fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + |
2043 | IP_HEADER_ALIGNMENT_PADDING + |
2044 | ETH_OVERHEAD + |
2045 | mtu + |
2046 | BNX2X_FW_RX_ALIGN_END; |
2047 | fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); |
2048 | /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ |
2049 | if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) |
2050 | fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; |
2051 | else |
2052 | fp->rx_frag_size = 0; |
2053 | } |
2054 | } |
2055 | |
static int bnx2x_init_rss(struct bnx2x *bp)
2057 | { |
2058 | int i; |
2059 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); |
2060 | |
2061 | /* Prepare the initial contents for the indirection table if RSS is |
2062 | * enabled |
2063 | */ |
2064 | for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) |
2065 | bp->rss_conf_obj.ind_table[i] = |
2066 | bp->fp->cl_id + |
ethtool_rxfh_indir_default(i, num_eth_queues);
2068 | |
2069 | /* |
2070 | * For 57710 and 57711 SEARCHER configuration (rss_keys) is |
* per-port, so if explicit configuration is needed, do it only
2072 | * for a PMF. |
2073 | * |
2074 | * For 57712 and newer on the other hand it's a per-function |
2075 | * configuration. |
2076 | */ |
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2078 | } |
2079 | |
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
bool config_hash, bool enable)
2082 | { |
2083 | struct bnx2x_config_rss_params params = {NULL}; |
2084 | |
2085 | /* Although RSS is meaningless when there is a single HW queue we |
2086 | * still need it enabled in order to have HW Rx hash generated. |
2087 | * |
2088 | * if (!is_eth_multi(bp)) |
2089 | * bp->multi_mode = ETH_RSS_MODE_DISABLED; |
2090 | */ |
2091 | |
2092 | params.rss_obj = rss_obj; |
2093 | |
2094 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); |
2095 | |
2096 | if (enable) { |
2097 | __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); |
2098 | |
2099 | /* RSS configuration */ |
2100 | __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); |
2101 | __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); |
2102 | __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); |
2103 | __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); |
2104 | if (rss_obj->udp_rss_v4) |
2105 | __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); |
2106 | if (rss_obj->udp_rss_v6) |
2107 | __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); |
2108 | |
2109 | if (!CHIP_IS_E1x(bp)) { |
2110 | /* valid only for TUNN_MODE_VXLAN tunnel mode */ |
2111 | __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags); |
2112 | __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags); |
2113 | |
2114 | /* valid only for TUNN_MODE_GRE tunnel mode */ |
2115 | __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags); |
2116 | } |
2117 | } else { |
2118 | __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); |
2119 | } |
2120 | |
2121 | /* Hash bits */ |
2122 | params.rss_result_mask = MULTI_MASK; |
2123 | |
2124 | memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); |
2125 | |
2126 | if (config_hash) { |
2127 | /* RSS keys */ |
netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2129 | __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); |
2130 | } |
2131 | |
2132 | if (IS_PF(bp)) |
return bnx2x_config_rss(bp, &params);
2134 | else |
return bnx2x_vfpf_config_rss(bp, &params);
2136 | } |
2137 | |
2138 | static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) |
2139 | { |
2140 | struct bnx2x_func_state_params func_params = {NULL}; |
2141 | |
2142 | /* Prepare parameters for function state transitions */ |
2143 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
2144 | |
2145 | func_params.f_obj = &bp->func_obj; |
2146 | func_params.cmd = BNX2X_F_CMD_HW_INIT; |
2147 | |
2148 | func_params.params.hw_init.load_phase = load_code; |
2149 | |
return bnx2x_func_state_change(bp, &func_params);
2151 | } |
2152 | |
2153 | /* |
2154 | * Cleans the object that have internal lists without sending |
2155 | * ramrods. Should be run when interrupts are disabled. |
2156 | */ |
2157 | void bnx2x_squeeze_objects(struct bnx2x *bp) |
2158 | { |
2159 | int rc; |
2160 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; |
2161 | struct bnx2x_mcast_ramrod_params rparam = {NULL}; |
2162 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; |
2163 | |
2164 | /***************** Cleanup MACs' object first *************************/ |
2165 | |
2166 | /* Wait for completion of requested */ |
2167 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
2168 | /* Perform a dry cleanup */ |
2169 | __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); |
2170 | |
2171 | /* Clean ETH primary MAC */ |
2172 | __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); |
2173 | rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, |
2174 | &ramrod_flags); |
2175 | if (rc != 0) |
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2177 | |
2178 | /* Cleanup UC list */ |
2179 | vlan_mac_flags = 0; |
2180 | __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); |
2181 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, |
2182 | &ramrod_flags); |
2183 | if (rc != 0) |
BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2185 | |
2186 | /***************** Now clean mcast object *****************************/ |
2187 | rparam.mcast_obj = &bp->mcast_obj; |
2188 | __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); |
2189 | |
2190 | /* Add a DEL command... - Since we're doing a driver cleanup only, |
2191 | * we take a lock surrounding both the initial send and the CONTs, |
2192 | * as we don't want a true completion to disrupt us in the middle. |
2193 | */ |
netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2198 | rc); |
2199 | |
2200 | /* ...and wait until all pending commands are cleared */ |
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2202 | while (rc != 0) { |
2203 | if (rc < 0) { |
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
netif_addr_unlock_bh(bp->dev);
2207 | return; |
2208 | } |
2209 | |
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
netif_addr_unlock_bh(bp->dev);
2213 | } |
2214 | |
2215 | #ifndef BNX2X_STOP_ON_ERROR |
2216 | #define LOAD_ERROR_EXIT(bp, label) \ |
2217 | do { \ |
2218 | (bp)->state = BNX2X_STATE_ERROR; \ |
2219 | goto label; \ |
2220 | } while (0) |
2221 | |
2222 | #define LOAD_ERROR_EXIT_CNIC(bp, label) \ |
2223 | do { \ |
2224 | bp->cnic_loaded = false; \ |
2225 | goto label; \ |
2226 | } while (0) |
2227 | #else /*BNX2X_STOP_ON_ERROR*/ |
2228 | #define LOAD_ERROR_EXIT(bp, label) \ |
2229 | do { \ |
2230 | (bp)->state = BNX2X_STATE_ERROR; \ |
2231 | (bp)->panic = 1; \ |
2232 | return -EBUSY; \ |
2233 | } while (0) |
2234 | #define LOAD_ERROR_EXIT_CNIC(bp, label) \ |
2235 | do { \ |
2236 | bp->cnic_loaded = false; \ |
2237 | (bp)->panic = 1; \ |
2238 | return -EBUSY; \ |
2239 | } while (0) |
2240 | #endif /*BNX2X_STOP_ON_ERROR*/ |
2241 | |
2242 | static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) |
2243 | { |
2244 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
2245 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
2247 | } |
2248 | |
2249 | static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) |
2250 | { |
2251 | int num_groups, vf_headroom = 0; |
2252 | int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; |
2253 | |
2254 | /* number of queues for statistics is number of eth queues + FCoE */ |
2255 | u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; |
2256 | |
2257 | /* Total number of FW statistics requests = |
2258 | * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper |
2259 | * and fcoe l2 queue) stats + num of queues (which includes another 1 |
2260 | * for fcoe l2 queue if applicable) |
2261 | */ |
2262 | bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; |
2263 | |
2264 | /* vf stats appear in the request list, but their data is allocated by |
2265 | * the VFs themselves. We don't include them in the bp->fw_stats_num as |
2266 | * it is used to determine where to place the vf stats queries in the |
2267 | * request struct |
2268 | */ |
2269 | if (IS_SRIOV(bp)) |
2270 | vf_headroom = bnx2x_vf_headroom(bp); |
2271 | |
2272 | /* Request is built from stats_query_header and an array of |
2273 | * stats_query_cmd_group each of which contains |
* STATS_QUERY_CMD_COUNT rules. The real number of requests is
2275 | * configured in the stats_query_header. |
2276 | */ |
2277 | num_groups = |
2278 | (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + |
2279 | (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? |
2280 | 1 : 0)); |
2281 | |
DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2283 | bp->fw_stats_num, vf_headroom, num_groups); |
2284 | bp->fw_stats_req_sz = sizeof(struct stats_query_header) + |
2285 | num_groups * sizeof(struct stats_query_cmd_group); |
2286 | |
/* Data for statistics requests + stats_counter.
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
* Memory for FCoE offloaded statistics is counted anyway,
* even if it will not be sent.
2292 | * VF stats are not accounted for here as the data of VF stats is stored |
2293 | * in memory allocated by the VF, not here. |
2294 | */ |
2295 | bp->fw_stats_data_sz = sizeof(struct per_port_stats) + |
2296 | sizeof(struct per_pf_stats) + |
2297 | sizeof(struct fcoe_statistics_params) + |
2298 | sizeof(struct per_queue_stats) * num_queue_stats + |
2299 | sizeof(struct stats_counter); |
2300 | |
2301 | bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, |
2302 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
2303 | if (!bp->fw_stats) |
2304 | goto alloc_mem_err; |
2305 | |
/* Set shortcuts: the request section sits at the base of the single
* DMA block and the data section follows it at offset fw_stats_req_sz.
*/
2307 | bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; |
2308 | bp->fw_stats_req_mapping = bp->fw_stats_mapping; |
2309 | bp->fw_stats_data = (struct bnx2x_fw_stats_data *) |
2310 | ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); |
2311 | bp->fw_stats_data_mapping = bp->fw_stats_mapping + |
2312 | bp->fw_stats_req_sz; |
2313 | |
DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2315 | U64_HI(bp->fw_stats_req_mapping), |
2316 | U64_LO(bp->fw_stats_req_mapping)); |
DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2318 | U64_HI(bp->fw_stats_data_mapping), |
2319 | U64_LO(bp->fw_stats_data_mapping)); |
2320 | return 0; |
2321 | |
2322 | alloc_mem_err: |
2323 | bnx2x_free_fw_stats_mem(bp); |
BNX2X_ERR("Can't allocate FW stats memory\n");
2325 | return -ENOMEM; |
2326 | } |
2327 | |
2328 | /* send load request to mcp and analyze response */ |
2329 | static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) |
2330 | { |
2331 | u32 param; |
2332 | |
2333 | /* init fw_seq */ |
2334 | bp->fw_seq = |
2335 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & |
2336 | DRV_MSG_SEQ_NUMBER_MASK); |
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2338 | |
2339 | /* Get current FW pulse sequence */ |
2340 | bp->fw_drv_pulse_wr_seq = |
2341 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & |
2342 | DRV_PULSE_SEQ_MASK); |
BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2344 | |
2345 | param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; |
2346 | |
2347 | if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) |
2348 | param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; |
2349 | |
2350 | /* load request */ |
2351 | (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); |
2352 | |
2353 | /* if mcp fails to respond we must abort */ |
2354 | if (!(*load_code)) { |
BNX2X_ERR("MCP response failure, aborting\n");
2356 | return -EBUSY; |
2357 | } |
2358 | |
2359 | /* If mcp refused (e.g. other port is in diagnostic mode) we |
2360 | * must abort |
2361 | */ |
2362 | if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { |
BNX2X_ERR("MCP refused load request, aborting\n");
2364 | return -EBUSY; |
2365 | } |
2366 | return 0; |
2367 | } |
2368 | |
2369 | /* check whether another PF has already loaded FW to chip. In |
2370 | * virtualized environments a pf from another VM may have already |
2371 | * initialized the device including loading FW |
2372 | */ |
2373 | int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) |
2374 | { |
2375 | /* is another pf loaded on this engine? */ |
2376 | if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && |
2377 | load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { |
2378 | u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; |
2379 | u32 loaded_fw; |
2380 | |
2381 | /* read loaded FW from chip */ |
2382 | loaded_fw = REG_RD(bp, XSEM_REG_PRAM); |
2383 | |
2384 | loaded_fw_major = loaded_fw & 0xff; |
2385 | loaded_fw_minor = (loaded_fw >> 8) & 0xff; |
2386 | loaded_fw_rev = (loaded_fw >> 16) & 0xff; |
2387 | loaded_fw_eng = (loaded_fw >> 24) & 0xff; |
2388 | |
DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
2390 | loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); |
2391 | |
2392 | /* abort nic load if version mismatch */ |
2393 | if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || |
2394 | loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || |
2395 | loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || |
2396 | loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { |
if (print_err)
BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
2401 | |
2402 | return -EBUSY; |
2403 | } |
2404 | } |
2405 | return 0; |
2406 | } |
2407 | |
2408 | /* returns the "mcp load_code" according to global load_count array */ |
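/* The per-path counters are laid out as [ total, port 0, port 1 ]: the
 * first function up on a path is answered FW_MSG_CODE_DRV_LOAD_COMMON,
 * the first on a port FW_MSG_CODE_DRV_LOAD_PORT, and everyone else
 * FW_MSG_CODE_DRV_LOAD_FUNCTION - mirroring what the MCP would answer.
 */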
2409 | static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) |
2410 | { |
2411 | int path = BP_PATH(bp); |
2412 | |
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2414 | path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], |
2415 | bnx2x_load_count[path][2]); |
2416 | bnx2x_load_count[path][0]++; |
2417 | bnx2x_load_count[path][1 + port]++; |
DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2419 | path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], |
2420 | bnx2x_load_count[path][2]); |
2421 | if (bnx2x_load_count[path][0] == 1) |
2422 | return FW_MSG_CODE_DRV_LOAD_COMMON; |
2423 | else if (bnx2x_load_count[path][1 + port] == 1) |
2424 | return FW_MSG_CODE_DRV_LOAD_PORT; |
2425 | else |
2426 | return FW_MSG_CODE_DRV_LOAD_FUNCTION; |
2427 | } |
2428 | |
2429 | /* mark PMF if applicable */ |
2430 | static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) |
2431 | { |
2432 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
2433 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || |
2434 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { |
2435 | bp->port.pmf = 1; |
2436 | /* We need the barrier to ensure the ordering between the |
2437 | * writing to bp->port.pmf here and reading it from the |
2438 | * bnx2x_periodic_task(). |
2439 | */ |
2440 | smp_mb(); |
2441 | } else { |
2442 | bp->port.pmf = 0; |
2443 | } |
2444 | |
DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2446 | } |
2447 | |
2448 | static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) |
2449 | { |
2450 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || |
2451 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && |
2452 | (bp->common.shmem2_base)) { |
2453 | if (SHMEM2_HAS(bp, dcc_support)) |
2454 | SHMEM2_WR(bp, dcc_support, |
2455 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | |
2456 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); |
2457 | if (SHMEM2_HAS(bp, afex_driver_support)) |
2458 | SHMEM2_WR(bp, afex_driver_support, |
2459 | SHMEM_AFEX_SUPPORTED_VERSION_ONE); |
2460 | } |
2461 | |
2462 | /* Set AFEX default VLAN tag to an invalid value */ |
2463 | bp->afex_def_vlan_tag = -1; |
2464 | } |
2465 | |
2466 | /** |
2467 | * bnx2x_bz_fp - zero content of the fastpath structure. |
2468 | * |
2469 | * @bp: driver handle |
2470 | * @index: fastpath index to be zeroed |
2471 | * |
2472 | * Makes sure the contents of the bp->fp[index].napi is kept |
2473 | * intact. |
2474 | */ |
2475 | static void bnx2x_bz_fp(struct bnx2x *bp, int index) |
2476 | { |
2477 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
2478 | int cos; |
2479 | struct napi_struct orig_napi = fp->napi; |
2480 | struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; |
2481 | |
2482 | /* bzero bnx2x_fastpath contents */ |
2483 | if (fp->tpa_info) |
2484 | memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * |
2485 | sizeof(struct bnx2x_agg_info)); |
2486 | memset(fp, 0, sizeof(*fp)); |
2487 | |
2488 | /* Restore the NAPI object as it has been already initialized */ |
2489 | fp->napi = orig_napi; |
2490 | fp->tpa_info = orig_tpa_info; |
2491 | fp->bp = bp; |
2492 | fp->index = index; |
2493 | if (IS_ETH_FP(fp)) |
2494 | fp->max_cos = bp->max_cos; |
2495 | else |
2496 | /* Special queues support only one CoS */ |
2497 | fp->max_cos = 1; |
2498 | |
2499 | /* Init txdata pointers */ |
2500 | if (IS_FCOE_FP(fp)) |
2501 | fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; |
2502 | if (IS_ETH_FP(fp)) |
2503 | for_each_cos_in_tx_queue(fp, cos) |
2504 | fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * |
2505 | BNX2X_NUM_ETH_QUEUES(bp) + index]; |
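
/* For example (illustrative numbers): with 4 ETH queues and
 * max_cos == 2, ETH queue 1 uses bnx2x_txq[1] for CoS 0 and
 * bnx2x_txq[5] for CoS 1 (cos * BNX2X_NUM_ETH_QUEUES(bp) + index).
 */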
2506 | |
2507 | /* set the tpa flag for each queue. The tpa flag determines the queue |
2508 | * minimal size so it must be set prior to queue memory allocation |
2509 | */ |
2510 | if (bp->dev->features & NETIF_F_LRO) |
2511 | fp->mode = TPA_MODE_LRO; |
2512 | else if (bp->dev->features & NETIF_F_GRO_HW) |
2513 | fp->mode = TPA_MODE_GRO; |
2514 | else |
2515 | fp->mode = TPA_MODE_DISABLED; |
2516 | |
2517 | /* We don't want TPA if it's disabled in bp |
2518 | * or if this is an FCoE L2 ring. |
2519 | */ |
2520 | if (bp->disable_tpa || IS_FCOE_FP(fp)) |
2521 | fp->mode = TPA_MODE_DISABLED; |
2522 | } |
2523 | |
2524 | void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) |
2525 | { |
2526 | u32 cur; |
2527 | |
2528 | if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) |
2529 | return; |
2530 | |
2531 | cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); |
DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2533 | cur, state); |
2534 | |
2535 | SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); |
2536 | } |
2537 | |
2538 | int bnx2x_load_cnic(struct bnx2x *bp) |
2539 | { |
2540 | int i, rc, port = BP_PORT(bp); |
2541 | |
DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2543 | |
2544 | mutex_init(&bp->cnic_mutex); |
2545 | |
2546 | if (IS_PF(bp)) { |
2547 | rc = bnx2x_alloc_mem_cnic(bp); |
2548 | if (rc) { |
BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2550 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2551 | } |
2552 | } |
2553 | |
2554 | rc = bnx2x_alloc_fp_mem_cnic(bp); |
2555 | if (rc) { |
BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2557 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2558 | } |
2559 | |
2560 | /* Update the number of queues with the cnic queues */ |
rc = bnx2x_set_real_num_queues(bp, 1);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2564 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); |
2565 | } |
2566 | |
2567 | /* Add all CNIC NAPI objects */ |
2568 | bnx2x_add_all_napi_cnic(bp); |
DP(NETIF_MSG_IFUP, "cnic napi added\n");
2570 | bnx2x_napi_enable_cnic(bp); |
2571 | |
2572 | rc = bnx2x_init_hw_func_cnic(bp); |
2573 | if (rc) |
2574 | LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); |
2575 | |
2576 | bnx2x_nic_init_cnic(bp); |
2577 | |
2578 | if (IS_PF(bp)) { |
2579 | /* Enable Timer scan */ |
2580 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); |
2581 | |
2582 | /* setup cnic queues */ |
2583 | for_each_cnic_queue(bp, i) { |
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
if (rc) {
BNX2X_ERR("Queue setup failed\n");
2587 | LOAD_ERROR_EXIT(bp, load_error_cnic2); |
2588 | } |
2589 | } |
2590 | } |
2591 | |
2592 | /* Initialize Rx filter. */ |
2593 | bnx2x_set_rx_mode_inner(bp); |
2594 | |
2595 | /* re-read iscsi info */ |
2596 | bnx2x_get_iscsi_info(bp); |
2597 | bnx2x_setup_cnic_irq_info(bp); |
2598 | bnx2x_setup_cnic_info(bp); |
2599 | bp->cnic_loaded = true; |
2600 | if (bp->state == BNX2X_STATE_OPEN) |
2601 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); |
2602 | |
DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2604 | |
2605 | return 0; |
2606 | |
2607 | #ifndef BNX2X_STOP_ON_ERROR |
2608 | load_error_cnic2: |
2609 | /* Disable Timer scan */ |
2610 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); |
2611 | |
2612 | load_error_cnic1: |
2613 | bnx2x_napi_disable_cnic(bp); |
2614 | /* Update the number of queues without the cnic queues */ |
if (bnx2x_set_real_num_queues(bp, 0))
BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
BNX2X_ERR("CNIC-related load failed\n");
2619 | bnx2x_free_fp_mem_cnic(bp); |
2620 | bnx2x_free_mem_cnic(bp); |
2621 | return rc; |
2622 | #endif /* ! BNX2X_STOP_ON_ERROR */ |
2623 | } |
2624 | |
2625 | /* must be called with rtnl_lock */ |
2626 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
2627 | { |
2628 | int port = BP_PORT(bp); |
2629 | int i, rc = 0, load_code = 0; |
2630 | |
DP(NETIF_MSG_IFUP, "Starting NIC load\n");
DP(NETIF_MSG_IFUP,
"CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2634 | |
2635 | #ifdef BNX2X_STOP_ON_ERROR |
2636 | if (unlikely(bp->panic)) { |
BNX2X_ERR("Can't load NIC when there is panic\n");
2638 | return -EPERM; |
2639 | } |
2640 | #endif |
2641 | |
2642 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
2643 | |
2644 | /* zero the structure w/o any lock, before SP handler is initialized */ |
2645 | memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); |
2646 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, |
2647 | &bp->last_reported_link.link_report_flags); |
2648 | |
2649 | if (IS_PF(bp)) |
2650 | /* must be called before memory allocation and HW init */ |
2651 | bnx2x_ilt_set_info(bp); |
2652 | |
2653 | /* |
2654 | * Zero fastpath structures preserving invariants like napi, which are |
2655 | * allocated only once, fp index, max_cos, bp pointer. |
2656 | * Also set fp->mode and txdata_ptr. |
2657 | */ |
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
2661 | memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + |
2662 | bp->num_cnic_queues) * |
2663 | sizeof(struct bnx2x_fp_txdata)); |
2664 | |
2665 | bp->fcoe_init = false; |
2666 | |
2667 | /* Set the receive queues buffer size */ |
2668 | bnx2x_set_rx_buf_size(bp); |
2669 | |
2670 | if (IS_PF(bp)) { |
2671 | rc = bnx2x_alloc_mem(bp); |
2672 | if (rc) { |
BNX2X_ERR("Unable to allocate bp memory\n");
2674 | return rc; |
2675 | } |
2676 | } |
2677 | |
2678 | /* need to be done after alloc mem, since it's self adjusting to amount |
2679 | * of memory available for RSS queues |
2680 | */ |
2681 | rc = bnx2x_alloc_fp_mem(bp); |
2682 | if (rc) { |
BNX2X_ERR("Unable to allocate memory for fps\n");
2684 | LOAD_ERROR_EXIT(bp, load_error0); |
2685 | } |
2686 | |
/* Allocate memory for FW statistics */
2688 | rc = bnx2x_alloc_fw_stats_mem(bp); |
2689 | if (rc) |
2690 | LOAD_ERROR_EXIT(bp, load_error0); |
2691 | |
2692 | /* request pf to initialize status blocks */ |
2693 | if (IS_VF(bp)) { |
2694 | rc = bnx2x_vfpf_init(bp); |
2695 | if (rc) |
2696 | LOAD_ERROR_EXIT(bp, load_error0); |
2697 | } |
2698 | |
2699 | /* As long as bnx2x_alloc_mem() may possibly update |
2700 | * bp->num_queues, bnx2x_set_real_num_queues() should always |
2701 | * come after it. At this stage cnic queues are not counted. |
2702 | */ |
rc = bnx2x_set_real_num_queues(bp, 0);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
2706 | LOAD_ERROR_EXIT(bp, load_error0); |
2707 | } |
2708 | |
2709 | /* configure multi cos mappings in kernel. |
2710 | * this configuration may be overridden by a multi class queue |
2711 | * discipline or by a dcbx negotiation result. |
2712 | */ |
bnx2x_setup_tc(bp->dev, bp->max_cos);
2714 | |
2715 | /* Add all NAPI objects */ |
2716 | bnx2x_add_all_napi(bp); |
DP(NETIF_MSG_IFUP, "napi added\n");
2718 | bnx2x_napi_enable(bp); |
2719 | bp->nic_stopped = false; |
2720 | |
2721 | if (IS_PF(bp)) { |
2722 | /* set pf load just before approaching the MCP */ |
2723 | bnx2x_set_pf_load(bp); |
2724 | |
2725 | /* if mcp exists send load request and analyze response */ |
2726 | if (!BP_NOMCP(bp)) { |
2727 | /* attempt to load pf */ |
rc = bnx2x_nic_load_request(bp, &load_code);
2729 | if (rc) |
2730 | LOAD_ERROR_EXIT(bp, load_error1); |
2731 | |
2732 | /* what did mcp say? */ |
rc = bnx2x_compare_fw_ver(bp, load_code, true);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2736 | LOAD_ERROR_EXIT(bp, load_error2); |
2737 | } |
2738 | } else { |
2739 | load_code = bnx2x_nic_load_no_mcp(bp, port); |
2740 | } |
2741 | |
2742 | /* mark pmf if applicable */ |
2743 | bnx2x_nic_load_pmf(bp, load_code); |
2744 | |
2745 | /* Init Function state controlling object */ |
2746 | bnx2x__init_func_obj(bp); |
2747 | |
2748 | /* Initialize HW */ |
2749 | rc = bnx2x_init_hw(bp, load_code); |
2750 | if (rc) { |
BNX2X_ERR("HW init failed, aborting\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2753 | LOAD_ERROR_EXIT(bp, load_error2); |
2754 | } |
2755 | } |
2756 | |
2757 | bnx2x_pre_irq_nic_init(bp); |
2758 | |
2759 | /* Connect to IRQs */ |
2760 | rc = bnx2x_setup_irqs(bp); |
2761 | if (rc) { |
BNX2X_ERR("setup irqs failed\n");
if (IS_PF(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2765 | LOAD_ERROR_EXIT(bp, load_error2); |
2766 | } |
2767 | |
2768 | /* Init per-function objects */ |
2769 | if (IS_PF(bp)) { |
2770 | /* Setup NIC internals and enable interrupts */ |
2771 | bnx2x_post_irq_nic_init(bp, load_code); |
2772 | |
2773 | bnx2x_init_bp_objs(bp); |
2774 | bnx2x_iov_nic_init(bp); |
2775 | |
2776 | /* Set AFEX default VLAN tag to an invalid value */ |
2777 | bp->afex_def_vlan_tag = -1; |
2778 | bnx2x_nic_load_afex_dcc(bp, load_code); |
2779 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; |
2780 | rc = bnx2x_func_start(bp); |
2781 | if (rc) { |
BNX2X_ERR("Function start failed!\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2784 | |
2785 | LOAD_ERROR_EXIT(bp, load_error3); |
2786 | } |
2787 | |
2788 | /* Send LOAD_DONE command to MCP */ |
2789 | if (!BP_NOMCP(bp)) { |
load_code = bnx2x_fw_command(bp,
DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
2794 | rc = -EBUSY; |
2795 | LOAD_ERROR_EXIT(bp, load_error3); |
2796 | } |
2797 | } |
2798 | |
2799 | /* initialize FW coalescing state machines in RAM */ |
2800 | bnx2x_update_coalesce(bp); |
2801 | } |
2802 | |
2803 | /* setup the leading queue */ |
2804 | rc = bnx2x_setup_leading(bp); |
2805 | if (rc) { |
BNX2X_ERR("Setup leading failed!\n");
2807 | LOAD_ERROR_EXIT(bp, load_error3); |
2808 | } |
2809 | |
2810 | /* set up the rest of the queues */ |
2811 | for_each_nondefault_eth_queue(bp, i) { |
2812 | if (IS_PF(bp)) |
rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
else /* VF */
rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
BNX2X_ERR("Queue %d setup failed\n", i);
2818 | LOAD_ERROR_EXIT(bp, load_error3); |
2819 | } |
2820 | } |
2821 | |
2822 | /* setup rss */ |
2823 | rc = bnx2x_init_rss(bp); |
2824 | if (rc) { |
BNX2X_ERR("PF RSS init failed\n");
2826 | LOAD_ERROR_EXIT(bp, load_error3); |
2827 | } |
2828 | |
2829 | /* Now when Clients are configured we are ready to work */ |
2830 | bp->state = BNX2X_STATE_OPEN; |
2831 | |
2832 | /* Configure a ucast MAC */ |
2833 | if (IS_PF(bp)) |
rc = bnx2x_set_eth_mac(bp, true);
else /* vf */
rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
2840 | LOAD_ERROR_EXIT(bp, load_error3); |
2841 | } |
2842 | |
2843 | if (IS_PF(bp) && bp->pending_max) { |
bnx2x_update_max_mf_config(bp, bp->pending_max);
2845 | bp->pending_max = 0; |
2846 | } |
2847 | |
2848 | bp->force_link_down = false; |
2849 | if (bp->port.pmf) { |
2850 | rc = bnx2x_initial_phy_init(bp, load_mode); |
2851 | if (rc) |
2852 | LOAD_ERROR_EXIT(bp, load_error3); |
2853 | } |
2854 | bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; |
2855 | |
2856 | /* Start fast path */ |
2857 | |
2858 | /* Re-configure vlan filters */ |
2859 | rc = bnx2x_vlan_reconfigure_vid(bp); |
2860 | if (rc) |
2861 | LOAD_ERROR_EXIT(bp, load_error3); |
2862 | |
2863 | /* Initialize Rx filter. */ |
2864 | bnx2x_set_rx_mode_inner(bp); |
2865 | |
2866 | if (bp->flags & PTP_SUPPORTED) { |
2867 | bnx2x_register_phc(bp); |
2868 | bnx2x_init_ptp(bp); |
2869 | bnx2x_configure_ptp_filters(bp); |
2870 | } |
2871 | /* Start Tx */ |
2872 | switch (load_mode) { |
2873 | case LOAD_NORMAL: |
2874 | /* Tx queue should be only re-enabled */ |
netif_tx_wake_all_queues(bp->dev);
2876 | break; |
2877 | |
2878 | case LOAD_OPEN: |
netif_tx_start_all_queues(bp->dev);
2880 | smp_mb__after_atomic(); |
2881 | break; |
2882 | |
2883 | case LOAD_DIAG: |
2884 | case LOAD_LOOPBACK_EXT: |
2885 | bp->state = BNX2X_STATE_DIAG; |
2886 | break; |
2887 | |
2888 | default: |
2889 | break; |
2890 | } |
2891 | |
2892 | if (bp->port.pmf) |
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2894 | else |
2895 | bnx2x__link_status_update(bp); |
2896 | |
2897 | /* start the timer */ |
mod_timer(&bp->timer, jiffies + bp->current_interval);
2899 | |
2900 | if (CNIC_ENABLED(bp)) |
2901 | bnx2x_load_cnic(bp); |
2902 | |
2903 | if (IS_PF(bp)) |
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2905 | |
2906 | if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { |
2907 | /* mark driver is loaded in shmem2 */ |
2908 | u32 val; |
2909 | val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); |
2910 | val &= ~DRV_FLAGS_MTU_MASK; |
2911 | val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); |
2912 | SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], |
2913 | val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | |
2914 | DRV_FLAGS_CAPABILITIES_LOADED_L2); |
2915 | } |
2916 | |
2917 | /* Wait for all pending SP commands to complete */ |
if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2921 | return -EBUSY; |
2922 | } |
2923 | |
2924 | /* Update driver data for On-Chip MFW dump. */ |
2925 | if (IS_PF(bp)) |
2926 | bnx2x_update_mfw_dump(bp); |
2927 | |
2928 | /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ |
2929 | if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) |
bnx2x_dcbx_init(bp, false);
2931 | |
2932 | if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) |
2933 | bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); |
2934 | |
DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2936 | |
2937 | return 0; |
2938 | |
2939 | #ifndef BNX2X_STOP_ON_ERROR |
2940 | load_error3: |
2941 | if (IS_PF(bp)) { |
bnx2x_int_disable_sync(bp, 1);
2943 | |
2944 | /* Clean queueable objects */ |
2945 | bnx2x_squeeze_objects(bp); |
2946 | } |
2947 | |
2948 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
2949 | bnx2x_free_skbs(bp); |
2950 | for_each_rx_queue(bp, i) |
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2952 | |
2953 | /* Release IRQs */ |
2954 | bnx2x_free_irq(bp); |
2955 | load_error2: |
2956 | if (IS_PF(bp) && !BP_NOMCP(bp)) { |
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2959 | } |
2960 | |
2961 | bp->port.pmf = 0; |
2962 | load_error1: |
2963 | bnx2x_napi_disable(bp); |
2964 | bnx2x_del_all_napi(bp); |
2965 | bp->nic_stopped = true; |
2966 | |
2967 | /* clear pf_load status, as it was already set */ |
2968 | if (IS_PF(bp)) |
2969 | bnx2x_clear_pf_load(bp); |
2970 | load_error0: |
2971 | bnx2x_free_fw_stats_mem(bp); |
2972 | bnx2x_free_fp_mem(bp); |
2973 | bnx2x_free_mem(bp); |
2974 | |
2975 | return rc; |
2976 | #endif /* ! BNX2X_STOP_ON_ERROR */ |
2977 | } |
2978 | |
2979 | int bnx2x_drain_tx_queues(struct bnx2x *bp) |
2980 | { |
2981 | u8 rc = 0, cos, i; |
2982 | |
2983 | /* Wait until tx fastpath tasks complete */ |
2984 | for_each_tx_queue(bp, i) { |
2985 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
2986 | |
2987 | for_each_cos_in_tx_queue(fp, cos) |
rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2989 | if (rc) |
2990 | return rc; |
2991 | } |
2992 | return 0; |
2993 | } |
2994 | |
2995 | /* must be called with rtnl_lock */ |
2996 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) |
2997 | { |
2998 | int i; |
2999 | bool global = false; |
3000 | |
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
3002 | |
3003 | if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) |
3004 | bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); |
3005 | |
3006 | /* mark driver is unloaded in shmem2 */ |
3007 | if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { |
3008 | u32 val; |
3009 | val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); |
3010 | SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], |
3011 | val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); |
3012 | } |
3013 | |
3014 | if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && |
3015 | (bp->state == BNX2X_STATE_CLOSED || |
3016 | bp->state == BNX2X_STATE_ERROR)) { |
3017 | /* We can get here if the driver has been unloaded |
3018 | * during parity error recovery and is either waiting for a |
3019 | * leader to complete or for other functions to unload and |
3020 | * then ifdown has been issued. In this case we want to |
3021 | * unload and let other functions to complete a recovery |
3022 | * process. |
3023 | */ |
3024 | bp->recovery_state = BNX2X_RECOVERY_DONE; |
3025 | bp->is_leader = 0; |
3026 | bnx2x_release_leader_lock(bp); |
3027 | smp_mb(); |
3028 | |
DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
BNX2X_ERR("Can't unload in closed or error state\n");
3031 | return -EINVAL; |
3032 | } |
3033 | |
/* Nothing to do during unload if the previous bnx2x_nic_load()
* has not completed successfully - all resources are released.
*
* We can get here only after an unsuccessful ndo_* callback, during
* which the dev->IFF_UP flag is still on.
*/
3040 | if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) |
3041 | return 0; |
3042 | |
3043 | /* It's important to set the bp->state to the value different from |
3044 | * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() |
3045 | * may restart the Tx from the NAPI context (see bnx2x_tx_int()). |
3046 | */ |
3047 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
3048 | smp_mb(); |
3049 | |
3050 | /* indicate to VFs that the PF is going down */ |
3051 | bnx2x_iov_channel_down(bp); |
3052 | |
3053 | if (CNIC_LOADED(bp)) |
3054 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); |
3055 | |
3056 | /* Stop Tx */ |
3057 | bnx2x_tx_disable(bp); |
netdev_reset_tc(bp->dev);
3059 | |
3060 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
3061 | |
del_timer_sync(&bp->timer);
3063 | |
3064 | if (IS_PF(bp) && !BP_NOMCP(bp)) { |
3065 | /* Set ALWAYS_ALIVE bit in shmem */ |
3066 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; |
3067 | bnx2x_drv_pulse(bp); |
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3069 | bnx2x_save_statistics(bp); |
3070 | } |
3071 | |
3072 | /* wait till consumers catch up with producers in all queues. |
3073 | * If we're recovering, FW can't write to host so no reason |
3074 | * to wait for the queues to complete all Tx. |
3075 | */ |
3076 | if (unload_mode != UNLOAD_RECOVERY) |
3077 | bnx2x_drain_tx_queues(bp); |
3078 | |
/* If VF, indicate to the PF that this function is going down (the PF
* will delete the sp elements and clear initializations).
*/
3082 | if (IS_VF(bp)) { |
3083 | bnx2x_clear_vlan_info(bp); |
3084 | bnx2x_vfpf_close_vf(bp); |
3085 | } else if (unload_mode != UNLOAD_RECOVERY) { |
/* if this is a normal/close unload, we need to clean up the chip */
3087 | bnx2x_chip_cleanup(bp, unload_mode, keep_link); |
3088 | } else { |
3089 | /* Send the UNLOAD_REQUEST to the MCP */ |
3090 | bnx2x_send_unload_req(bp, unload_mode); |
3091 | |
3092 | /* Prevent transactions to host from the functions on the |
3093 | * engine that doesn't reset global blocks in case of global |
3094 | * attention once global blocks are reset and gates are opened |
3095 | * (the engine which leader will perform the recovery |
3096 | * last). |
3097 | */ |
3098 | if (!CHIP_IS_E1x(bp)) |
3099 | bnx2x_pf_disable(bp); |
3100 | |
3101 | if (!bp->nic_stopped) { |
3102 | /* Disable HW interrupts, NAPI */ |
bnx2x_netif_stop(bp, 1);
3104 | /* Delete all NAPI objects */ |
3105 | bnx2x_del_all_napi(bp); |
3106 | if (CNIC_LOADED(bp)) |
3107 | bnx2x_del_all_napi_cnic(bp); |
3108 | /* Release IRQs */ |
3109 | bnx2x_free_irq(bp); |
3110 | bp->nic_stopped = true; |
3111 | } |
3112 | |
3113 | /* Report UNLOAD_DONE to MCP */ |
bnx2x_send_unload_done(bp, false);
3115 | } |
3116 | |
3117 | /* |
3118 | * At this stage no more interrupts will arrive so we may safely clean |
3119 | * the queueable objects here in case they failed to get cleaned so far. |
3120 | */ |
3121 | if (IS_PF(bp)) |
3122 | bnx2x_squeeze_objects(bp); |
3123 | |
3124 | /* There should be no more pending SP commands at this stage */ |
3125 | bp->sp_state = 0; |
3126 | |
3127 | bp->port.pmf = 0; |
3128 | |
3129 | /* clear pending work in rtnl task */ |
3130 | bp->sp_rtnl_state = 0; |
3131 | smp_mb(); |
3132 | |
3133 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
3134 | bnx2x_free_skbs(bp); |
3135 | if (CNIC_LOADED(bp)) |
3136 | bnx2x_free_skbs_cnic(bp); |
3137 | for_each_rx_queue(bp, i) |
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3139 | |
3140 | bnx2x_free_fp_mem(bp); |
3141 | if (CNIC_LOADED(bp)) |
3142 | bnx2x_free_fp_mem_cnic(bp); |
3143 | |
3144 | if (IS_PF(bp)) { |
3145 | if (CNIC_LOADED(bp)) |
3146 | bnx2x_free_mem_cnic(bp); |
3147 | } |
3148 | bnx2x_free_mem(bp); |
3149 | |
3150 | bp->state = BNX2X_STATE_CLOSED; |
3151 | bp->cnic_loaded = false; |
3152 | |
3153 | /* Clear driver version indication in shmem */ |
3154 | if (IS_PF(bp) && !BP_NOMCP(bp)) |
3155 | bnx2x_update_mng_version(bp); |
3156 | |
3157 | /* Check if there are pending parity attentions. If there are - set |
3158 | * RECOVERY_IN_PROGRESS. |
3159 | */ |
if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3161 | bnx2x_set_reset_in_progress(bp); |
3162 | |
3163 | /* Set RESET_IS_GLOBAL if needed */ |
3164 | if (global) |
3165 | bnx2x_set_reset_global(bp); |
3166 | } |
3167 | |
3168 | /* The last driver must disable a "close the gate" if there is no |
3169 | * parity attention or "process kill" pending. |
3170 | */ |
3171 | if (IS_PF(bp) && |
3172 | !bnx2x_clear_pf_load(bp) && |
3173 | bnx2x_reset_is_done(bp, BP_PATH(bp))) |
3174 | bnx2x_disable_close_the_gate(bp); |
3175 | |
DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3177 | |
3178 | return 0; |
3179 | } |
3180 | |
3181 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) |
3182 | { |
3183 | u16 pmcsr; |
3184 | |
3185 | /* If there is no power capability, silently succeed */ |
3186 | if (!bp->pdev->pm_cap) { |
BNX2X_DEV_INFO("No power capability. Breaking.\n");
3188 | return 0; |
3189 | } |
3190 | |
pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3192 | |
3193 | switch (state) { |
3194 | case PCI_D0: |
pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
3198 | |
3199 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) |
3200 | /* delay required during transition out of D3hot */ |
msleep(20);
3202 | break; |
3203 | |
case PCI_D3hot:
/* If there are other clients above, don't
* shut down the power
*/
if (atomic_read(&bp->pdev->enable_cnt) != 1)
3208 | return 0; |
3209 | /* Don't shut down the power for emulation and FPGA */ |
3210 | if (CHIP_REV_IS_SLOW(bp)) |
3211 | return 0; |
3212 | |
3213 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
3214 | pmcsr |= 3; |
3215 | |
3216 | if (bp->wol) |
3217 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; |
3218 | |
pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
3221 | |
3222 | /* No more memory access after this point until |
3223 | * device is brought back to D0. |
3224 | */ |
3225 | break; |
3226 | |
3227 | default: |
dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3229 | return -EINVAL; |
3230 | } |
3231 | return 0; |
3232 | } |
3233 | |
3234 | /* |
3235 | * net_device service functions |
3236 | */ |
3237 | static int bnx2x_poll(struct napi_struct *napi, int budget) |
3238 | { |
3239 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, |
3240 | napi); |
3241 | struct bnx2x *bp = fp->bp; |
3242 | int rx_work_done; |
3243 | u8 cos; |
3244 | |
3245 | #ifdef BNX2X_STOP_ON_ERROR |
3246 | if (unlikely(bp->panic)) { |
3247 | napi_complete(napi); |
3248 | return 0; |
3249 | } |
3250 | #endif |
3251 | for_each_cos_in_tx_queue(fp, cos) |
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3254 | |
3255 | rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0; |
3256 | |
3257 | if (rx_work_done < budget) { |
3258 | /* No need to update SB for FCoE L2 ring as long as |
3259 | * it's connected to the default SB and the SB |
3260 | * has been updated when NAPI was scheduled. |
3261 | */ |
3262 | if (IS_FCOE_FP(fp)) { |
napi_complete_done(napi, rx_work_done);
3264 | } else { |
3265 | bnx2x_update_fpsb_idx(fp); |
3266 | /* bnx2x_has_rx_work() reads the status block, |
3267 | * thus we need to ensure that status block indices |
3268 | * have been actually read (bnx2x_update_fpsb_idx) |
3269 | * prior to this check (bnx2x_has_rx_work) so that |
3270 | * we won't write the "newer" value of the status block |
3271 | * to IGU (if there was a DMA right after |
3272 | * bnx2x_has_rx_work and if there is no rmb, the memory |
3273 | * reading (bnx2x_update_fpsb_idx) may be postponed |
3274 | * to right before bnx2x_ack_sb). In this case there |
3275 | * will never be another interrupt until there is |
3276 | * another update of the status block, while there |
3277 | * is still unhandled work. |
3278 | */ |
3279 | rmb(); |
3280 | |
3281 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
if (napi_complete_done(napi, rx_work_done)) {
3283 | /* Re-enable interrupts */ |
DP(NETIF_MSG_RX_STATUS,
"Update index to %d\n", fp->fp_hc_idx);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
le16_to_cpu(fp->fp_hc_idx),
IGU_INT_ENABLE, 1);
3289 | } |
3290 | } else { |
3291 | rx_work_done = budget; |
3292 | } |
3293 | } |
3294 | } |
3295 | |
3296 | return rx_work_done; |
3297 | } |
3298 | |
3299 | /* we split the first BD into headers and data BDs |
3300 | * to ease the pain of our fellow microcode engineers |
3301 | * we use one mapping for both BDs |
3302 | */ |
3303 | static u16 bnx2x_tx_split(struct bnx2x *bp, |
3304 | struct bnx2x_fp_txdata *txdata, |
3305 | struct sw_tx_bd *tx_buf, |
3306 | struct eth_tx_start_bd **tx_bd, u16 hlen, |
3307 | u16 bd_prod) |
3308 | { |
3309 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; |
3310 | struct eth_tx_bd *d_tx_bd; |
3311 | dma_addr_t mapping; |
3312 | int old_len = le16_to_cpu(h_tx_bd->nbytes); |
3313 | |
3314 | /* first fix first BD */ |
3315 | h_tx_bd->nbytes = cpu_to_le16(hlen); |
3316 | |
DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3318 | h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); |
3319 | |
3320 | /* now get a new data BD |
3321 | * (after the pbd) and fill it */ |
3322 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3323 | d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
3324 | |
3325 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), |
3326 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; |
3327 | |
3328 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
3329 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
3330 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); |
3331 | |
3332 | /* this marks the BD as one that has no individual mapping */ |
3333 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; |
3334 | |
3335 | DP(NETIF_MSG_TX_QUEUED, |
	   "TSO split data size is %d (%x:%x)\n",
3337 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); |
3338 | |
3339 | /* update tx_bd */ |
3340 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; |
3341 | |
3342 | return bd_prod; |
3343 | } |
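
/* Worked example for the split above (illustrative numbers, not from the
 * original source): for a TSO packet whose linear part starts with
 * hlen = 66 bytes of headers, the start BD is trimmed to nbytes = 66 and
 * the new data BD reuses the same DMA mapping at (addr + 66) with
 * nbytes = old_len - 66, so no additional dma_map_single() call is needed
 * for the second BD.
 */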
3344 | |
3345 | #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) |
3346 | #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) |
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3348 | { |
3349 | __sum16 tsum = (__force __sum16) csum; |
3350 | |
3351 | if (fix > 0) |
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
					   csum_partial(t_header - fix, fix, 0)));
3354 | |
3355 | else if (fix < 0) |
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
					   csum_partial(t_header, -fix, 0)));
3358 | |
3359 | return bswab16(tsum); |
3360 | } |
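
/* Worked example (illustrative): the stack's checksum may start before or
 * after the transport header. With fix = 2 the sum wrongly includes the
 * two bytes just before t_header, so csum_sub() removes
 * csum_partial(t_header - 2, 2, 0) before folding; with fix = -2 the first
 * two transport-header bytes were missed and csum_add() puts them back.
 * The final swab produces the big-endian layout expected in the PBD.
 */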
3361 | |
3362 | static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) |
3363 | { |
3364 | u32 rc; |
3365 | __u8 prot = 0; |
3366 | __be16 protocol; |
3367 | |
3368 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
3369 | return XMIT_PLAIN; |
3370 | |
3371 | protocol = vlan_get_protocol(skb); |
3372 | if (protocol == htons(ETH_P_IPV6)) { |
3373 | rc = XMIT_CSUM_V6; |
3374 | prot = ipv6_hdr(skb)->nexthdr; |
3375 | } else { |
3376 | rc = XMIT_CSUM_V4; |
3377 | prot = ip_hdr(skb)->protocol; |
3378 | } |
3379 | |
3380 | if (!CHIP_IS_E1x(bp) && skb->encapsulation) { |
3381 | if (inner_ip_hdr(skb)->version == 6) { |
3382 | rc |= XMIT_CSUM_ENC_V6; |
3383 | if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
3384 | rc |= XMIT_CSUM_TCP; |
3385 | } else { |
3386 | rc |= XMIT_CSUM_ENC_V4; |
3387 | if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) |
3388 | rc |= XMIT_CSUM_TCP; |
3389 | } |
3390 | } |
3391 | if (prot == IPPROTO_TCP) |
3392 | rc |= XMIT_CSUM_TCP; |
3393 | |
3394 | if (skb_is_gso(skb)) { |
3395 | if (skb_is_gso_v6(skb)) { |
3396 | rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); |
3397 | if (rc & XMIT_CSUM_ENC) |
3398 | rc |= XMIT_GSO_ENC_V6; |
3399 | } else { |
3400 | rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); |
3401 | if (rc & XMIT_CSUM_ENC) |
3402 | rc |= XMIT_GSO_ENC_V4; |
3403 | } |
3404 | } |
3405 | |
3406 | return rc; |
3407 | } |
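
/* Example flag composition (illustrative): a GSO TCPv4 packet tunneled in
 * an outer IPv4/UDP (e.g. VXLAN) header on a non-E1x chip yields
 * XMIT_CSUM_V4 (outer) | XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP (inner) |
 * XMIT_GSO_V4 | XMIT_GSO_ENC_V4; the outer UDP protocol by itself does not
 * set XMIT_CSUM_TCP, the inner TCP header does.
 */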
3408 | |
3409 | /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */ |
3410 | #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4 |
3411 | |
3412 | /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ |
3413 | #define BNX2X_NUM_TSO_WIN_SUB_BDS 3 |
3414 | |
3415 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) |
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
3419 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, |
3420 | u32 xmit_type) |
3421 | { |
3422 | int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS; |
3423 | int to_copy = 0, hlen = 0; |
3424 | |
3425 | if (xmit_type & XMIT_GSO_ENC) |
3426 | num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS; |
3427 | |
3428 | if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) { |
3429 | if (xmit_type & XMIT_GSO) { |
3430 | unsigned short lso_mss = skb_shinfo(skb)->gso_size; |
3431 | int wnd_size = MAX_FETCH_BD - num_tso_win_sub; |
3432 | /* Number of windows to check */ |
3433 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; |
3434 | int wnd_idx = 0; |
3435 | int frag_idx = 0; |
3436 | u32 wnd_sum = 0; |
3437 | |
3438 | /* Headers length */ |
3439 | if (xmit_type & XMIT_GSO_ENC) |
3440 | hlen = skb_inner_tcp_all_headers(skb); |
3441 | else |
3442 | hlen = skb_tcp_all_headers(skb); |
3443 | |
			/* Amount of data (w/o headers) on linear part of SKB */
3445 | first_bd_sz = skb_headlen(skb) - hlen; |
3446 | |
3447 | wnd_sum = first_bd_sz; |
3448 | |
3449 | /* Calculate the first sum - it's special */ |
3450 | for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) |
3451 | wnd_sum += |
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3453 | |
3454 | /* If there was data on linear skb data - check it */ |
3455 | if (first_bd_sz > 0) { |
3456 | if (unlikely(wnd_sum < lso_mss)) { |
3457 | to_copy = 1; |
3458 | goto exit_lbl; |
3459 | } |
3460 | |
3461 | wnd_sum -= first_bd_sz; |
3462 | } |
3463 | |
3464 | /* Others are easier: run through the frag list and |
3465 | check all windows */ |
3466 | for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { |
3467 | wnd_sum += |
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3469 | |
3470 | if (unlikely(wnd_sum < lso_mss)) { |
3471 | to_copy = 1; |
3472 | break; |
3473 | } |
3474 | wnd_sum -= |
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3476 | } |
3477 | } else { |
			/* in the non-LSO case a too fragmented packet
			 * should always be linearized
			 */
3480 | to_copy = 1; |
3481 | } |
3482 | } |
3483 | |
3484 | exit_lbl: |
3485 | if (unlikely(to_copy)) |
3486 | DP(NETIF_MSG_TX_QUEUED, |
		   "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3489 | skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); |
3490 | |
3491 | return to_copy; |
3492 | } |
3493 | #endif |
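
/* Sliding-window example (illustrative): the FW fetches at most
 * MAX_FETCH_BD BDs per packet, num_tso_win_sub of which are reserved for
 * the non-window BDs (linear data BD, parse BD(s), last BD), leaving
 * wnd_size data BDs per window. Every wnd_size consecutive data BDs must
 * carry at least gso_size bytes; if any window falls short, to_copy is set
 * above and the caller linearizes the skb.
 */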
3494 | |
3495 | /** |
3496 | * bnx2x_set_pbd_gso - update PBD in GSO case. |
3497 | * |
3498 | * @skb: packet skb |
3499 | * @pbd: parse BD |
3500 | * @xmit_type: xmit flags |
3501 | */ |
3502 | static void bnx2x_set_pbd_gso(struct sk_buff *skb, |
3503 | struct eth_tx_parse_bd_e1x *pbd, |
3504 | u32 xmit_type) |
3505 | { |
3506 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
3507 | pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); |
3508 | pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); |
3509 | |
3510 | if (xmit_type & XMIT_GSO_V4) { |
3511 | pbd->ip_id = bswab16(ip_hdr(skb)->id); |
3512 | pbd->tcp_pseudo_csum = |
3513 | bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, |
3514 | ip_hdr(skb)->daddr, |
3515 | 0, IPPROTO_TCP, 0)); |
3516 | } else { |
3517 | pbd->tcp_pseudo_csum = |
3518 | bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
3519 | &ipv6_hdr(skb)->daddr, |
3520 | 0, IPPROTO_TCP, 0)); |
3521 | } |
3522 | |
3523 | pbd->global_data |= |
3524 | cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); |
3525 | } |
3526 | |
3527 | /** |
3528 | * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length |
3529 | * |
3530 | * @bp: driver handle |
3531 | * @skb: packet skb |
3532 | * @parsing_data: data to be updated |
3533 | * @xmit_type: xmit flags |
3534 | * |
3535 | * 57712/578xx related, when skb has encapsulation |
3536 | */ |
3537 | static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, |
3538 | u32 *parsing_data, u32 xmit_type) |
3539 | { |
3540 | *parsing_data |= |
3541 | ((skb_inner_transport_offset(skb) >> 1) << |
3542 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & |
3543 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; |
3544 | |
3545 | if (xmit_type & XMIT_CSUM_TCP) { |
3546 | *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << |
3547 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & |
3548 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; |
3549 | |
3550 | return skb_inner_tcp_all_headers(skb); |
3551 | } |
3552 | |
3553 | /* We support checksum offload for TCP and UDP only. |
3554 | * No need to pass the UDP header length - it's a constant. |
3555 | */ |
3556 | return skb_inner_transport_offset(skb) + sizeof(struct udphdr); |
3557 | } |
3558 | |
3559 | /** |
3560 | * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length |
3561 | * |
3562 | * @bp: driver handle |
3563 | * @skb: packet skb |
3564 | * @parsing_data: data to be updated |
3565 | * @xmit_type: xmit flags |
3566 | * |
3567 | * 57712/578xx related |
3568 | */ |
3569 | static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, |
3570 | u32 *parsing_data, u32 xmit_type) |
3571 | { |
3572 | *parsing_data |= |
3573 | ((skb_transport_offset(skb) >> 1) << |
3574 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & |
3575 | ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; |
3576 | |
3577 | if (xmit_type & XMIT_CSUM_TCP) { |
3578 | *parsing_data |= ((tcp_hdrlen(skb) / 4) << |
3579 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & |
3580 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; |
3581 | |
3582 | return skb_tcp_all_headers(skb); |
3583 | } |
3584 | /* We support checksum offload for TCP and UDP only. |
3585 | * No need to pass the UDP header length - it's a constant. |
3586 | */ |
3587 | return skb_transport_offset(skb) + sizeof(struct udphdr); |
3588 | } |
3589 | |
3590 | /* set FW indication according to inner or outer protocols if tunneled */ |
3591 | static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, |
3592 | struct eth_tx_start_bd *tx_start_bd, |
3593 | u32 xmit_type) |
3594 | { |
3595 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; |
3596 | |
3597 | if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) |
3598 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; |
3599 | |
3600 | if (!(xmit_type & XMIT_CSUM_TCP)) |
3601 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; |
3602 | } |
3603 | |
3604 | /** |
3605 | * bnx2x_set_pbd_csum - update PBD with checksum and return header length |
3606 | * |
3607 | * @bp: driver handle |
3608 | * @skb: packet skb |
3609 | * @pbd: parse BD to be updated |
3610 | * @xmit_type: xmit flags |
3611 | */ |
3612 | static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, |
3613 | struct eth_tx_parse_bd_e1x *pbd, |
3614 | u32 xmit_type) |
3615 | { |
3616 | u8 hlen = skb_network_offset(skb) >> 1; |
3617 | |
3618 | /* for now NS flag is not used in Linux */ |
3619 | pbd->global_data = |
3620 | cpu_to_le16(hlen | |
3621 | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << |
3622 | ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); |
3623 | |
3624 | pbd->ip_hlen_w = skb_network_header_len(skb) >> 1; |
3625 | |
3626 | hlen += pbd->ip_hlen_w; |
3627 | |
3628 | /* We support checksum offload for TCP and UDP only */ |
3629 | if (xmit_type & XMIT_CSUM_TCP) |
3630 | hlen += tcp_hdrlen(skb) / 2; |
3631 | else |
3632 | hlen += sizeof(struct udphdr) / 2; |
3633 | |
3634 | pbd->total_hlen_w = cpu_to_le16(hlen); |
3635 | hlen = hlen*2; |
3636 | |
3637 | if (xmit_type & XMIT_CSUM_TCP) { |
3638 | pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); |
3639 | |
3640 | } else { |
3641 | s8 fix = SKB_CS_OFF(skb); /* signed! */ |
3642 | |
3643 | DP(NETIF_MSG_TX_QUEUED, |
		   "hlen %d fix %d csum before fix %x\n",
3645 | le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); |
3646 | |
3647 | /* HW bug: fixup the CSUM */ |
3648 | pbd->tcp_pseudo_csum = |
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);
3651 | |
		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3653 | pbd->tcp_pseudo_csum); |
3654 | } |
3655 | |
3656 | return hlen; |
3657 | } |
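
/* Worked example (illustrative): for an untagged IPv4/TCP frame,
 * skb_network_offset() = 14 gives hlen = 7 words; a 20-byte IP header adds
 * ip_hlen_w = 10 and a 20-byte TCP header adds 10 more, so
 * total_hlen_w = 27 words and the function returns hlen = 54 bytes.
 */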
3658 | |
3659 | static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, |
3660 | struct eth_tx_parse_bd_e2 *pbd_e2, |
3661 | struct eth_tx_parse_2nd_bd *pbd2, |
3662 | u16 *global_data, |
3663 | u32 xmit_type) |
3664 | { |
3665 | u16 hlen_w = 0; |
3666 | u8 outerip_off, outerip_len = 0; |
3667 | |
3668 | /* from outer IP to transport */ |
3669 | hlen_w = skb_inner_transport_offset(skb) >> 1; |
3670 | |
3671 | /* transport len */ |
3672 | hlen_w += inner_tcp_hdrlen(skb) >> 1; |
3673 | |
3674 | pbd2->fw_ip_hdr_to_payload_w = hlen_w; |
3675 | |
3676 | /* outer IP header info */ |
3677 | if (xmit_type & XMIT_CSUM_V4) { |
3678 | struct iphdr *iph = ip_hdr(skb); |
3679 | u32 csum = (__force u32)(~iph->check) - |
3680 | (__force u32)iph->tot_len - |
3681 | (__force u32)iph->frag_off; |
3682 | |
3683 | outerip_len = iph->ihl << 1; |
3684 | |
3685 | pbd2->fw_ip_csum_wo_len_flags_frag = |
3686 | bswab16(csum_fold((__force __wsum)csum)); |
3687 | } else { |
3688 | pbd2->fw_ip_hdr_to_payload_w = |
3689 | hlen_w - ((sizeof(struct ipv6hdr)) >> 1); |
3690 | pbd_e2->data.tunnel_data.flags |= |
3691 | ETH_TUNNEL_DATA_IPV6_OUTER; |
3692 | } |
3693 | |
3694 | pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); |
3695 | |
3696 | pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); |
3697 | |
3698 | /* inner IP header info */ |
3699 | if (xmit_type & XMIT_CSUM_ENC_V4) { |
3700 | pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); |
3701 | |
3702 | pbd_e2->data.tunnel_data.pseudo_csum = |
3703 | bswab16(~csum_tcpudp_magic( |
3704 | inner_ip_hdr(skb)->saddr, |
3705 | inner_ip_hdr(skb)->daddr, |
3706 | 0, IPPROTO_TCP, 0)); |
3707 | } else { |
3708 | pbd_e2->data.tunnel_data.pseudo_csum = |
3709 | bswab16(~csum_ipv6_magic( |
3710 | &inner_ipv6_hdr(skb)->saddr, |
3711 | &inner_ipv6_hdr(skb)->daddr, |
3712 | 0, IPPROTO_TCP, 0)); |
3713 | } |
3714 | |
3715 | outerip_off = (skb_network_offset(skb)) >> 1; |
3716 | |
3717 | *global_data |= |
3718 | outerip_off | |
3719 | (outerip_len << |
3720 | ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | |
3721 | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << |
3722 | ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); |
3723 | |
3724 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
3725 | SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); |
3726 | pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; |
3727 | } |
3728 | } |
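
/* Note on the outer IPv4 checksum above (illustrative reasoning): the FW
 * rewrites tot_len and frag_off for every segment it generates, so the
 * driver hands it ~iph->check with those two fields subtracted out; the FW
 * re-adds the per-segment values to produce a valid header checksum
 * without recomputing it from scratch.
 */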
3729 | |
3730 | static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, |
3731 | u32 xmit_type) |
3732 | { |
3733 | struct ipv6hdr *ipv6; |
3734 | |
3735 | if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) |
3736 | return; |
3737 | |
3738 | if (xmit_type & XMIT_GSO_ENC_V6) |
3739 | ipv6 = inner_ipv6_hdr(skb); |
3740 | else /* XMIT_GSO_V6 */ |
3741 | ipv6 = ipv6_hdr(skb); |
3742 | |
3743 | if (ipv6->nexthdr == NEXTHDR_IPV6) |
3744 | *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; |
3745 | } |
3746 | |
3747 | /* called with netif_tx_lock |
3748 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call |
3749 | * netif_wake_queue() |
3750 | */ |
3751 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3752 | { |
3753 | struct bnx2x *bp = netdev_priv(dev); |
3754 | |
3755 | struct netdev_queue *txq; |
3756 | struct bnx2x_fp_txdata *txdata; |
3757 | struct sw_tx_bd *tx_buf; |
3758 | struct eth_tx_start_bd *tx_start_bd, *first_bd; |
3759 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; |
3760 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; |
3761 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; |
3762 | struct eth_tx_parse_2nd_bd *pbd2 = NULL; |
3763 | u32 pbd_e2_parsing_data = 0; |
3764 | u16 pkt_prod, bd_prod; |
3765 | int nbd, txq_index; |
3766 | dma_addr_t mapping; |
3767 | u32 xmit_type = bnx2x_xmit_type(bp, skb); |
3768 | int i; |
3769 | u8 hlen = 0; |
3770 | __le16 pkt_size = 0; |
3771 | struct ethhdr *eth; |
3772 | u8 mac_type = UNICAST_ADDRESS; |
3773 | |
3774 | #ifdef BNX2X_STOP_ON_ERROR |
3775 | if (unlikely(bp->panic)) |
3776 | return NETDEV_TX_BUSY; |
3777 | #endif |
3778 | |
3779 | txq_index = skb_get_queue_mapping(skb); |
	txq = netdev_get_tx_queue(dev, txq_index);
3781 | |
3782 | BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); |
3783 | |
3784 | txdata = &bp->bnx2x_txq[txq_index]; |
3785 | |
3786 | /* enable this debug print to view the transmission queue being used |
3787 | DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", |
3788 | txq_index, fp_index, txdata_index); */ |
3789 | |
3790 | /* enable this debug print to view the transmission details |
3791 | DP(NETIF_MSG_TX_QUEUED, |
3792 | "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", |
3793 | txdata->cid, fp_index, txdata_index, txdata, fp); */ |
3794 | |
3795 | if (unlikely(bnx2x_tx_avail(bp, txdata) < |
3796 | skb_shinfo(skb)->nr_frags + |
3797 | BDS_PER_TX_PKT + |
3798 | NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { |
3799 | /* Handle special storage cases separately */ |
3800 | if (txdata->tx_ring_size == 0) { |
3801 | struct bnx2x_eth_q_stats *q_stats = |
3802 | bnx2x_fp_qstats(bp, txdata->parent_fp); |
3803 | q_stats->driver_filtered_tx_pkt++; |
3804 | dev_kfree_skb(skb); |
3805 | return NETDEV_TX_OK; |
3806 | } |
3807 | bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; |
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3810 | |
3811 | return NETDEV_TX_BUSY; |
3812 | } |
3813 | |
3814 | DP(NETIF_MSG_TX_QUEUED, |
	   "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3816 | txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, |
3817 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, |
3818 | skb->len); |
3819 | |
3820 | eth = (struct ethhdr *)skb->data; |
3821 | |
3822 | /* set flag according to packet type (UNICAST_ADDRESS is default)*/ |
3823 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { |
		if (is_broadcast_ether_addr(eth->h_dest))
3825 | mac_type = BROADCAST_ADDRESS; |
3826 | else |
3827 | mac_type = MULTICAST_ADDRESS; |
3828 | } |
3829 | |
3830 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) |
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
3834 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { |
3835 | /* Statistics of linearization */ |
3836 | bp->lin_cnt++; |
3837 | if (skb_linearize(skb) != 0) { |
3838 | DP(NETIF_MSG_TX_QUEUED, |
			   "SKB linearization failed - silently dropping this SKB\n");
3840 | dev_kfree_skb_any(skb); |
3841 | return NETDEV_TX_OK; |
3842 | } |
3843 | } |
3844 | #endif |
3845 | /* Map skb linear data for DMA */ |
3846 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
3847 | skb_headlen(skb), DMA_TO_DEVICE); |
3848 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
3849 | DP(NETIF_MSG_TX_QUEUED, |
		   "SKB mapping failed - silently dropping this SKB\n");
3851 | dev_kfree_skb_any(skb); |
3852 | return NETDEV_TX_OK; |
3853 | } |
3854 | /* |
3855 | Please read carefully. First we use one BD which we mark as start, |
3856 | then we have a parsing info BD (used for TSO or xsum), |
3857 | and only then we have the rest of the TSO BDs. |
3858 | (don't forget to mark the last one as last, |
3859 | and to unmap only AFTER you write to the BD ...) |
	And above all, all PBD sizes are in words - NOT DWORDS!
3861 | */ |
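
	/* Illustrative BD chain for a tunneled TSO packet with one frag
	 * (an assumption for illustration, not every packet uses every BD):
	 * start BD (headers only after the split) -> parse BD (E2) ->
	 * 2nd parse BD (tunnel) -> split data BD -> frag data BD; nbd
	 * counts all of them plus, when the chain wraps a page, the
	 * next-pointer BD.
	 */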
3862 | |
3863 | /* get current pkt produced now - advance it just before sending packet |
3864 | * since mapping of pages may fail and cause packet to be dropped |
3865 | */ |
3866 | pkt_prod = txdata->tx_pkt_prod; |
3867 | bd_prod = TX_BD(txdata->tx_bd_prod); |
3868 | |
3869 | /* get a tx_buf and first BD |
3870 | * tx_start_bd may be changed during SPLIT, |
3871 | * but first_bd will always stay first |
3872 | */ |
3873 | tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; |
3874 | tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; |
3875 | first_bd = tx_start_bd; |
3876 | |
3877 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; |
3878 | |
3879 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { |
3880 | if (!(bp->flags & TX_TIMESTAMPING_EN)) { |
3881 | bp->eth_stats.ptp_skip_tx_ts++; |
			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3883 | } else if (bp->ptp_tx_skb) { |
3884 | bp->eth_stats.ptp_skip_tx_ts++; |
3885 | netdev_err_once(bp->dev, |
					"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3887 | } else { |
3888 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
3889 | /* schedule check for Tx timestamp */ |
3890 | bp->ptp_tx_skb = skb_get(skb); |
3891 | bp->ptp_tx_start = jiffies; |
			schedule_work(&bp->ptp_task);
3893 | } |
3894 | } |
3895 | |
3896 | /* header nbd: indirectly zero other flags! */ |
3897 | tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; |
3898 | |
3899 | /* remember the first BD of the packet */ |
3900 | tx_buf->first_bd = txdata->tx_bd_prod; |
3901 | tx_buf->skb = skb; |
3902 | tx_buf->flags = 0; |
3903 | |
3904 | DP(NETIF_MSG_TX_QUEUED, |
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
3906 | pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); |
3907 | |
3908 | if (skb_vlan_tag_present(skb)) { |
3909 | tx_start_bd->vlan_or_ethertype = |
3910 | cpu_to_le16(skb_vlan_tag_get(skb)); |
3911 | tx_start_bd->bd_flags.as_bitfield |= |
3912 | (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); |
3913 | } else { |
3914 | /* when transmitting in a vf, start bd must hold the ethertype |
3915 | * for fw to enforce it |
3916 | */ |
3917 | u16 vlan_tci = 0; |
3918 | #ifndef BNX2X_STOP_ON_ERROR |
3919 | if (IS_VF(bp)) { |
3920 | #endif |
3921 | /* Still need to consider inband vlan for enforced */ |
		if (__vlan_get_tag(skb, &vlan_tci)) {
3923 | tx_start_bd->vlan_or_ethertype = |
3924 | cpu_to_le16(ntohs(eth->h_proto)); |
3925 | } else { |
3926 | tx_start_bd->bd_flags.as_bitfield |= |
3927 | (X_ETH_INBAND_VLAN << |
3928 | ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); |
3929 | tx_start_bd->vlan_or_ethertype = |
3930 | cpu_to_le16(vlan_tci); |
3931 | } |
3932 | #ifndef BNX2X_STOP_ON_ERROR |
3933 | } else { |
3934 | /* used by FW for packet accounting */ |
3935 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); |
3936 | } |
3937 | #endif |
3938 | } |
3939 | |
3940 | nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ |
3941 | |
3942 | /* turn on parsing and get a BD */ |
3943 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3944 | |
3945 | if (xmit_type & XMIT_CSUM) |
3946 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); |
3947 | |
3948 | if (!CHIP_IS_E1x(bp)) { |
3949 | pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; |
3950 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); |
3951 | |
3952 | if (xmit_type & XMIT_CSUM_ENC) { |
3953 | u16 global_data = 0; |
3954 | |
3955 | /* Set PBD in enc checksum offload case */ |
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);
3959 | |
3960 | /* turn on 2nd parsing and get a BD */ |
3961 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
3962 | |
3963 | pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; |
3964 | |
3965 | memset(pbd2, 0, sizeof(*pbd2)); |
3966 | |
3967 | pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = |
3968 | (skb_inner_network_header(skb) - |
3969 | skb->data) >> 1; |
3970 | |
3971 | if (xmit_type & XMIT_GSO_ENC) |
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);
3975 | |
3976 | pbd2->global_data = cpu_to_le16(global_data); |
3977 | |
3978 | /* add addition parse BD indication to start BD */ |
3979 | SET_FLAG(tx_start_bd->general_data, |
3980 | ETH_TX_START_BD_PARSE_NBDS, 1); |
3981 | /* set encapsulation flag in start BD */ |
3982 | SET_FLAG(tx_start_bd->general_data, |
3983 | ETH_TX_START_BD_TUNNEL_EXIST, 1); |
3984 | |
3985 | tx_buf->flags |= BNX2X_HAS_SECOND_PBD; |
3986 | |
3987 | nbd++; |
3988 | } else if (xmit_type & XMIT_CSUM) { |
3989 | /* Set PBD in checksum offload case w/o encapsulation */ |
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
3993 | } |
3994 | |
		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3996 | /* Add the macs to the parsing BD if this is a vf or if |
3997 | * Tx Switching is enabled. |
3998 | */ |
3999 | if (IS_VF(bp)) { |
4000 | /* override GRE parameters in BD */ |
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
4005 | |
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
4010 | } else { |
4011 | if (bp->flags & TX_SWITCHING) |
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
4017 | #ifdef BNX2X_STOP_ON_ERROR |
4018 | /* Enforce security is always set in Stop on Error - |
4019 | * source mac should be present in the parsing BD |
4020 | */ |
4021 | bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, |
4022 | &pbd_e2->data.mac_addr.src_mid, |
4023 | &pbd_e2->data.mac_addr.src_lo, |
4024 | eth->h_source); |
4025 | #endif |
4026 | } |
4027 | |
4028 | SET_FLAG(pbd_e2_parsing_data, |
4029 | ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); |
4030 | } else { |
4031 | u16 global_data = 0; |
4032 | pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; |
4033 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); |
4034 | /* Set PBD in checksum offload case */ |
4035 | if (xmit_type & XMIT_CSUM) |
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4037 | |
4038 | SET_FLAG(global_data, |
4039 | ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); |
4040 | pbd_e1x->global_data |= cpu_to_le16(global_data); |
4041 | } |
4042 | |
4043 | /* Setup the data pointer of the first BD of the packet */ |
4044 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
4045 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
4046 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); |
4047 | pkt_size = tx_start_bd->nbytes; |
4048 | |
4049 | DP(NETIF_MSG_TX_QUEUED, |
	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4051 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, |
4052 | le16_to_cpu(tx_start_bd->nbytes), |
4053 | tx_start_bd->bd_flags.as_bitfield, |
4054 | le16_to_cpu(tx_start_bd->vlan_or_ethertype)); |
4055 | |
4056 | if (xmit_type & XMIT_GSO) { |
4057 | |
4058 | DP(NETIF_MSG_TX_QUEUED, |
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
4060 | skb->len, hlen, skb_headlen(skb), |
4061 | skb_shinfo(skb)->gso_size); |
4062 | |
4063 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; |
4064 | |
4065 | if (unlikely(skb_headlen(skb) > hlen)) { |
4066 | nbd++; |
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
4070 | } |
4071 | if (!CHIP_IS_E1x(bp)) |
4072 | pbd_e2_parsing_data |= |
4073 | (skb_shinfo(skb)->gso_size << |
4074 | ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & |
4075 | ETH_TX_PARSE_BD_E2_LSO_MSS; |
4076 | else |
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4078 | } |
4079 | |
4080 | /* Set the PBD's parsing_data field if not zero |
4081 | * (for the chips newer than 57711). |
4082 | */ |
4083 | if (pbd_e2_parsing_data) |
4084 | pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); |
4085 | |
4086 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; |
4087 | |
4088 | /* Handle fragmented skb */ |
4089 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
4090 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
4091 | |
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
4094 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
4095 | unsigned int pkts_compl = 0, bytes_compl = 0; |
4096 | |
4097 | DP(NETIF_MSG_TX_QUEUED, |
			   "Unable to map page - dropping packet...\n");
4099 | |
4100 | /* we need unmap all buffers already mapped |
4101 | * for this SKB; |
4102 | * first_bd->nbd need to be properly updated |
4103 | * before call to bnx2x_free_tx_pkt |
4104 | */ |
4105 | first_bd->nbd = cpu_to_le16(nbd); |
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
4109 | return NETDEV_TX_OK; |
4110 | } |
4111 | |
4112 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
4113 | tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
4114 | if (total_pkt_bd == NULL) |
4115 | total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; |
4116 | |
4117 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); |
4118 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); |
4119 | tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); |
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
4121 | nbd++; |
4122 | |
4123 | DP(NETIF_MSG_TX_QUEUED, |
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4125 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, |
4126 | le16_to_cpu(tx_data_bd->nbytes)); |
4127 | } |
4128 | |
	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4130 | |
4131 | /* update with actual num BDs */ |
4132 | first_bd->nbd = cpu_to_le16(nbd); |
4133 | |
4134 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); |
4135 | |
4136 | /* now send a tx doorbell, counting the next BD |
4137 | * if the packet contains or ends with it |
4138 | */ |
4139 | if (TX_BD_POFF(bd_prod) < nbd) |
4140 | nbd++; |
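
	/* Example (illustrative): each BD page ends in a next-pointer BD
	 * that NEXT_TX_IDX() skips. If a 4-BD packet ends 2 BDs into a new
	 * page, TX_BD_POFF(bd_prod) = 2 < nbd = 4, so the pointer BD the
	 * packet crossed is counted in the doorbell as well.
	 */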
4141 | |
	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, where we care much more about them.
	 */
4149 | if (total_pkt_bd != NULL) |
4150 | total_pkt_bd->total_pkt_bytes = pkt_size; |
4151 | |
4152 | if (pbd_e1x) |
4153 | DP(NETIF_MSG_TX_QUEUED, |
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4155 | pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, |
4156 | pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, |
4157 | pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, |
4158 | le16_to_cpu(pbd_e1x->total_hlen_w)); |
4159 | if (pbd_e2) |
4160 | DP(NETIF_MSG_TX_QUEUED, |
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4162 | pbd_e2, |
4163 | pbd_e2->data.mac_addr.dst_hi, |
4164 | pbd_e2->data.mac_addr.dst_mid, |
4165 | pbd_e2->data.mac_addr.dst_lo, |
4166 | pbd_e2->data.mac_addr.src_hi, |
4167 | pbd_e2->data.mac_addr.src_mid, |
4168 | pbd_e2->data.mac_addr.src_lo, |
4169 | pbd_e2->parsing_data); |
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4171 | |
	netdev_tx_sent_queue(txq, skb->len);
4173 | |
4174 | skb_tx_timestamp(skb); |
4175 | |
4176 | txdata->tx_pkt_prod++; |
4177 | /* |
4178 | * Make sure that the BD data is updated before updating the producer |
4179 | * since FW might read the BD right after the producer is updated. |
4180 | * This is only applicable for weak-ordered memory model archs such |
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
4183 | */ |
4184 | wmb(); |
4185 | |
4186 | txdata->tx_db.data.prod += nbd; |
4187 | /* make sure descriptor update is observed by HW */ |
4188 | wmb(); |
4189 | |
4190 | DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); |
4191 | |
4192 | txdata->tx_bd_prod += nbd; |
4193 | |
4194 | if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { |
		netif_tx_stop_queue(txq);
4196 | |
4197 | /* paired memory barrier is in bnx2x_tx_int(), we have to keep |
4198 | * ordering of set_bit() in netif_tx_stop_queue() and read of |
4199 | * fp->bd_tx_cons */ |
4200 | smp_mb(); |
4201 | |
4202 | bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; |
4203 | if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) |
			netif_tx_wake_queue(txq);
4205 | } |
4206 | txdata->tx_pkt++; |
4207 | |
4208 | return NETDEV_TX_OK; |
4209 | } |
4210 | |
4211 | void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) |
4212 | { |
4213 | int mfw_vn = BP_FW_MB_IDX(bp); |
4214 | u32 tmp; |
4215 | |
	/* If the shmem shouldn't affect configuration, use an identity mapping */
4217 | if (!IS_MF_BD(bp)) { |
4218 | int i; |
4219 | |
4220 | for (i = 0; i < BNX2X_MAX_PRIORITY; i++) |
4221 | c2s_map[i] = i; |
4222 | *c2s_default = 0; |
4223 | |
4224 | return; |
4225 | } |
4226 | |
4227 | tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); |
4228 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
4229 | c2s_map[0] = tmp & 0xff; |
4230 | c2s_map[1] = (tmp >> 8) & 0xff; |
4231 | c2s_map[2] = (tmp >> 16) & 0xff; |
4232 | c2s_map[3] = (tmp >> 24) & 0xff; |
4233 | |
4234 | tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); |
4235 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
4236 | c2s_map[4] = tmp & 0xff; |
4237 | c2s_map[5] = (tmp >> 8) & 0xff; |
4238 | c2s_map[6] = (tmp >> 16) & 0xff; |
4239 | c2s_map[7] = (tmp >> 24) & 0xff; |
4240 | |
4241 | tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); |
4242 | tmp = (__force u32)be32_to_cpu((__force __be32)tmp); |
4243 | *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; |
4244 | } |
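
/* Example (illustrative): if c2s_pcp_map_lower reads back as 0x03020100
 * after the endianness fixup, priorities 0..3 map to the outer PCP values
 * 0x00, 0x01, 0x02 and 0x03, one byte per priority; c2s_pcp_map_upper
 * covers priorities 4..7 the same way.
 */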
4245 | |
4246 | /** |
4247 | * bnx2x_setup_tc - routine to configure net_device for multi tc |
4248 | * |
4249 | * @dev: net device to configure |
4250 | * @num_tc: number of traffic classes to enable |
4251 | * |
4252 | * callback connected to the ndo_setup_tc function pointer |
4253 | */ |
4254 | int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) |
4255 | { |
4256 | struct bnx2x *bp = netdev_priv(dev); |
4257 | u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; |
4258 | int cos, prio, count, offset; |
4259 | |
4260 | /* setup tc must be called under rtnl lock */ |
4261 | ASSERT_RTNL(); |
4262 | |
4263 | /* no traffic classes requested. Aborting */ |
4264 | if (!num_tc) { |
4265 | netdev_reset_tc(dev); |
4266 | return 0; |
4267 | } |
4268 | |
4269 | /* requested to support too many traffic classes */ |
4270 | if (num_tc > bp->max_cos) { |
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4272 | num_tc, bp->max_cos); |
4273 | return -EINVAL; |
4274 | } |
4275 | |
4276 | /* declare amount of supported traffic classes */ |
4277 | if (netdev_set_num_tc(dev, num_tc)) { |
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4279 | return -EINVAL; |
4280 | } |
4281 | |
	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4283 | |
4284 | /* configure priority to traffic class mapping */ |
4285 | for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { |
4286 | int outer_prio = c2s_map[prio]; |
4287 | |
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4289 | DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, |
		   "mapping priority %d to tc %d\n",
4291 | outer_prio, bp->prio_to_cos[outer_prio]); |
4292 | } |
4293 | |
	/* Use this configuration to differentiate tc0 from other COSes.
	   This can be used for ets or pfc, and saves the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch
4297 | netdev_set_prio_tc_map(dev, 0, 0); |
4298 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); |
4299 | for (prio = 1; prio < 16; prio++) { |
4300 | netdev_set_prio_tc_map(dev, prio, 1); |
4301 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); |
4302 | } */ |
4303 | |
4304 | /* configure traffic class to transmission queue mapping */ |
4305 | for (cos = 0; cos < bp->max_cos; cos++) { |
4306 | count = BNX2X_NUM_ETH_QUEUES(bp); |
4307 | offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); |
		netdev_set_tc_queue(dev, cos, count, offset);
4309 | DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, |
		   "mapping tc %d to offset %d count %d\n",
4311 | cos, offset, count); |
4312 | } |
4313 | |
4314 | return 0; |
4315 | } |
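
/* Example (illustrative): with 4 ETH RSS queues and num_tc = 2, tc 0
 * covers txq indices 0..3 and tc 1 covers 4..7, since each COS gets
 * count = BNX2X_NUM_ETH_QUEUES(bp) queues at
 * offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */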
4316 | |
4317 | int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, |
4318 | void *type_data) |
4319 | { |
4320 | struct tc_mqprio_qopt *mqprio = type_data; |
4321 | |
4322 | if (type != TC_SETUP_QDISC_MQPRIO) |
4323 | return -EOPNOTSUPP; |
4324 | |
4325 | mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; |
4326 | |
	return bnx2x_setup_tc(dev, mqprio->num_tc);
4328 | } |
4329 | |
4330 | /* called with rtnl_lock */ |
4331 | int bnx2x_change_mac_addr(struct net_device *dev, void *p) |
4332 | { |
4333 | struct sockaddr *addr = p; |
4334 | struct bnx2x *bp = netdev_priv(dev); |
4335 | int rc = 0; |
4336 | |
	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
4339 | return -EINVAL; |
4340 | } |
4341 | |
4342 | if (IS_MF_STORAGE_ONLY(bp)) { |
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4344 | return -EINVAL; |
4345 | } |
4346 | |
4347 | if (netif_running(dev)) { |
		rc = bnx2x_set_eth_mac(bp, false);
4349 | if (rc) |
4350 | return rc; |
4351 | } |
4352 | |
	eth_hw_addr_set(dev, addr->sa_data);
4354 | |
4355 | if (netif_running(dev)) |
		rc = bnx2x_set_eth_mac(bp, true);
4357 | |
4358 | if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) |
4359 | SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); |
4360 | |
4361 | return rc; |
4362 | } |
4363 | |
4364 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) |
4365 | { |
4366 | union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); |
4367 | struct bnx2x_fastpath *fp = &bp->fp[fp_index]; |
4368 | u8 cos; |
4369 | |
4370 | /* Common */ |
4371 | |
4372 | if (IS_FCOE_IDX(fp_index)) { |
4373 | memset(sb, 0, sizeof(union host_hc_status_block)); |
4374 | fp->status_blk_mapping = 0; |
4375 | } else { |
4376 | /* status blocks */ |
4377 | if (!CHIP_IS_E1x(bp)) |
4378 | BNX2X_PCI_FREE(sb->e2_sb, |
4379 | bnx2x_fp(bp, fp_index, |
4380 | status_blk_mapping), |
4381 | sizeof(struct host_hc_status_block_e2)); |
4382 | else |
4383 | BNX2X_PCI_FREE(sb->e1x_sb, |
4384 | bnx2x_fp(bp, fp_index, |
4385 | status_blk_mapping), |
4386 | sizeof(struct host_hc_status_block_e1x)); |
4387 | } |
4388 | |
4389 | /* Rx */ |
4390 | if (!skip_rx_queue(bp, fp_index)) { |
4391 | bnx2x_free_rx_bds(fp); |
4392 | |
4393 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
4394 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); |
4395 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), |
4396 | bnx2x_fp(bp, fp_index, rx_desc_mapping), |
4397 | sizeof(struct eth_rx_bd) * NUM_RX_BD); |
4398 | |
4399 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), |
4400 | bnx2x_fp(bp, fp_index, rx_comp_mapping), |
4401 | sizeof(struct eth_fast_path_rx_cqe) * |
4402 | NUM_RCQ_BD); |
4403 | |
4404 | /* SGE ring */ |
4405 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); |
4406 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), |
4407 | bnx2x_fp(bp, fp_index, rx_sge_mapping), |
4408 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
4409 | } |
4410 | |
4411 | /* Tx */ |
4412 | if (!skip_tx_queue(bp, fp_index)) { |
4413 | /* fastpath tx rings: tx_buf tx_desc */ |
4414 | for_each_cos_in_tx_queue(fp, cos) { |
4415 | struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; |
4416 | |
4417 | DP(NETIF_MSG_IFDOWN, |
			   "freeing tx memory of fp %d cos %d cid %d\n",
4419 | fp_index, cos, txdata->cid); |
4420 | |
4421 | BNX2X_FREE(txdata->tx_buf_ring); |
4422 | BNX2X_PCI_FREE(txdata->tx_desc_ring, |
4423 | txdata->tx_desc_mapping, |
4424 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
4425 | } |
4426 | } |
4427 | /* end of fastpath */ |
4428 | } |
4429 | |
4430 | static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) |
4431 | { |
4432 | int i; |
4433 | for_each_cnic_queue(bp, i) |
		bnx2x_free_fp_mem_at(bp, i);
4435 | } |
4436 | |
4437 | void bnx2x_free_fp_mem(struct bnx2x *bp) |
4438 | { |
4439 | int i; |
4440 | for_each_eth_queue(bp, i) |
		bnx2x_free_fp_mem_at(bp, i);
4442 | } |
4443 | |
4444 | static void set_sb_shortcuts(struct bnx2x *bp, int index) |
4445 | { |
4446 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); |
4447 | if (!CHIP_IS_E1x(bp)) { |
4448 | bnx2x_fp(bp, index, sb_index_values) = |
4449 | (__le16 *)status_blk.e2_sb->sb.index_values; |
4450 | bnx2x_fp(bp, index, sb_running_index) = |
4451 | (__le16 *)status_blk.e2_sb->sb.running_index; |
4452 | } else { |
4453 | bnx2x_fp(bp, index, sb_index_values) = |
4454 | (__le16 *)status_blk.e1x_sb->sb.index_values; |
4455 | bnx2x_fp(bp, index, sb_running_index) = |
4456 | (__le16 *)status_blk.e1x_sb->sb.running_index; |
4457 | } |
4458 | } |
4459 | |
4460 | /* Returns the number of actually allocated BDs */ |
4461 | static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, |
4462 | int rx_ring_size) |
4463 | { |
4464 | struct bnx2x *bp = fp->bp; |
4465 | u16 ring_prod, cqe_ring_prod; |
4466 | int i, failure_cnt = 0; |
4467 | |
4468 | fp->rx_comp_cons = 0; |
4469 | cqe_ring_prod = ring_prod = 0; |
4470 | |
	/* This routine is called only during fp init so
4472 | * fp->eth_q_stats.rx_skb_alloc_failed = 0 |
4473 | */ |
4474 | for (i = 0; i < rx_ring_size; i++) { |
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4476 | failure_cnt++; |
4477 | continue; |
4478 | } |
4479 | ring_prod = NEXT_RX_IDX(ring_prod); |
4480 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); |
4481 | WARN_ON(ring_prod <= (i - failure_cnt)); |
4482 | } |
4483 | |
4484 | if (failure_cnt) |
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4486 | i - failure_cnt, fp->index); |
4487 | |
4488 | fp->rx_bd_prod = ring_prod; |
4489 | /* Limit the CQE producer by the CQE ring size */ |
4490 | fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, |
4491 | cqe_ring_prod); |
4492 | |
4493 | bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; |
4494 | |
4495 | return i - failure_cnt; |
4496 | } |
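
/* Example (illustrative): with rx_ring_size = 512 and two failed
 * allocations, the loop still iterates 512 times but advances the
 * producers only 510 times, so the function returns 510 and
 * rx_skb_alloc_failed grows by 2.
 */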
4497 | |
4498 | static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) |
4499 | { |
4500 | int i; |
4501 | |
4502 | for (i = 1; i <= NUM_RCQ_RINGS; i++) { |
4503 | struct eth_rx_cqe_next_page *nextpg; |
4504 | |
4505 | nextpg = (struct eth_rx_cqe_next_page *) |
4506 | &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; |
4507 | nextpg->addr_hi = |
4508 | cpu_to_le32(U64_HI(fp->rx_comp_mapping + |
4509 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
4510 | nextpg->addr_lo = |
4511 | cpu_to_le32(U64_LO(fp->rx_comp_mapping + |
4512 | BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); |
4513 | } |
4514 | } |
4515 | |
4516 | static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) |
4517 | { |
4518 | union host_hc_status_block *sb; |
4519 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
4520 | int ring_size = 0; |
4521 | u8 cos; |
4522 | int rx_ring_size = 0; |
4523 | |
4524 | if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { |
4525 | rx_ring_size = MIN_RX_SIZE_NONTPA; |
4526 | bp->rx_ring_size = rx_ring_size; |
4527 | } else if (!bp->rx_ring_size) { |
4528 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); |
4529 | |
4530 | if (CHIP_IS_E3(bp)) { |
4531 | u32 cfg = SHMEM_RD(bp, |
4532 | dev_info.port_hw_config[BP_PORT(bp)]. |
4533 | default_cfg); |
4534 | |
4535 | /* Decrease ring size for 1G functions */ |
4536 | if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == |
4537 | PORT_HW_CFG_NET_SERDES_IF_SGMII) |
4538 | rx_ring_size /= 10; |
4539 | } |
4540 | |
4541 | /* allocate at least number of buffers required by FW */ |
4542 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : |
4543 | MIN_RX_SIZE_TPA, rx_ring_size); |
4544 | |
4545 | bp->rx_ring_size = rx_ring_size; |
4546 | } else /* if rx_ring_size specified - use it */ |
4547 | rx_ring_size = bp->rx_ring_size; |
4548 | |
	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
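
	/* Example (illustrative): with 4 RX queues the default is
	 * MAX_RX_AVAIL / 4 buffers per queue, cut to a tenth on E3 1G
	 * SGMII ports and then raised back to the FW minimum
	 * (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA) if it fell below it.
	 */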
4550 | |
4551 | /* Common */ |
4552 | sb = &bnx2x_fp(bp, index, status_blk); |
4553 | |
4554 | if (!IS_FCOE_IDX(index)) { |
4555 | /* status blocks */ |
4556 | if (!CHIP_IS_E1x(bp)) { |
4557 | sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), |
4558 | sizeof(struct host_hc_status_block_e2)); |
4559 | if (!sb->e2_sb) |
4560 | goto alloc_mem_err; |
4561 | } else { |
4562 | sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), |
4563 | sizeof(struct host_hc_status_block_e1x)); |
4564 | if (!sb->e1x_sb) |
4565 | goto alloc_mem_err; |
4566 | } |
4567 | } |
4568 | |
4569 | /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to |
4570 | * set shortcuts for it. |
4571 | */ |
4572 | if (!IS_FCOE_IDX(index)) |
4573 | set_sb_shortcuts(bp, index); |
4574 | |
4575 | /* Tx */ |
4576 | if (!skip_tx_queue(bp, index)) { |
4577 | /* fastpath tx rings: tx_buf tx_desc */ |
4578 | for_each_cos_in_tx_queue(fp, cos) { |
4579 | struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; |
4580 | |
4581 | DP(NETIF_MSG_IFUP, |
			   "allocating tx memory of fp %d cos %d\n",
4583 | index, cos); |
4584 | |
			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
4588 | if (!txdata->tx_buf_ring) |
4589 | goto alloc_mem_err; |
4590 | txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, |
4591 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); |
4592 | if (!txdata->tx_desc_ring) |
4593 | goto alloc_mem_err; |
4594 | } |
4595 | } |
4596 | |
4597 | /* Rx */ |
4598 | if (!skip_rx_queue(bp, index)) { |
4599 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
4600 | bnx2x_fp(bp, index, rx_buf_ring) = |
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4602 | if (!bnx2x_fp(bp, index, rx_buf_ring)) |
4603 | goto alloc_mem_err; |
4604 | bnx2x_fp(bp, index, rx_desc_ring) = |
4605 | BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), |
4606 | sizeof(struct eth_rx_bd) * NUM_RX_BD); |
4607 | if (!bnx2x_fp(bp, index, rx_desc_ring)) |
4608 | goto alloc_mem_err; |
4609 | |
4610 | /* Seed all CQEs by 1s */ |
4611 | bnx2x_fp(bp, index, rx_comp_ring) = |
4612 | BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), |
4613 | sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); |
4614 | if (!bnx2x_fp(bp, index, rx_comp_ring)) |
4615 | goto alloc_mem_err; |
4616 | |
4617 | /* SGE ring */ |
4618 | bnx2x_fp(bp, index, rx_page_ring) = |
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4620 | GFP_KERNEL); |
4621 | if (!bnx2x_fp(bp, index, rx_page_ring)) |
4622 | goto alloc_mem_err; |
4623 | bnx2x_fp(bp, index, rx_sge_ring) = |
4624 | BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), |
4625 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
4626 | if (!bnx2x_fp(bp, index, rx_sge_ring)) |
4627 | goto alloc_mem_err; |
4628 | /* RX BD ring */ |
4629 | bnx2x_set_next_page_rx_bd(fp); |
4630 | |
4631 | /* CQ ring */ |
4632 | bnx2x_set_next_page_rx_cq(fp); |
4633 | |
4634 | /* BDs */ |
4635 | ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); |
4636 | if (ring_size < rx_ring_size) |
4637 | goto alloc_mem_err; |
4638 | } |
4639 | |
4640 | return 0; |
4641 | |
4642 | /* handles low memory cases */ |
4643 | alloc_mem_err: |
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4645 | index, ring_size); |
	/* FW will drop all packets if queue is not big enough.
	 * In these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
4650 | if (ring_size < (fp->mode == TPA_MODE_DISABLED ? |
4651 | MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { |
4652 | /* release memory allocated for this queue */ |
		bnx2x_free_fp_mem_at(bp, index);
4654 | return -ENOMEM; |
4655 | } |
4656 | return 0; |
4657 | } |
4658 | |
4659 | static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) |
4660 | { |
4661 | if (!NO_FCOE(bp)) |
4662 | /* FCoE */ |
4663 | if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) |
		/* we will fail the load process instead of marking
4665 | * NO_FCOE_FLAG |
4666 | */ |
4667 | return -ENOMEM; |
4668 | |
4669 | return 0; |
4670 | } |
4671 | |
4672 | static int bnx2x_alloc_fp_mem(struct bnx2x *bp) |
4673 | { |
4674 | int i; |
4675 | |
4676 | /* 1. Allocate FP for leading - fatal if error |
4677 | * 2. Allocate RSS - fix number of queues if error |
4678 | */ |
4679 | |
4680 | /* leading */ |
	if (bnx2x_alloc_fp_mem_at(bp, 0))
4682 | return -ENOMEM; |
4683 | |
4684 | /* RSS */ |
4685 | for_each_nondefault_eth_queue(bp, i) |
		if (bnx2x_alloc_fp_mem_at(bp, i))
4687 | break; |
4688 | |
4689 | /* handle memory failures */ |
4690 | if (i != BNX2X_NUM_ETH_QUEUES(bp)) { |
4691 | int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; |
4692 | |
4693 | WARN_ON(delta < 0); |
4694 | bnx2x_shrink_eth_fp(bp, delta); |
4695 | if (CNIC_SUPPORT(bp)) |
4696 | /* move non eth FPs next to last eth FP |
4697 | * must be done in that order |
4698 | * FCOE_IDX < FWD_IDX < OOO_IDX |
4699 | */ |
4700 | |
			/* move FCoE fp even if NO_FCOE_FLAG is on */
4702 | bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); |
4703 | bp->num_ethernet_queues -= delta; |
4704 | bp->num_queues = bp->num_ethernet_queues + |
4705 | bp->num_cnic_queues; |
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4707 | bp->num_queues + delta, bp->num_queues); |
4708 | } |
4709 | |
4710 | return 0; |
4711 | } |
4712 | |
4713 | void bnx2x_free_mem_bp(struct bnx2x *bp) |
4714 | { |
4715 | int i; |
4716 | |
4717 | for (i = 0; i < bp->fp_array_size; i++) |
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
4725 | } |
4726 | |
4727 | int bnx2x_alloc_mem_bp(struct bnx2x *bp) |
4728 | { |
4729 | struct bnx2x_fastpath *fp; |
4730 | struct msix_entry *tbl; |
4731 | struct bnx2x_ilt *ilt; |
4732 | int msix_table_size = 0; |
4733 | int fp_array_size, txq_array_size; |
4734 | int i; |
4735 | |
4736 | /* |
	 * The biggest MSI-X table we might need is sized as the maximum
	 * number of fast path IGU SBs plus the default SB (for PF only).
4739 | */ |
4740 | msix_table_size = bp->igu_sb_cnt; |
4741 | if (IS_PF(bp)) |
4742 | msix_table_size++; |
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4744 | |
4745 | /* fp array: RSS plus CNIC related L2 queues */ |
4746 | fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); |
4747 | bp->fp_array_size = fp_array_size; |
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4749 | |
	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4751 | if (!fp) |
4752 | goto alloc_err; |
4753 | for (i = 0; i < bp->fp_array_size; i++) { |
4754 | fp[i].tpa_info = |
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4757 | if (!(fp[i].tpa_info)) |
4758 | goto alloc_err; |
4759 | } |
4760 | |
4761 | bp->fp = fp; |
4762 | |
4763 | /* allocate sp objs */ |
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4765 | GFP_KERNEL); |
4766 | if (!bp->sp_objs) |
4767 | goto alloc_err; |
4768 | |
4769 | /* allocate fp_stats */ |
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4771 | GFP_KERNEL); |
4772 | if (!bp->fp_stats) |
4773 | goto alloc_err; |
4774 | |
4775 | /* Allocate memory for the transmission queues array */ |
4776 | txq_array_size = |
4777 | BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); |
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4779 | |
	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4781 | GFP_KERNEL); |
4782 | if (!bp->bnx2x_txq) |
4783 | goto alloc_err; |
4784 | |
4785 | /* msix table */ |
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4787 | if (!tbl) |
4788 | goto alloc_err; |
4789 | bp->msix_table = tbl; |
4790 | |
4791 | /* ilt */ |
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4793 | if (!ilt) |
4794 | goto alloc_err; |
4795 | bp->ilt = ilt; |
4796 | |
4797 | return 0; |
4798 | alloc_err: |
4799 | bnx2x_free_mem_bp(bp); |
4800 | return -ENOMEM; |
4801 | } |
4802 | |
4803 | int bnx2x_reload_if_running(struct net_device *dev) |
4804 | { |
4805 | struct bnx2x *bp = netdev_priv(dev); |
4806 | |
4807 | if (unlikely(!netif_running(dev))) |
4808 | return 0; |
4809 | |
	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4811 | return bnx2x_nic_load(bp, LOAD_NORMAL); |
4812 | } |
4813 | |
4814 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp) |
4815 | { |
4816 | u32 sel_phy_idx = 0; |
4817 | if (bp->link_params.num_phys <= 1) |
4818 | return INT_PHY; |
4819 | |
4820 | if (bp->link_vars.link_up) { |
4821 | sel_phy_idx = EXT_PHY1; |
4822 | /* In case link is SERDES, check if the EXT_PHY2 is the one */ |
4823 | if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && |
4824 | (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) |
4825 | sel_phy_idx = EXT_PHY2; |
4826 | } else { |
4827 | |
		switch (bnx2x_phy_selection(&bp->link_params)) {
4829 | case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: |
4830 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: |
4831 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: |
4832 | sel_phy_idx = EXT_PHY1; |
4833 | break; |
4834 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: |
4835 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: |
4836 | sel_phy_idx = EXT_PHY2; |
4837 | break; |
4838 | } |
4839 | } |
4840 | |
4841 | return sel_phy_idx; |
4842 | } |
4843 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp) |
4844 | { |
4845 | u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); |
4846 | /* |
4847 | * The selected activated PHY is always after swapping (in case PHY |
4848 | * swapping is enabled). So when swapping is enabled, we need to reverse |
4849 | * the configuration |
4850 | */ |
4851 | |
4852 | if (bp->link_params.multi_phy_config & |
4853 | PORT_HW_CFG_PHY_SWAPPED_ENABLED) { |
4854 | if (sel_phy_idx == EXT_PHY1) |
4855 | sel_phy_idx = EXT_PHY2; |
4856 | else if (sel_phy_idx == EXT_PHY2) |
4857 | sel_phy_idx = EXT_PHY1; |
4858 | } |
4859 | return LINK_CONFIG_IDX(sel_phy_idx); |
4860 | } |
4861 | |
4862 | #ifdef NETDEV_FCOE_WWNN |
4863 | int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) |
4864 | { |
4865 | struct bnx2x *bp = netdev_priv(dev); |
4866 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
4867 | |
4868 | switch (type) { |
4869 | case NETDEV_FCOE_WWNN: |
4870 | *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, |
4871 | cp->fcoe_wwn_node_name_lo); |
4872 | break; |
4873 | case NETDEV_FCOE_WWPN: |
4874 | *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, |
4875 | cp->fcoe_wwn_port_name_lo); |
4876 | break; |
4877 | default: |
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4879 | return -EINVAL; |
4880 | } |
4881 | |
4882 | return 0; |
4883 | } |
4884 | #endif |
4885 | |
4886 | /* called with rtnl_lock */ |
4887 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) |
4888 | { |
4889 | struct bnx2x *bp = netdev_priv(dev); |
4890 | |
	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4893 | return -EPERM; |
4894 | } |
4895 | |
4896 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4898 | return -EAGAIN; |
4899 | } |
4900 | |
4901 | /* This does not race with packet allocation |
4902 | * because the actual alloc size is |
4903 | * only updated as part of load |
4904 | */ |
4905 | dev->mtu = new_mtu; |
4906 | |
	if (!bnx2x_mtu_allows_gro(new_mtu))
4908 | dev->features &= ~NETIF_F_GRO_HW; |
4909 | |
4910 | if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) |
4911 | SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); |
4912 | |
4913 | return bnx2x_reload_if_running(dev); |
4914 | } |
4915 | |
4916 | netdev_features_t bnx2x_fix_features(struct net_device *dev, |
4917 | netdev_features_t features) |
4918 | { |
4919 | struct bnx2x *bp = netdev_priv(dev); |
4920 | |
	if (pci_num_vf(bp->pdev)) {
4922 | netdev_features_t changed = dev->features ^ features; |
4923 | |
4924 | /* Revert the requested changes in features if they |
4925 | * would require internal reload of PF in bnx2x_set_features(). |
4926 | */ |
4927 | if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { |
4928 | features &= ~NETIF_F_RXCSUM; |
4929 | features |= dev->features & NETIF_F_RXCSUM; |
4930 | } |
4931 | |
4932 | if (changed & NETIF_F_LOOPBACK) { |
4933 | features &= ~NETIF_F_LOOPBACK; |
4934 | features |= dev->features & NETIF_F_LOOPBACK; |
4935 | } |
4936 | } |
4937 | |
4938 | /* TPA requires Rx CSUM offloading */ |
4939 | if (!(features & NETIF_F_RXCSUM)) |
4940 | features &= ~NETIF_F_LRO; |
4941 | |
4942 | if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(mtu: dev->mtu)) |
4943 | features &= ~NETIF_F_GRO_HW; |
4944 | if (features & NETIF_F_GRO_HW) |
4945 | features &= ~NETIF_F_LRO; |
4946 | |
4947 | return features; |
4948 | } |
4949 | |
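/* ndo_set_features callback. Returns 1 after a successful reload to signal
 * that dev->features has already been updated by the driver.
 */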
4950 | int bnx2x_set_features(struct net_device *dev, netdev_features_t features) |
4951 | { |
4952 | struct bnx2x *bp = netdev_priv(dev); |
4953 | netdev_features_t changes = features ^ dev->features; |
4954 | bool bnx2x_reload = false; |
4955 | int rc; |
4956 | |
	/* VFs or non-SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
4959 | if (features & NETIF_F_LOOPBACK) { |
4960 | if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { |
4961 | bp->link_params.loopback_mode = LOOPBACK_BMAC; |
4962 | bnx2x_reload = true; |
4963 | } |
4964 | } else { |
4965 | if (bp->link_params.loopback_mode != LOOPBACK_NONE) { |
4966 | bp->link_params.loopback_mode = LOOPBACK_NONE; |
4967 | bnx2x_reload = true; |
4968 | } |
4969 | } |
4970 | } |
4971 | |
4972 | /* Don't care about GRO changes */ |
4973 | changes &= ~NETIF_F_GRO; |
4974 | |
4975 | if (changes) |
4976 | bnx2x_reload = true; |
4977 | |
4978 | if (bnx2x_reload) { |
4979 | if (bp->recovery_state == BNX2X_RECOVERY_DONE) { |
4980 | dev->features = features; |
4981 | rc = bnx2x_reload_if_running(dev); |
4982 | return rc ? rc : 1; |
4983 | } |
4984 | /* else: bnx2x_nic_load() will be called at end of recovery */ |
4985 | } |
4986 | |
4987 | return 0; |
4988 | } |
4989 | |
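/* ndo_tx_timeout callback: log a state dump for debugging and schedule an
 * sp_rtnl task so the device can be reset under rtnl_lock.
 */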
4990 | void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue) |
4991 | { |
4992 | struct bnx2x *bp = netdev_priv(dev); |
4993 | |
4994 | /* We want the information of the dump logged, |
4995 | * but calling bnx2x_panic() would kill all chances of recovery. |
4996 | */ |
4997 | if (!bp->panic) |
4998 | #ifndef BNX2X_STOP_ON_ERROR |
		bnx2x_panic_dump(bp, false);
5000 | #else |
5001 | bnx2x_panic(); |
5002 | #endif |
5003 | |
5004 | /* This allows the netif to be shutdown gracefully before resetting */ |
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5006 | } |
5007 | |
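/* PM suspend handler: detach the net device and unload the NIC if it was
 * running.
 */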
5008 | static int __maybe_unused bnx2x_suspend(struct device *dev_d) |
5009 | { |
5010 | struct pci_dev *pdev = to_pci_dev(dev_d); |
5011 | struct net_device *dev = pci_get_drvdata(pdev); |
5012 | struct bnx2x *bp; |
5013 | |
5014 | if (!dev) { |
5015 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n" ); |
5016 | return -ENODEV; |
5017 | } |
5018 | bp = netdev_priv(dev); |
5019 | |
5020 | rtnl_lock(); |
5021 | |
5022 | if (!netif_running(dev)) { |
5023 | rtnl_unlock(); |
5024 | return 0; |
5025 | } |
5026 | |
5027 | netif_device_detach(dev); |
5028 | |
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5030 | |
5031 | rtnl_unlock(); |
5032 | |
5033 | return 0; |
5034 | } |
5035 | |
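/* PM resume handler: re-attach the net device and reload the NIC, unless a
 * parity recovery is still in progress.
 */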
5036 | static int __maybe_unused bnx2x_resume(struct device *dev_d) |
5037 | { |
5038 | struct pci_dev *pdev = to_pci_dev(dev_d); |
5039 | struct net_device *dev = pci_get_drvdata(pdev); |
5040 | struct bnx2x *bp; |
5041 | int rc; |
5042 | |
5043 | if (!dev) { |
5044 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n" ); |
5045 | return -ENODEV; |
5046 | } |
5047 | bp = netdev_priv(dev); |
5048 | |
5049 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
5050 | BNX2X_ERR("Handling parity error recovery. Try again later\n" ); |
5051 | return -EAGAIN; |
5052 | } |
5053 | |
5054 | rtnl_lock(); |
5055 | |
5056 | if (!netif_running(dev)) { |
5057 | rtnl_unlock(); |
5058 | return 0; |
5059 | } |
5060 | |
5061 | netif_device_attach(dev); |
5062 | |
5063 | rc = bnx2x_nic_load(bp, LOAD_OPEN); |
5064 | |
5065 | rtnl_unlock(); |
5066 | |
5067 | return rc; |
5068 | } |
5069 | |
5070 | SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume); |
5071 | |
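/* Set the CDU (type A) validation values for the given CID in the ustorm
 * and xstorm aggregative context regions of an ETH connection context.
 */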
5072 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, |
5073 | u32 cid) |
5074 | { |
5075 | if (!cxt) { |
5076 | BNX2X_ERR("bad context pointer %p\n" , cxt); |
5077 | return; |
5078 | } |
5079 | |
5080 | /* ustorm cxt validation */ |
5081 | cxt->ustorm_ag_context.cdu_usage = |
5082 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), |
5083 | CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); |
5084 | /* xcontext validation */ |
5085 | cxt->xstorm_ag_context.cdu_reserved = |
5086 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), |
5087 | CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); |
5088 | } |
5089 | |
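/* Write the host-coalescing timeout for one status block index into CSTORM
 * internal memory; ticks is expressed in BNX2X_BTR units.
 */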
5090 | static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, |
5091 | u8 fw_sb_id, u8 sb_index, |
5092 | u8 ticks) |
5093 | { |
5094 | u32 addr = BAR_CSTRORM_INTMEM + |
5095 | CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); |
5096 | REG_WR8(bp, addr, ticks); |
5097 | DP(NETIF_MSG_IFUP, |
5098 | "port %x fw_sb_id %d sb_index %d ticks %d\n" , |
5099 | port, fw_sb_id, sb_index, ticks); |
5100 | } |
5101 | |
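/* Set or clear the HC_ENABLED flag for one status block index in CSTORM
 * internal memory using a read-modify-write of the flags byte.
 */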
5102 | static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, |
5103 | u16 fw_sb_id, u8 sb_index, |
5104 | u8 disable) |
5105 | { |
5106 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); |
5107 | u32 addr = BAR_CSTRORM_INTMEM + |
5108 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); |
5109 | u8 flags = REG_RD8(bp, addr); |
5110 | /* clear and set */ |
5111 | flags &= ~HC_INDEX_DATA_HC_ENABLED; |
5112 | flags |= enable_flag; |
5113 | REG_WR8(bp, addr, flags); |
5114 | DP(NETIF_MSG_IFUP, |
5115 | "port %x fw_sb_id %d sb_index %d disable %d\n" , |
5116 | port, fw_sb_id, sb_index, disable); |
5117 | } |
5118 | |
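/**
 * bnx2x_update_coalesce_sb_index - update coalescing for a status block index.
 *
 * @bp:		driver handle
 * @fw_sb_id:	firmware status block id
 * @sb_index:	index inside the status block
 * @disable:	when set, force host coalescing off for this index
 * @usec:	coalescing timeout in microseconds; 0 also disables coalescing
 *
 * Converts @usec to BNX2X_BTR ticks and programs both the timeout and the
 * enable flag for the given status block index.
 */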
5119 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, |
5120 | u8 sb_index, u8 disable, u16 usec) |
5121 | { |
5122 | int port = BP_PORT(bp); |
5123 | u8 ticks = usec / BNX2X_BTR; |
5124 | |
5125 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); |
5126 | |
5127 | disable = disable ? 1 : (usec ? 0 : 1); |
5128 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); |
5129 | } |
5130 | |
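/* Atomically set an sp_rtnl event flag (with full barriers around the
 * bit-op) and kick the slowpath rtnl task to process it.
 */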
5131 | void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, |
5132 | u32 verbose) |
5133 | { |
5134 | smp_mb__before_atomic(); |
	set_bit(flag, &bp->sp_rtnl_state);
5136 | smp_mb__after_atomic(); |
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5138 | flag); |
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
5140 | } |
5141 | |