1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
2 | /* QLogic qede NIC Driver |
3 | * Copyright (c) 2015-2017 QLogic Corporation |
4 | * Copyright (c) 2019-2020 Marvell International Ltd. |
5 | */ |
6 | |
7 | #include <linux/netdevice.h> |
8 | #include <linux/etherdevice.h> |
9 | #include <linux/skbuff.h> |
10 | #include <linux/bpf_trace.h> |
11 | #include <net/udp_tunnel.h> |
12 | #include <linux/ip.h> |
13 | #include <net/gro.h> |
14 | #include <net/ipv6.h> |
15 | #include <net/tcp.h> |
16 | #include <linux/if_ether.h> |
17 | #include <linux/if_vlan.h> |
18 | #include <net/ip6_checksum.h> |
19 | #include "qede_ptp.h" |
20 | |
21 | #include <linux/qed/qed_if.h> |
22 | #include "qede.h" |
23 | /********************************* |
24 | * Content also used by slowpath * |
25 | *********************************/ |
26 | |
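/* Allocate a page-backed Rx buffer, DMA-map it and publish it on the Rx
 * BD ring. When allow_lazy is set, allocation is deferred as long as the
 * ring still holds enough filled buffers.
 */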
27 | int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) |
28 | { |
29 | struct sw_rx_data *sw_rx_data; |
30 | struct eth_rx_bd *rx_bd; |
31 | dma_addr_t mapping; |
32 | struct page *data; |
33 | |
34 | /* In case lazy-allocation is allowed, postpone allocation until the |
35 | * end of the NAPI run. We'd still need to make sure the Rx ring has |
36 | * sufficient buffers to guarantee an additional Rx interrupt. |
37 | */ |
38 | if (allow_lazy && likely(rxq->filled_buffers > 12)) { |
39 | rxq->filled_buffers--; |
40 | return 0; |
41 | } |
42 | |
	data = alloc_pages(GFP_ATOMIC, 0);
44 | if (unlikely(!data)) |
45 | return -ENOMEM; |
46 | |
	/* Map the entire page, as it may hold several Rx buffer
	 * segments of rx_buf_seg_size each.
	 */
50 | mapping = dma_map_page(rxq->dev, data, 0, |
51 | PAGE_SIZE, rxq->data_direction); |
52 | if (unlikely(dma_mapping_error(rxq->dev, mapping))) { |
53 | __free_page(data); |
54 | return -ENOMEM; |
55 | } |
56 | |
57 | sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; |
58 | sw_rx_data->page_offset = 0; |
59 | sw_rx_data->data = data; |
60 | sw_rx_data->mapping = mapping; |
61 | |
62 | /* Advance PROD and get BD pointer */ |
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
64 | WARN_ON(!rx_bd); |
65 | rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); |
66 | rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + |
67 | rxq->rx_headroom); |
68 | |
69 | rxq->sw_rx_prod++; |
70 | rxq->filled_buffers++; |
71 | |
72 | return 0; |
73 | } |
74 | |
75 | /* Unmap the data and free skb */ |
76 | int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) |
77 | { |
78 | u16 idx = txq->sw_tx_cons; |
79 | struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; |
80 | struct eth_tx_1st_bd *first_bd; |
81 | struct eth_tx_bd *tx_data_bd; |
82 | int bds_consumed = 0; |
83 | int nbds; |
84 | bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; |
85 | int i, split_bd_len = 0; |
86 | |
87 | if (unlikely(!skb)) { |
88 | DP_ERR(edev, |
89 | "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n" , |
90 | idx, txq->sw_tx_cons, txq->sw_tx_prod); |
91 | return -1; |
92 | } |
93 | |
94 | *len = skb->len; |
95 | |
	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
97 | |
98 | bds_consumed++; |
99 | |
100 | nbds = first_bd->data.nbds; |
101 | |
102 | if (data_split) { |
103 | struct eth_tx_bd *split = (struct eth_tx_bd *) |
			qed_chain_consume(&txq->tx_pbl);
105 | split_bd_len = BD_UNMAP_LEN(split); |
106 | bds_consumed++; |
107 | } |
108 | dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), |
109 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); |
110 | |
111 | /* Unmap the data of the skb frags */ |
112 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { |
113 | tx_data_bd = (struct eth_tx_bd *) |
			qed_chain_consume(&txq->tx_pbl);
115 | dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), |
116 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); |
117 | } |
118 | |
119 | while (bds_consumed++ < nbds) |
		qed_chain_consume(&txq->tx_pbl);
121 | |
122 | /* Free skb */ |
123 | dev_kfree_skb_any(skb); |
124 | txq->sw_tx_ring.skbs[idx].skb = NULL; |
125 | txq->sw_tx_ring.skbs[idx].flags = 0; |
126 | |
127 | return 0; |
128 | } |
129 | |
130 | /* Unmap the data and free skb when mapping failed during start_xmit */ |
131 | static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, |
132 | struct eth_tx_1st_bd *first_bd, |
133 | int nbd, bool data_split) |
134 | { |
135 | u16 idx = txq->sw_tx_prod; |
136 | struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; |
137 | struct eth_tx_bd *tx_data_bd; |
138 | int i, split_bd_len = 0; |
139 | |
140 | /* Return prod to its position before this skb was handled */ |
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
143 | |
	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
145 | |
146 | if (data_split) { |
147 | struct eth_tx_bd *split = (struct eth_tx_bd *) |
			qed_chain_produce(&txq->tx_pbl);
149 | split_bd_len = BD_UNMAP_LEN(split); |
150 | nbd--; |
151 | } |
152 | |
153 | dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), |
154 | BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); |
155 | |
156 | /* Unmap the data of the skb frags */ |
157 | for (i = 0; i < nbd; i++) { |
158 | tx_data_bd = (struct eth_tx_bd *) |
			qed_chain_produce(&txq->tx_pbl);
160 | if (tx_data_bd->nbytes) |
161 | dma_unmap_page(txq->dev, |
162 | BD_UNMAP_ADDR(tx_data_bd), |
163 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); |
164 | } |
165 | |
166 | /* Return again prod to its position before this skb was handled */ |
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
169 | |
170 | /* Free skb */ |
171 | dev_kfree_skb_any(skb); |
172 | txq->sw_tx_ring.skbs[idx].skb = NULL; |
173 | txq->sw_tx_ring.skbs[idx].flags = 0; |
174 | } |
175 | |
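/* Classify the skb into XMIT_* flags (L4 csum, LSO, encapsulation) and
 * flag IPv6 extension headers via ipv6_ext
 */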
176 | static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) |
177 | { |
178 | u32 rc = XMIT_L4_CSUM; |
179 | __be16 l3_proto; |
180 | |
181 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
182 | return XMIT_PLAIN; |
183 | |
184 | l3_proto = vlan_get_protocol(skb); |
185 | if (l3_proto == htons(ETH_P_IPV6) && |
186 | (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) |
187 | *ipv6_ext = 1; |
188 | |
189 | if (skb->encapsulation) { |
190 | rc |= XMIT_ENC; |
191 | if (skb_is_gso(skb)) { |
192 | unsigned short gso_type = skb_shinfo(skb)->gso_type; |
193 | |
194 | if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || |
195 | (gso_type & SKB_GSO_GRE_CSUM)) |
196 | rc |= XMIT_ENC_GSO_L4_CSUM; |
197 | |
198 | rc |= XMIT_LSO; |
199 | return rc; |
200 | } |
201 | } |
202 | |
203 | if (skb_is_gso(skb)) |
204 | rc |= XMIT_LSO; |
205 | |
206 | return rc; |
207 | } |
208 | |
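/* Fill the 2nd/3rd BD parsing parameters FW needs for IPv6 packets with
 * extension headers, which the HW parser can't handle on its own
 */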
209 | static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, |
210 | struct eth_tx_2nd_bd *second_bd, |
211 | struct eth_tx_3rd_bd *third_bd) |
212 | { |
213 | u8 l4_proto; |
214 | u16 bd2_bits1 = 0, bd2_bits2 = 0; |
215 | |
216 | bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); |
217 | |
218 | bd2_bits2 |= ((skb_transport_offset(skb) >> 1) & |
219 | ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) |
220 | << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; |
221 | |
222 | bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << |
223 | ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); |
224 | |
225 | if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) |
226 | l4_proto = ipv6_hdr(skb)->nexthdr; |
227 | else |
228 | l4_proto = ip_hdr(skb)->protocol; |
229 | |
230 | if (l4_proto == IPPROTO_UDP) |
231 | bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; |
232 | |
233 | if (third_bd) |
234 | third_bd->data.bitfields |= |
235 | cpu_to_le16(((tcp_hdrlen(skb) / 4) & |
236 | ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << |
237 | ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); |
238 | |
239 | second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); |
240 | second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); |
241 | } |
242 | |
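/* DMA-map a single skb frag and set its address/length in the given BD */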
243 | static int map_frag_to_bd(struct qede_tx_queue *txq, |
244 | skb_frag_t *frag, struct eth_tx_bd *bd) |
245 | { |
246 | dma_addr_t mapping; |
247 | |
248 | /* Map skb non-linear frag data for DMA */ |
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
251 | if (unlikely(dma_mapping_error(txq->dev, mapping))) |
252 | return -ENOMEM; |
253 | |
254 | /* Setup the data pointer of the frag data */ |
255 | BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); |
256 | |
257 | return 0; |
258 | } |
259 | |
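/* Return the total header length up to and including the (inner) TCP header */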
260 | static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) |
261 | { |
262 | if (is_encap_pkt) |
263 | return skb_inner_tcp_all_headers(skb); |
264 | |
265 | return skb_tcp_all_headers(skb); |
266 | } |
267 | |
268 | /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ |
269 | #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) |
270 | static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) |
271 | { |
272 | int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; |
273 | |
274 | if (xmit_type & XMIT_LSO) { |
275 | int hlen; |
276 | |
		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
278 | |
279 | /* linear payload would require its own BD */ |
280 | if (skb_headlen(skb) > hlen) |
281 | allowed_frags--; |
282 | } |
283 | |
284 | return (skb_shinfo(skb)->nr_frags > allowed_frags); |
285 | } |
286 | #endif |
287 | |
288 | static inline void qede_update_tx_producer(struct qede_tx_queue *txq) |
289 | { |
290 | /* wmb makes sure that the BDs data is updated before updating the |
291 | * producer, otherwise FW may read old data from the BDs. |
292 | */ |
293 | wmb(); |
294 | barrier(); |
	writel(txq->tx_db.raw, txq->doorbell_addr);
296 | |
297 | /* Fence required to flush the write combined buffer, since another |
298 | * CPU may write to the same doorbell address and data may be lost |
299 | * due to relaxed order nature of write combined bar. |
300 | */ |
301 | wmb(); |
302 | } |
303 | |
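/* Place a single XDP frame/buffer on the XDP Tx ring; the doorbell is rung
 * separately by the caller
 */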
304 | static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, |
305 | u16 len, struct page *page, struct xdp_frame *xdpf) |
306 | { |
307 | struct eth_tx_1st_bd *bd; |
308 | struct sw_tx_xdp *xdp; |
309 | u16 val; |
310 | |
311 | if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= |
312 | txq->num_tx_buffers)) { |
313 | txq->stopped_cnt++; |
314 | return -ENOMEM; |
315 | } |
316 | |
	bd = qed_chain_produce(&txq->tx_pbl);
318 | bd->data.nbds = 1; |
319 | bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); |
320 | |
321 | val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << |
322 | ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; |
323 | |
324 | bd->data.bitfields = cpu_to_le16(val); |
325 | |
326 | /* We can safely ignore the offset, as it's 0 for XDP */ |
327 | BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len); |
328 | |
329 | xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; |
330 | xdp->mapping = dma; |
331 | xdp->page = page; |
332 | xdp->xdpf = xdpf; |
333 | |
334 | txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; |
335 | |
336 | return 0; |
337 | } |
338 | |
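/* .ndo_xdp_xmit callback - map and queue a batch of XDP frames on one of
 * the XDP Tx queues, ringing the doorbell only if XDP_XMIT_FLUSH is set
 */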
339 | int qede_xdp_transmit(struct net_device *dev, int n_frames, |
340 | struct xdp_frame **frames, u32 flags) |
341 | { |
342 | struct qede_dev *edev = netdev_priv(dev); |
343 | struct device *dmadev = &edev->pdev->dev; |
344 | struct qede_tx_queue *xdp_tx; |
345 | struct xdp_frame *xdpf; |
346 | dma_addr_t mapping; |
347 | int i, nxmit = 0; |
348 | u16 xdp_prod; |
349 | |
350 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
351 | return -EINVAL; |
352 | |
353 | if (unlikely(!netif_running(dev))) |
354 | return -ENETDOWN; |
355 | |
356 | i = smp_processor_id() % edev->total_xdp_queues; |
357 | xdp_tx = edev->fp_array[i].xdp_tx; |
358 | |
	spin_lock(&xdp_tx->xdp_tx_lock);
360 | |
361 | for (i = 0; i < n_frames; i++) { |
362 | xdpf = frames[i]; |
363 | |
364 | mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, |
365 | DMA_TO_DEVICE); |
366 | if (unlikely(dma_mapping_error(dmadev, mapping))) |
367 | break; |
368 | |
369 | if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, |
370 | NULL, xdpf))) |
371 | break; |
372 | nxmit++; |
373 | } |
374 | |
375 | if (flags & XDP_XMIT_FLUSH) { |
		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
377 | |
378 | xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); |
		qede_update_tx_producer(xdp_tx);
380 | } |
381 | |
	spin_unlock(&xdp_tx->xdp_tx_lock);
383 | |
384 | return nxmit; |
385 | } |
386 | |
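/* Check whether the Tx queue has completions pending in HW */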
387 | int qede_txq_has_work(struct qede_tx_queue *txq) |
388 | { |
389 | u16 hw_bd_cons; |
390 | |
391 | /* Tell compiler that consumer and producer can change */ |
392 | barrier(); |
393 | hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); |
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
395 | return 0; |
396 | |
	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
398 | } |
399 | |
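/* Reclaim completed XDP Tx buffers - unmap them and release the
 * frames/pages they carried
 */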
400 | static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) |
401 | { |
402 | struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; |
403 | struct device *dev = &edev->pdev->dev; |
404 | struct xdp_frame *xdpf; |
405 | u16 hw_bd_cons; |
406 | |
407 | hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); |
408 | barrier(); |
409 | |
	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
411 | xdp_info = xdp_arr + txq->sw_tx_cons; |
412 | xdpf = xdp_info->xdpf; |
413 | |
414 | if (xdpf) { |
415 | dma_unmap_single(dev, xdp_info->mapping, xdpf->len, |
416 | DMA_TO_DEVICE); |
417 | xdp_return_frame(xdpf); |
418 | |
419 | xdp_info->xdpf = NULL; |
420 | } else { |
421 | dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, |
422 | DMA_BIDIRECTIONAL); |
423 | __free_page(xdp_info->page); |
424 | } |
425 | |
		qed_chain_consume(&txq->tx_pbl);
427 | txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; |
428 | txq->xmit_pkts++; |
429 | } |
430 | } |
431 | |
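/* Process Tx completions: free the sent skbs, update BQL accounting and
 * re-wake the queue if it was stopped and enough BDs became available
 */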
432 | static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) |
433 | { |
434 | unsigned int pkts_compl = 0, bytes_compl = 0; |
435 | struct netdev_queue *netdev_txq; |
436 | u16 hw_bd_cons; |
437 | int rc; |
438 | |
	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
440 | |
441 | hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); |
442 | barrier(); |
443 | |
	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
450 | hw_bd_cons, |
451 | qed_chain_get_cons_idx(&txq->tx_pbl)); |
452 | break; |
453 | } |
454 | |
455 | bytes_compl += len; |
456 | pkts_compl++; |
457 | txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; |
458 | txq->xmit_pkts++; |
459 | } |
460 | |
	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
462 | |
463 | /* Need to make the tx_bd_cons update visible to start_xmit() |
464 | * before checking for netif_tx_queue_stopped(). Without the |
465 | * memory barrier, there is a small possibility that |
466 | * start_xmit() will miss it and cause the queue to be stopped |
467 | * forever. |
468 | * On the other hand we need an rmb() here to ensure the proper |
469 | * ordering of bit testing in the following |
470 | * netif_tx_queue_stopped(txq) call. |
471 | */ |
472 | smp_mb(); |
473 | |
474 | if (unlikely(netif_tx_queue_stopped(netdev_txq))) { |
475 | /* Taking tx_lock is needed to prevent reenabling the queue |
		 * while it's empty. This could have happened if rx_action() gets
477 | * suspended in qede_tx_int() after the condition before |
478 | * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): |
479 | * |
480 | * stops the queue->sees fresh tx_bd_cons->releases the queue-> |
481 | * sends some packets consuming the whole queue again-> |
482 | * stops the queue |
483 | */ |
484 | |
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
497 | } |
498 | |
499 | return 0; |
500 | } |
501 | |
502 | bool qede_has_rx_work(struct qede_rx_queue *rxq) |
503 | { |
504 | u16 hw_comp_cons, sw_comp_cons; |
505 | |
506 | /* Tell compiler that status block fields can change */ |
507 | barrier(); |
508 | |
509 | hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); |
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
511 | |
512 | return hw_comp_cons != sw_comp_cons; |
513 | } |
514 | |
515 | static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) |
516 | { |
	qed_chain_consume(&rxq->rx_bd_ring);
518 | rxq->sw_rx_cons++; |
519 | } |
520 | |
521 | /* This function reuses the buffer(from an offset) from |
522 | * consumer index to producer index in the bd ring |
523 | */ |
524 | static inline void qede_reuse_page(struct qede_rx_queue *rxq, |
525 | struct sw_rx_data *curr_cons) |
526 | { |
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
528 | struct sw_rx_data *curr_prod; |
529 | dma_addr_t new_mapping; |
530 | |
531 | curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; |
532 | *curr_prod = *curr_cons; |
533 | |
534 | new_mapping = curr_prod->mapping + curr_prod->page_offset; |
535 | |
536 | rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); |
537 | rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + |
538 | rxq->rx_headroom); |
539 | |
540 | rxq->sw_rx_prod++; |
541 | curr_cons->data = NULL; |
542 | } |
543 | |
544 | /* In case of allocation failures reuse buffers |
545 | * from consumer index to produce buffers for firmware |
546 | */ |
547 | void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) |
548 | { |
549 | struct sw_rx_data *curr_cons; |
550 | |
551 | for (; count > 0; count--) { |
552 | curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; |
553 | qede_reuse_page(rxq, curr_cons); |
554 | qede_rx_bd_ring_consume(rxq); |
555 | } |
556 | } |
557 | |
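/* Advance to the next buffer segment within the current page; once the
 * page is exhausted, replace it, otherwise recycle it back to the ring
 */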
558 | static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, |
559 | struct sw_rx_data *curr_cons) |
560 | { |
561 | /* Move to the next segment in the page */ |
562 | curr_cons->page_offset += rxq->rx_buf_seg_size; |
563 | |
564 | if (curr_cons->page_offset == PAGE_SIZE) { |
565 | if (unlikely(qede_alloc_rx_buffer(rxq, true))) { |
566 | /* Since we failed to allocate new buffer |
567 | * current buffer can be used again. |
568 | */ |
569 | curr_cons->page_offset -= rxq->rx_buf_seg_size; |
570 | |
571 | return -ENOMEM; |
572 | } |
573 | |
574 | dma_unmap_page(rxq->dev, curr_cons->mapping, |
575 | PAGE_SIZE, rxq->data_direction); |
576 | } else { |
577 | /* Increment refcount of the page as we don't want |
578 | * network stack to take the ownership of the page |
579 | * which can be recycled multiple times by the driver. |
580 | */ |
		page_ref_inc(curr_cons->data);
582 | qede_reuse_page(rxq, curr_cons); |
583 | } |
584 | |
585 | return 0; |
586 | } |
587 | |
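/* Publish the Rx BD and CQE producer values to the device */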
588 | void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) |
589 | { |
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
592 | struct eth_rx_prod_data rx_prods = {0}; |
593 | |
594 | /* Update producers */ |
595 | rx_prods.bd_prod = cpu_to_le16(bd_prod); |
596 | rx_prods.cqe_prod = cpu_to_le16(cqe_prod); |
597 | |
598 | /* Make sure that the BD and SGE data is updated before updating the |
599 | * producers since FW might read the BD/SGE right after the producer |
600 | * is updated. |
601 | */ |
602 | wmb(); |
603 | |
	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
606 | } |
607 | |
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
609 | { |
610 | enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; |
611 | enum rss_hash_type htype; |
612 | u32 hash = 0; |
613 | |
614 | htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); |
615 | if (htype) { |
616 | hash_type = ((htype == RSS_HASH_TYPE_IPV4) || |
617 | (htype == RSS_HASH_TYPE_IPV6)) ? |
618 | PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; |
619 | hash = le32_to_cpu(rss_hash); |
620 | } |
	skb_set_hash(skb, hash, hash_type);
622 | } |
623 | |
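/* Translate the HW checksum verdict into skb->ip_summed/csum_level */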
624 | static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) |
625 | { |
626 | skb_checksum_none_assert(skb); |
627 | |
628 | if (csum_flag & QEDE_CSUM_UNNECESSARY) |
629 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
630 | |
631 | if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) { |
632 | skb->csum_level = 1; |
633 | skb->encapsulation = 1; |
634 | } |
635 | } |
636 | |
637 | static inline void qede_skb_receive(struct qede_dev *edev, |
638 | struct qede_fastpath *fp, |
639 | struct qede_rx_queue *rxq, |
640 | struct sk_buff *skb, u16 vlan_tag) |
641 | { |
642 | if (vlan_tag) |
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
644 | |
	napi_gro_receive(&fp->napi, skb);
646 | } |
647 | |
648 | static void qede_set_gro_params(struct qede_dev *edev, |
649 | struct sk_buff *skb, |
650 | struct eth_fast_path_rx_tpa_start_cqe *cqe) |
651 | { |
652 | u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); |
653 | |
654 | if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & |
655 | PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) |
656 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
657 | else |
658 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
659 | |
660 | skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - |
661 | cqe->header_len; |
662 | } |
663 | |
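/* Attach the current Rx buffer as a frag of an in-progress TPA aggregation */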
664 | static int qede_fill_frag_skb(struct qede_dev *edev, |
665 | struct qede_rx_queue *rxq, |
666 | u8 tpa_agg_index, u16 len_on_bd) |
667 | { |
668 | struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & |
669 | NUM_RX_BDS_MAX]; |
670 | struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; |
671 | struct sk_buff *skb = tpa_info->skb; |
672 | |
673 | if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) |
674 | goto out; |
675 | |
676 | /* Add one frag and update the appropriate fields in the skb */ |
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data,
			   current_bd->page_offset + rxq->rx_headroom,
			   len_on_bd);
681 | |
682 | if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { |
683 | /* Incr page ref count to reuse on allocation failure |
684 | * so that it doesn't get freed while freeing SKB. |
685 | */ |
		page_ref_inc(current_bd->data);
687 | goto out; |
688 | } |
689 | |
690 | qede_rx_bd_ring_consume(rxq); |
691 | |
692 | skb->data_len += len_on_bd; |
693 | skb->truesize += rxq->rx_buf_seg_size; |
694 | skb->len += len_on_bd; |
695 | |
696 | return 0; |
697 | |
698 | out: |
699 | tpa_info->state = QEDE_AGG_STATE_ERROR; |
	qede_recycle_rx_bd_ring(rxq, 1);
701 | |
702 | return -ENOMEM; |
703 | } |
704 | |
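/* Check the parsing flags for the presence of a tunnel */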
705 | static bool qede_tunn_exist(u16 flag) |
706 | { |
707 | return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << |
708 | PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); |
709 | } |
710 | |
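/* Validate the inner/outer checksums of a tunneled packet based on the
 * parsing-and-error flags reported by HW
 */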
711 | static u8 qede_check_tunn_csum(u16 flag) |
712 | { |
713 | u16 csum_flag = 0; |
714 | u8 tcsum = 0; |
715 | |
716 | if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << |
717 | PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) |
718 | csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << |
719 | PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; |
720 | |
721 | if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << |
722 | PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { |
723 | csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << |
724 | PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; |
725 | tcsum = QEDE_TUNN_CSUM_UNNECESSARY; |
726 | } |
727 | |
728 | csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << |
729 | PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | |
730 | PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << |
731 | PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; |
732 | |
733 | if (csum_flag & flag) |
734 | return QEDE_CSUM_ERROR; |
735 | |
736 | return QEDE_CSUM_UNNECESSARY | tcsum; |
737 | } |
738 | |
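/* Build an skb around the already-mapped page buffer, without copying */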
739 | static inline struct sk_buff * |
740 | qede_build_skb(struct qede_rx_queue *rxq, |
741 | struct sw_rx_data *bd, u16 len, u16 pad) |
742 | { |
743 | struct sk_buff *skb; |
744 | void *buf; |
745 | |
746 | buf = page_address(bd->data) + bd->page_offset; |
	skb = build_skb(buf, rxq->rx_buf_seg_size);
748 | |
749 | if (unlikely(!skb)) |
750 | return NULL; |
751 | |
	skb_reserve(skb, pad);
753 | skb_put(skb, len); |
754 | |
755 | return skb; |
756 | } |
757 | |
758 | static struct sk_buff * |
759 | qede_tpa_rx_build_skb(struct qede_dev *edev, |
760 | struct qede_rx_queue *rxq, |
761 | struct sw_rx_data *bd, u16 len, u16 pad, |
762 | bool alloc_skb) |
763 | { |
764 | struct sk_buff *skb; |
765 | |
766 | skb = qede_build_skb(rxq, bd, len, pad); |
767 | bd->page_offset += rxq->rx_buf_seg_size; |
768 | |
769 | if (bd->page_offset == PAGE_SIZE) { |
770 | if (unlikely(qede_alloc_rx_buffer(rxq, true))) { |
771 | DP_NOTICE(edev, |
772 | "Failed to allocate RX buffer for tpa start\n" ); |
773 | bd->page_offset -= rxq->rx_buf_seg_size; |
774 | page_ref_inc(page: bd->data); |
775 | dev_kfree_skb_any(skb); |
776 | return NULL; |
777 | } |
778 | } else { |
		page_ref_inc(bd->data);
		qede_reuse_page(rxq, bd);
781 | } |
782 | |
783 | /* We've consumed the first BD and prepared an SKB */ |
784 | qede_rx_bd_ring_consume(rxq); |
785 | |
786 | return skb; |
787 | } |
788 | |
789 | static struct sk_buff * |
790 | qede_rx_build_skb(struct qede_dev *edev, |
791 | struct qede_rx_queue *rxq, |
792 | struct sw_rx_data *bd, u16 len, u16 pad) |
793 | { |
794 | struct sk_buff *skb = NULL; |
795 | |
796 | /* For smaller frames still need to allocate skb, memcpy |
797 | * data and benefit in reusing the page segment instead of |
798 | * un-mapping it. |
799 | */ |
800 | if ((len + pad <= edev->rx_copybreak)) { |
801 | unsigned int offset = bd->page_offset + pad; |
802 | |
		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
804 | if (unlikely(!skb)) |
805 | return NULL; |
806 | |
		skb_reserve(skb, pad);
		skb_put_data(skb, page_address(bd->data) + offset, len);
		qede_reuse_page(rxq, bd);
810 | goto out; |
811 | } |
812 | |
813 | skb = qede_build_skb(rxq, bd, len, pad); |
814 | |
815 | if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { |
816 | /* Incr page ref count to reuse on allocation failure so |
817 | * that it doesn't get freed while freeing SKB [as its |
818 | * already mapped there]. |
819 | */ |
		page_ref_inc(bd->data);
821 | dev_kfree_skb_any(skb); |
822 | return NULL; |
823 | } |
824 | out: |
825 | /* We've consumed the first BD and prepared an SKB */ |
826 | qede_rx_bd_ring_consume(rxq); |
827 | |
828 | return skb; |
829 | } |
830 | |
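/* Begin a TPA (HW GRO) aggregation - build an skb around the first BD and
 * record the aggregation state, vlan tag and rxhash
 */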
831 | static void qede_tpa_start(struct qede_dev *edev, |
832 | struct qede_rx_queue *rxq, |
833 | struct eth_fast_path_rx_tpa_start_cqe *cqe) |
834 | { |
835 | struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; |
836 | struct sw_rx_data *sw_rx_data_cons; |
837 | u16 pad; |
838 | |
839 | sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; |
840 | pad = cqe->placement_offset + rxq->rx_headroom; |
841 | |
	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
					      le16_to_cpu(cqe->len_on_first_bd),
					      pad, false);
845 | tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; |
846 | tpa_info->buffer.mapping = sw_rx_data_cons->mapping; |
847 | |
848 | if (unlikely(!tpa_info->skb)) { |
849 | DP_NOTICE(edev, "Failed to allocate SKB for gro\n" ); |
850 | |
851 | /* Consume from ring but do not produce since |
852 | * this might be used by FW still, it will be re-used |
853 | * at TPA end. |
854 | */ |
855 | tpa_info->tpa_start_fail = true; |
856 | qede_rx_bd_ring_consume(rxq); |
857 | tpa_info->state = QEDE_AGG_STATE_ERROR; |
858 | goto cons_buf; |
859 | } |
860 | |
861 | tpa_info->frag_id = 0; |
862 | tpa_info->state = QEDE_AGG_STATE_START; |
863 | |
864 | if ((le16_to_cpu(cqe->pars_flags.flags) >> |
865 | PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & |
866 | PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) |
867 | tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); |
868 | else |
869 | tpa_info->vlan_tag = 0; |
870 | |
	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
872 | |
873 | /* This is needed in order to enable forwarding support */ |
	qede_set_gro_params(edev, tpa_info->skb, cqe);
875 | |
876 | cons_buf: /* We still need to handle bd_len_list to consume buffers */ |
877 | if (likely(cqe->bw_ext_bd_len_list[0])) |
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
880 | |
881 | if (unlikely(cqe->bw_ext_bd_len_list[1])) { |
882 | DP_ERR(edev, |
883 | "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n" ); |
884 | tpa_info->state = QEDE_AGG_STATE_ERROR; |
885 | } |
886 | } |
887 | |
888 | #ifdef CONFIG_INET |
889 | static void qede_gro_ip_csum(struct sk_buff *skb) |
890 | { |
891 | const struct iphdr *iph = ip_hdr(skb); |
892 | struct tcphdr *th; |
893 | |
	skb_set_transport_header(skb, sizeof(struct iphdr));
895 | th = tcp_hdr(skb); |
896 | |
	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
899 | |
900 | tcp_gro_complete(skb); |
901 | } |
902 | |
903 | static void qede_gro_ipv6_csum(struct sk_buff *skb) |
904 | { |
905 | struct ipv6hdr *iph = ipv6_hdr(skb); |
906 | struct tcphdr *th; |
907 | |
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
909 | th = tcp_hdr(skb); |
910 | |
	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
913 | tcp_gro_complete(skb); |
914 | } |
915 | #endif |
916 | |
917 | static void qede_gro_receive(struct qede_dev *edev, |
918 | struct qede_fastpath *fp, |
919 | struct sk_buff *skb, |
920 | u16 vlan_tag) |
921 | { |
	/* FW can send a single MTU-sized packet from the GRO flow
	 * due to aggregation timeout/last segment etc., which is not
	 * expected to be a GRO packet. If an skb has no frags, simply
	 * push it to the stack as a non-GSO skb.
	 */
927 | if (unlikely(!skb->data_len)) { |
928 | skb_shinfo(skb)->gso_type = 0; |
929 | skb_shinfo(skb)->gso_size = 0; |
930 | goto send_skb; |
931 | } |
932 | |
933 | #ifdef CONFIG_INET |
934 | if (skb_shinfo(skb)->gso_size) { |
935 | skb_reset_network_header(skb); |
936 | |
937 | switch (skb->protocol) { |
938 | case htons(ETH_P_IP): |
939 | qede_gro_ip_csum(skb); |
940 | break; |
941 | case htons(ETH_P_IPV6): |
942 | qede_gro_ipv6_csum(skb); |
943 | break; |
944 | default: |
945 | DP_ERR(edev, |
946 | "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n" , |
947 | ntohs(skb->protocol)); |
948 | } |
949 | } |
950 | #endif |
951 | |
952 | send_skb: |
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
955 | } |
956 | |
957 | static inline void qede_tpa_cont(struct qede_dev *edev, |
958 | struct qede_rx_queue *rxq, |
959 | struct eth_fast_path_rx_tpa_cont_cqe *cqe) |
960 | { |
961 | int i; |
962 | |
963 | for (i = 0; cqe->len_list[i]; i++) |
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
966 | |
967 | if (unlikely(i > 1)) |
968 | DP_ERR(edev, |
969 | "Strange - TPA cont with more than a single len_list entry\n" ); |
970 | } |
971 | |
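/* Complete a TPA aggregation - attach the trailing BDs, finalize the skb
 * and hand it to GRO; returns 1 if a packet was passed to the stack
 */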
972 | static int qede_tpa_end(struct qede_dev *edev, |
973 | struct qede_fastpath *fp, |
974 | struct eth_fast_path_rx_tpa_end_cqe *cqe) |
975 | { |
976 | struct qede_rx_queue *rxq = fp->rxq; |
977 | struct qede_agg_info *tpa_info; |
978 | struct sk_buff *skb; |
979 | int i; |
980 | |
981 | tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; |
982 | skb = tpa_info->skb; |
983 | |
984 | if (tpa_info->buffer.page_offset == PAGE_SIZE) |
985 | dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, |
986 | PAGE_SIZE, rxq->data_direction); |
987 | |
988 | for (i = 0; cqe->len_list[i]; i++) |
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
991 | if (unlikely(i > 1)) |
992 | DP_ERR(edev, |
993 | "Strange - TPA emd with more than a single len_list entry\n" ); |
994 | |
995 | if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) |
996 | goto err; |
997 | |
998 | /* Sanity */ |
999 | if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) |
1000 | DP_ERR(edev, |
1001 | "Strange - TPA had %02x BDs, but SKB has only %d frags\n" , |
1002 | cqe->num_of_bds, tpa_info->frag_id); |
1003 | if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) |
1004 | DP_ERR(edev, |
1005 | "Strange - total packet len [cqe] is %4x but SKB has len %04x\n" , |
1006 | le16_to_cpu(cqe->total_packet_len), skb->len); |
1007 | |
1008 | /* Finalize the SKB */ |
	skb->protocol = eth_type_trans(skb, edev->ndev);
1010 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1011 | |
1012 | /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count |
1013 | * to skb_shinfo(skb)->gso_segs |
1014 | */ |
1015 | NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); |
1016 | |
	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1018 | |
1019 | tpa_info->state = QEDE_AGG_STATE_NONE; |
1020 | |
1021 | return 1; |
1022 | err: |
1023 | tpa_info->state = QEDE_AGG_STATE_NONE; |
1024 | |
1025 | if (tpa_info->tpa_start_fail) { |
		qede_reuse_page(rxq, &tpa_info->buffer);
1027 | tpa_info->tpa_start_fail = false; |
1028 | } |
1029 | |
	dev_kfree_skb_any(tpa_info->skb);
1031 | tpa_info->skb = NULL; |
1032 | return 0; |
1033 | } |
1034 | |
1035 | static u8 qede_check_notunn_csum(u16 flag) |
1036 | { |
1037 | u16 csum_flag = 0; |
1038 | u8 csum = 0; |
1039 | |
1040 | if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << |
1041 | PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { |
1042 | csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << |
1043 | PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; |
1044 | csum = QEDE_CSUM_UNNECESSARY; |
1045 | } |
1046 | |
1047 | csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << |
1048 | PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; |
1049 | |
1050 | if (csum_flag & flag) |
1051 | return QEDE_CSUM_ERROR; |
1052 | |
1053 | return csum; |
1054 | } |
1055 | |
1056 | static u8 qede_check_csum(u16 flag) |
1057 | { |
1058 | if (!qede_tunn_exist(flag)) |
1059 | return qede_check_notunn_csum(flag); |
1060 | else |
1061 | return qede_check_tunn_csum(flag); |
1062 | } |
1063 | |
1064 | static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, |
1065 | u16 flag) |
1066 | { |
1067 | u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; |
1068 | |
1069 | if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << |
1070 | ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || |
1071 | (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << |
1072 | PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) |
1073 | return true; |
1074 | |
1075 | return false; |
1076 | } |
1077 | |
1078 | /* Return true iff packet is to be passed to stack */ |
1079 | static bool qede_rx_xdp(struct qede_dev *edev, |
1080 | struct qede_fastpath *fp, |
1081 | struct qede_rx_queue *rxq, |
1082 | struct bpf_prog *prog, |
1083 | struct sw_rx_data *bd, |
1084 | struct eth_fast_path_rx_reg_cqe *cqe, |
1085 | u16 *data_offset, u16 *len) |
1086 | { |
1087 | struct xdp_buff xdp; |
1088 | enum xdp_action act; |
1089 | |
	xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
	xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
			 *len, false);
1093 | |
	act = bpf_prog_run_xdp(prog, &xdp);
1095 | |
1096 | /* Recalculate, as XDP might have changed the headers */ |
1097 | *data_offset = xdp.data - xdp.data_hard_start; |
1098 | *len = xdp.data_end - xdp.data; |
1099 | |
1100 | if (act == XDP_PASS) |
1101 | return true; |
1102 | |
1103 | /* Count number of packets not to be passed to stack */ |
1104 | rxq->xdp_no_pass++; |
1105 | |
1106 | switch (act) { |
1107 | case XDP_TX: |
1108 | /* We need the replacement buffer before transmit. */ |
1109 | if (unlikely(qede_alloc_rx_buffer(rxq, true))) { |
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
1113 | break; |
1114 | } |
1115 | |
1116 | /* Now if there's a transmission problem, we'd still have to |
1117 | * throw current buffer, as replacement was already allocated. |
1118 | */ |
1119 | if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, |
1120 | *data_offset, *len, bd->data, |
1121 | NULL))) { |
1122 | dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, |
1123 | rxq->data_direction); |
1124 | __free_page(bd->data); |
1125 | |
			trace_xdp_exception(edev->ndev, prog, act);
1127 | } else { |
			dma_sync_single_for_device(rxq->dev,
						   bd->mapping + *data_offset,
						   *len, rxq->data_direction);
1131 | fp->xdp_xmit |= QEDE_XDP_TX; |
1132 | } |
1133 | |
1134 | /* Regardless, we've consumed an Rx BD */ |
1135 | qede_rx_bd_ring_consume(rxq); |
1136 | break; |
1137 | case XDP_REDIRECT: |
1138 | /* We need the replacement buffer before transmit. */ |
1139 | if (unlikely(qede_alloc_rx_buffer(rxq, true))) { |
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
1143 | break; |
1144 | } |
1145 | |
1146 | dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, |
1147 | rxq->data_direction); |
1148 | |
1149 | if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) |
1150 | DP_NOTICE(edev, "Failed to redirect the packet\n" ); |
1151 | else |
1152 | fp->xdp_xmit |= QEDE_XDP_REDIRECT; |
1153 | |
1154 | qede_rx_bd_ring_consume(rxq); |
1155 | break; |
1156 | default: |
		bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
1158 | fallthrough; |
1159 | case XDP_ABORTED: |
		trace_xdp_exception(edev->ndev, prog, act);
1161 | fallthrough; |
1162 | case XDP_DROP: |
		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1164 | } |
1165 | |
1166 | return false; |
1167 | } |
1168 | |
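/* Attach the remaining BDs of a multi-BD (jumbo) packet as skb frags;
 * returns the number of BDs that could not be mapped
 */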
1169 | static int qede_rx_build_jumbo(struct qede_dev *edev, |
1170 | struct qede_rx_queue *rxq, |
1171 | struct sk_buff *skb, |
1172 | struct eth_fast_path_rx_reg_cqe *cqe, |
1173 | u16 first_bd_len) |
1174 | { |
1175 | u16 pkt_len = le16_to_cpu(cqe->pkt_len); |
1176 | struct sw_rx_data *bd; |
1177 | u16 bd_cons_idx; |
1178 | u8 num_frags; |
1179 | |
1180 | pkt_len -= first_bd_len; |
1181 | |
1182 | /* We've already used one BD for the SKB. Now take care of the rest */ |
1183 | for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { |
1184 | u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : |
1185 | pkt_len; |
1186 | |
1187 | if (unlikely(!cur_size)) { |
1188 | DP_ERR(edev, |
1189 | "Still got %d BDs for mapping jumbo, but length became 0\n" , |
1190 | num_frags); |
1191 | goto out; |
1192 | } |
1193 | |
1194 | /* We need a replacement buffer for each BD */ |
1195 | if (unlikely(qede_alloc_rx_buffer(rxq, true))) |
1196 | goto out; |
1197 | |
1198 | /* Now that we've allocated the replacement buffer, |
1199 | * we can safely consume the next BD and map it to the SKB. |
1200 | */ |
1201 | bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; |
1202 | bd = &rxq->sw_rx_ring[bd_cons_idx]; |
1203 | qede_rx_bd_ring_consume(rxq); |
1204 | |
1205 | dma_unmap_page(rxq->dev, bd->mapping, |
1206 | PAGE_SIZE, DMA_FROM_DEVICE); |
1207 | |
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
				rxq->rx_headroom, cur_size, PAGE_SIZE);
1210 | |
1211 | pkt_len -= cur_size; |
1212 | } |
1213 | |
1214 | if (unlikely(pkt_len)) |
1215 | DP_ERR(edev, |
1216 | "Mapped all BDs of jumbo, but still have %d bytes\n" , |
1217 | pkt_len); |
1218 | |
1219 | out: |
1220 | return num_frags; |
1221 | } |
1222 | |
1223 | static int qede_rx_process_tpa_cqe(struct qede_dev *edev, |
1224 | struct qede_fastpath *fp, |
1225 | struct qede_rx_queue *rxq, |
1226 | union eth_rx_cqe *cqe, |
1227 | enum eth_rx_cqe_type type) |
1228 | { |
1229 | switch (type) { |
1230 | case ETH_RX_CQE_TYPE_TPA_START: |
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1232 | return 0; |
1233 | case ETH_RX_CQE_TYPE_TPA_CONT: |
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1235 | return 0; |
1236 | case ETH_RX_CQE_TYPE_TPA_END: |
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1238 | default: |
1239 | return 0; |
1240 | } |
1241 | } |
1242 | |
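/* Process a single Rx CQE - run XDP if attached, validate checksums,
 * build an skb and pass it up; returns the number of packets received
 */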
1243 | static int qede_rx_process_cqe(struct qede_dev *edev, |
1244 | struct qede_fastpath *fp, |
1245 | struct qede_rx_queue *rxq) |
1246 | { |
1247 | struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); |
1248 | struct eth_fast_path_rx_reg_cqe *fp_cqe; |
1249 | u16 len, pad, bd_cons_idx, parse_flag; |
1250 | enum eth_rx_cqe_type cqe_type; |
1251 | union eth_rx_cqe *cqe; |
1252 | struct sw_rx_data *bd; |
1253 | struct sk_buff *skb; |
1254 | __le16 flags; |
1255 | u8 csum_flag; |
1256 | |
1257 | /* Get the CQE from the completion ring */ |
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1259 | cqe_type = cqe->fast_path_regular.type; |
1260 | |
1261 | /* Process an unlikely slowpath event */ |
1262 | if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { |
1263 | struct eth_slow_path_rx_cqe *sp_cqe; |
1264 | |
1265 | sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; |
1266 | edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); |
1267 | return 0; |
1268 | } |
1269 | |
1270 | /* Handle TPA cqes */ |
1271 | if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) |
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1273 | |
1274 | /* Get the data from the SW ring; Consume it only after it's evident |
1275 | * we wouldn't recycle it. |
1276 | */ |
1277 | bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; |
1278 | bd = &rxq->sw_rx_ring[bd_cons_idx]; |
1279 | |
1280 | fp_cqe = &cqe->fast_path_regular; |
1281 | len = le16_to_cpu(fp_cqe->len_on_first_bd); |
1282 | pad = fp_cqe->placement_offset + rxq->rx_headroom; |
1283 | |
1284 | /* Run eBPF program if one is attached */ |
1285 | if (xdp_prog) |
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
				 &pad, &len))
1288 | return 0; |
1289 | |
1290 | /* If this is an error packet then drop it */ |
1291 | flags = cqe->fast_path_regular.pars_flags.flags; |
1292 | parse_flag = le16_to_cpu(flags); |
1293 | |
	csum_flag = qede_check_csum(parse_flag);
1295 | if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { |
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1297 | rxq->rx_ip_frags++; |
1298 | else |
1299 | rxq->rx_hw_errors++; |
1300 | } |
1301 | |
1302 | /* Basic validation passed; Need to prepare an SKB. This would also |
1303 | * guarantee to finally consume the first BD upon success. |
1304 | */ |
1305 | skb = qede_rx_build_skb(edev, rxq, bd, len, pad); |
1306 | if (!skb) { |
1307 | rxq->rx_alloc_errors++; |
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1309 | return 0; |
1310 | } |
1311 | |
1312 | /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed |
1313 | * by a single cqe. |
1314 | */ |
1315 | if (fp_cqe->bd_num > 1) { |
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);
1318 | |
1319 | if (unlikely(unmapped_frags > 0)) { |
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1321 | dev_kfree_skb_any(skb); |
1322 | return 0; |
1323 | } |
1324 | } |
1325 | |
1326 | /* The SKB contains all the data. Now prepare meta-magic */ |
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
1331 | qede_ptp_record_rx_ts(edev, cqe, skb); |
1332 | |
1333 | /* SKB is prepared - pass it to stack */ |
1334 | qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); |
1335 | |
1336 | return 1; |
1337 | } |
1338 | |
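/* NAPI Rx handler - process up to budget CQEs, then refill the Rx ring
 * and update the producers
 */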
1339 | static int qede_rx_int(struct qede_fastpath *fp, int budget) |
1340 | { |
1341 | struct qede_rx_queue *rxq = fp->rxq; |
1342 | struct qede_dev *edev = fp->edev; |
1343 | int work_done = 0, rcv_pkts = 0; |
1344 | u16 hw_comp_cons, sw_comp_cons; |
1345 | |
1346 | hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); |
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1348 | |
1349 | /* Memory barrier to prevent the CPU from doing speculative reads of CQE |
1350 | * / BD in the while-loop before reading hw_comp_cons. If the CQE is |
1351 | * read before it is written by FW, then FW writes CQE and SB, and then |
1352 | * the CPU reads the hw_comp_cons, it will use an old CQE. |
1353 | */ |
1354 | rmb(); |
1355 | |
1356 | /* Loop to complete all indicated BDs */ |
1357 | while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { |
1358 | rcv_pkts += qede_rx_process_cqe(edev, fp, rxq); |
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1361 | work_done++; |
1362 | } |
1363 | |
1364 | rxq->rcv_pkts += rcv_pkts; |
1365 | |
1366 | /* Allocate replacement buffers */ |
1367 | while (rxq->num_rx_buffers - rxq->filled_buffers) |
		if (qede_alloc_rx_buffer(rxq, false))
1369 | break; |
1370 | |
1371 | /* Update producers */ |
1372 | qede_update_rx_prod(edev, rxq); |
1373 | |
1374 | return work_done; |
1375 | } |
1376 | |
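/* Re-read the status block and check whether any fastpath work remains */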
1377 | static bool qede_poll_is_more_work(struct qede_fastpath *fp) |
1378 | { |
	qed_sb_update_sb_idx(fp->sb_info);
1380 | |
1381 | /* *_has_*_work() reads the status block, thus we need to ensure that |
1382 | * status block indices have been actually read (qed_sb_update_sb_idx) |
1383 | * prior to this check (*_has_*_work) so that we won't write the |
1384 | * "newer" value of the status block to HW (if there was a DMA right |
1385 | * after qede_has_rx_work and if there is no rmb, the memory reading |
1386 | * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). |
1387 | * In this case there will never be another interrupt until there is |
1388 | * another update of the status block, while there is still unhandled |
1389 | * work. |
1390 | */ |
1391 | rmb(); |
1392 | |
1393 | if (likely(fp->type & QEDE_FASTPATH_RX)) |
		if (qede_has_rx_work(fp->rxq))
1395 | return true; |
1396 | |
1397 | if (fp->type & QEDE_FASTPATH_XDP) |
		if (qede_txq_has_work(fp->xdp_tx))
1399 | return true; |
1400 | |
1401 | if (likely(fp->type & QEDE_FASTPATH_TX)) { |
1402 | int cos; |
1403 | |
1404 | for_each_cos_in_txq(fp->edev, cos) { |
			if (qede_txq_has_work(&fp->txq[cos]))
1406 | return true; |
1407 | } |
1408 | } |
1409 | |
1410 | return false; |
1411 | } |
1412 | |
1413 | /********************* |
1414 | * NDO & API related * |
1415 | *********************/ |
1416 | int qede_poll(struct napi_struct *napi, int budget) |
1417 | { |
1418 | struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, |
1419 | napi); |
1420 | struct qede_dev *edev = fp->edev; |
1421 | int rx_work_done = 0; |
1422 | u16 xdp_prod; |
1423 | |
1424 | fp->xdp_xmit = 0; |
1425 | |
1426 | if (likely(fp->type & QEDE_FASTPATH_TX)) { |
1427 | int cos; |
1428 | |
1429 | for_each_cos_in_txq(fp->edev, cos) { |
			if (qede_txq_has_work(&fp->txq[cos]))
				qede_tx_int(edev, &fp->txq[cos]);
1432 | } |
1433 | } |
1434 | |
	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);
1437 | |
	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;
1441 | |
1442 | if (fp->xdp_xmit & QEDE_XDP_REDIRECT) |
1443 | xdp_do_flush(); |
1444 | |
1445 | /* Handle case where we are called by netpoll with a budget of 0 */ |
1446 | if (rx_work_done < budget || !budget) { |
1447 | if (!qede_poll_is_more_work(fp)) { |
			napi_complete_done(napi, rx_work_done);
1449 | |
1450 | /* Update and reenable interrupts */ |
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1452 | } else { |
1453 | rx_work_done = budget; |
1454 | } |
1455 | } |
1456 | |
1457 | if (fp->xdp_xmit & QEDE_XDP_TX) { |
		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1459 | |
1460 | fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); |
		qede_update_tx_producer(fp->xdp_tx);
1462 | } |
1463 | |
1464 | return rx_work_done; |
1465 | } |
1466 | |
1467 | irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) |
1468 | { |
1469 | struct qede_fastpath *fp = fp_cookie; |
1470 | |
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1472 | |
	napi_schedule_irqoff(&fp->napi);
1474 | return IRQ_HANDLED; |
1475 | } |
1476 | |
1477 | /* Main transmit function */ |
1478 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
1479 | { |
	struct qede_dev *edev = netdev_priv(ndev);
1481 | struct netdev_queue *netdev_txq; |
1482 | struct qede_tx_queue *txq; |
1483 | struct eth_tx_1st_bd *first_bd; |
1484 | struct eth_tx_2nd_bd *second_bd = NULL; |
1485 | struct eth_tx_3rd_bd *third_bd = NULL; |
1486 | struct eth_tx_bd *tx_data_bd = NULL; |
1487 | u16 txq_index, val = 0; |
1488 | u8 nbd = 0; |
1489 | dma_addr_t mapping; |
1490 | int rc, frag_idx = 0, ipv6_ext = 0; |
1491 | u8 xmit_type; |
1492 | u16 idx; |
1493 | u16 hlen; |
1494 | bool data_split = false; |
1495 | |
1496 | /* Get tx-queue context and netdev index */ |
1497 | txq_index = skb_get_queue_mapping(skb); |
1498 | WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); |
1499 | txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index); |
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1501 | |
1502 | WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); |
1503 | |
	xmit_type = qede_xmit_type(skb, &ipv6_ext);
1505 | |
1506 | #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) |
1507 | if (qede_pkt_req_lin(skb, xmit_type)) { |
1508 | if (skb_linearize(skb)) { |
1509 | txq->tx_mem_alloc_err++; |
1510 | |
1511 | dev_kfree_skb_any(skb); |
1512 | return NETDEV_TX_OK; |
1513 | } |
1514 | } |
1515 | #endif |
1516 | |
1517 | /* Fill the entry in the SW ring and the BDs in the FW ring */ |
1518 | idx = txq->sw_tx_prod; |
1519 | txq->sw_tx_ring.skbs[idx].skb = skb; |
1520 | first_bd = (struct eth_tx_1st_bd *) |
		   qed_chain_produce(&txq->tx_pbl);
1522 | memset(first_bd, 0, sizeof(*first_bd)); |
1523 | first_bd->data.bd_flags.bitfields = |
1524 | 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; |
1525 | |
1526 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) |
1527 | qede_ptp_tx_ts(edev, skb); |
1528 | |
1529 | /* Map skb linear data for DMA and set in the first BD */ |
1530 | mapping = dma_map_single(txq->dev, skb->data, |
1531 | skb_headlen(skb), DMA_TO_DEVICE); |
1532 | if (unlikely(dma_mapping_error(txq->dev, mapping))) { |
1533 | DP_NOTICE(edev, "SKB mapping failed\n" ); |
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1535 | qede_update_tx_producer(txq); |
1536 | return NETDEV_TX_OK; |
1537 | } |
1538 | nbd++; |
1539 | BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); |
1540 | |
1541 | /* In case there is IPv6 with extension headers or LSO we need 2nd and |
1542 | * 3rd BDs. |
1543 | */ |
1544 | if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { |
1545 | second_bd = (struct eth_tx_2nd_bd *) |
			qed_chain_produce(&txq->tx_pbl);
1547 | memset(second_bd, 0, sizeof(*second_bd)); |
1548 | |
1549 | nbd++; |
1550 | third_bd = (struct eth_tx_3rd_bd *) |
			qed_chain_produce(&txq->tx_pbl);
1552 | memset(third_bd, 0, sizeof(*third_bd)); |
1553 | |
1554 | nbd++; |
1555 | /* We need to fill in additional data in second_bd... */ |
1556 | tx_data_bd = (struct eth_tx_bd *)second_bd; |
1557 | } |
1558 | |
1559 | if (skb_vlan_tag_present(skb)) { |
1560 | first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); |
1561 | first_bd->data.bd_flags.bitfields |= |
1562 | 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; |
1563 | } |
1564 | |
1565 | /* Fill the parsing flags & params according to the requested offload */ |
1566 | if (xmit_type & XMIT_L4_CSUM) { |
1567 | /* We don't re-calculate IP checksum as it is already done by |
1568 | * the upper stack |
1569 | */ |
1570 | first_bd->data.bd_flags.bitfields |= |
1571 | 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; |
1572 | |
1573 | if (xmit_type & XMIT_ENC) { |
1574 | first_bd->data.bd_flags.bitfields |= |
1575 | 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; |
1576 | |
1577 | val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT); |
1578 | } |
1579 | |
1580 | /* Legacy FW had flipped behavior in regard to this bit - |
1581 | * I.e., needed to set to prevent FW from touching encapsulated |
1582 | * packets when it didn't need to. |
1583 | */ |
1584 | if (unlikely(txq->is_legacy)) |
1585 | val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT); |
1586 | |
1587 | /* If the packet is IPv6 with extension header, indicate that |
1588 | * to FW and pass few params, since the device cracker doesn't |
1589 | * support parsing IPv6 with extension header/s. |
1590 | */ |
1591 | if (unlikely(ipv6_ext)) |
1592 | qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); |
1593 | } |
1594 | |
1595 | if (xmit_type & XMIT_LSO) { |
1596 | first_bd->data.bd_flags.bitfields |= |
1597 | (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); |
1598 | third_bd->data.lso_mss = |
1599 | cpu_to_le16(skb_shinfo(skb)->gso_size); |
1600 | |
1601 | if (unlikely(xmit_type & XMIT_ENC)) { |
1602 | first_bd->data.bd_flags.bitfields |= |
1603 | 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; |
1604 | |
1605 | if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { |
1606 | u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; |
1607 | |
1608 | first_bd->data.bd_flags.bitfields |= 1 << tmp; |
1609 | } |
			hlen = qede_get_skb_hlen(skb, true);
1611 | } else { |
1612 | first_bd->data.bd_flags.bitfields |= |
1613 | 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; |
			hlen = qede_get_skb_hlen(skb, false);
1615 | } |
1616 | |
1617 | /* @@@TBD - if will not be removed need to check */ |
1618 | third_bd->data.bitfields |= |
1619 | cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); |
1620 | |
1621 | /* Make life easier for FW guys who can't deal with header and |
1622 | * data on same BD. If we need to split, use the second bd... |
1623 | */ |
1624 | if (unlikely(skb_headlen(skb) > hlen)) { |
1625 | DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, |
1626 | "TSO split header size is %d (%x:%x)\n" , |
1627 | first_bd->nbytes, first_bd->addr.hi, |
1628 | first_bd->addr.lo); |
1629 | |
1630 | mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), |
1631 | le32_to_cpu(first_bd->addr.lo)) + |
1632 | hlen; |
1633 | |
1634 | BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, |
1635 | le16_to_cpu(first_bd->nbytes) - |
1636 | hlen); |
1637 | |
1638 | /* this marks the BD as one that has no |
1639 | * individual mapping |
1640 | */ |
1641 | txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; |
1642 | |
1643 | first_bd->nbytes = cpu_to_le16(hlen); |
1644 | |
1645 | tx_data_bd = (struct eth_tx_bd *)third_bd; |
1646 | data_split = true; |
1647 | } |
1648 | } else { |
1649 | if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { |
1650 | DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n" , skb->len); |
1651 | qede_free_failed_tx_pkt(txq, first_bd, nbd: 0, data_split: false); |
1652 | qede_update_tx_producer(txq); |
1653 | return NETDEV_TX_OK; |
1654 | } |
1655 | |
1656 | val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << |
1657 | ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); |
1658 | } |
1659 | |
1660 | first_bd->data.bitfields = cpu_to_le16(val); |
1661 | |
	/* Handle fragmented skb: frags placed inside the 2nd and 3rd BDs
	 * get special handling
	 */
1664 | while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { |
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
1668 | if (rc) { |
1669 | qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); |
1670 | qede_update_tx_producer(txq); |
1671 | return NETDEV_TX_OK; |
1672 | } |
1673 | |
1674 | if (tx_data_bd == (struct eth_tx_bd *)second_bd) |
1675 | tx_data_bd = (struct eth_tx_bd *)third_bd; |
1676 | else |
1677 | tx_data_bd = NULL; |
1678 | |
1679 | frag_idx++; |
1680 | } |
1681 | |
1682 | /* map last frags into 4th, 5th .... */ |
1683 | for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { |
1684 | tx_data_bd = (struct eth_tx_bd *) |
			qed_chain_produce(&txq->tx_pbl);
1686 | |
1687 | memset(tx_data_bd, 0, sizeof(*tx_data_bd)); |
1688 | |
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
1692 | if (rc) { |
1693 | qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); |
1694 | qede_update_tx_producer(txq); |
1695 | return NETDEV_TX_OK; |
1696 | } |
1697 | } |
1698 | |
1699 | /* update the first BD with the actual num BDs */ |
1700 | first_bd->data.nbds = nbd; |
1701 | |
	netdev_tx_sent_queue(netdev_txq, skb->len);
1703 | |
1704 | skb_tx_timestamp(skb); |
1705 | |
1706 | /* Advance packet producer only before sending the packet since mapping |
1707 | * of pages may fail. |
1708 | */ |
1709 | txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; |
1710 | |
1711 | /* 'next page' entries are counted in the producer value */ |
1712 | txq->tx_db.data.bd_prod = |
1713 | cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); |
1714 | |
	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1716 | qede_update_tx_producer(txq); |
1717 | |
1718 | if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) |
1719 | < (MAX_SKB_FRAGS + 1))) { |
1720 | if (netdev_xmit_more()) |
1721 | qede_update_tx_producer(txq); |
1722 | |
		netif_tx_stop_queue(netdev_txq);
1724 | txq->stopped_cnt++; |
1725 | DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, |
1726 | "Stop queue was called\n" ); |
1727 | /* paired memory barrier is in qede_tx_int(), we have to keep |
1728 | * ordering of set_bit() in netif_tx_stop_queue() and read of |
1729 | * fp->bd_tx_cons |
1730 | */ |
1731 | smp_mb(); |
1732 | |
		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1734 | (MAX_SKB_FRAGS + 1)) && |
1735 | (edev->state == QEDE_STATE_OPEN)) { |
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
1739 | } |
1740 | } |
1741 | |
1742 | return NETDEV_TX_OK; |
1743 | } |
1744 | |
1745 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, |
1746 | struct net_device *sb_dev) |
1747 | { |
1748 | struct qede_dev *edev = netdev_priv(dev); |
1749 | int total_txq; |
1750 | |
1751 | total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; |
1752 | |
1753 | return QEDE_TSS_COUNT(edev) ? |
1754 | netdev_pick_tx(dev, skb, NULL) % total_txq : 0; |
1755 | } |
1756 | |
1757 | /* 8B udp header + 8B base tunnel header + 32B option length */ |
1758 | #define QEDE_MAX_TUN_HDR_LEN 48 |
1759 | |
1760 | netdev_features_t qede_features_check(struct sk_buff *skb, |
1761 | struct net_device *dev, |
1762 | netdev_features_t features) |
1763 | { |
1764 | if (skb->encapsulation) { |
1765 | u8 l4_proto = 0; |
1766 | |
1767 | switch (vlan_get_protocol(skb)) { |
1768 | case htons(ETH_P_IP): |
1769 | l4_proto = ip_hdr(skb)->protocol; |
1770 | break; |
1771 | case htons(ETH_P_IPV6): |
1772 | l4_proto = ipv6_hdr(skb)->nexthdr; |
1773 | break; |
1774 | default: |
1775 | return features; |
1776 | } |
1777 | |
1778 | /* Disable offloads for geneve tunnels, as HW can't parse |
1779 | * the geneve header which has option length greater than 32b |
1780 | * and disable offloads for the ports which are not offloaded. |
1781 | */ |
1782 | if (l4_proto == IPPROTO_UDP) { |
1783 | struct qede_dev *edev = netdev_priv(dev); |
1784 | u16 hdrlen, vxln_port, gnv_port; |
1785 | |
1786 | hdrlen = QEDE_MAX_TUN_HDR_LEN; |
1787 | vxln_port = edev->vxlan_dst_port; |
1788 | gnv_port = edev->geneve_dst_port; |
1789 | |
1790 | if ((skb_inner_mac_header(skb) - |
1791 | skb_transport_header(skb)) > hdrlen || |
1792 | (ntohs(udp_hdr(skb)->dest) != vxln_port && |
1793 | ntohs(udp_hdr(skb)->dest) != gnv_port)) |
1794 | return features & ~(NETIF_F_CSUM_MASK | |
1795 | NETIF_F_GSO_MASK); |
1796 | } else if (l4_proto == IPPROTO_IPIP) { |
1797 | /* IPIP tunnels are unknown to the device or at least unsupported natively, |
1798 | * offloads for them can't be done trivially, so disable them for such skb. |
1799 | */ |
1800 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
1801 | } |
1802 | } |
1803 | |
1804 | return features; |
1805 | } |
1806 | |