// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

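/* Copy buffers are carved out of shared DMA-coherent pages: each buffer
 * is (1 << EFX_TX_CB_ORDER) bytes, so one page holds
 * 1 << (PAGE_SHIFT - EFX_TX_CB_ORDER) of them.  Worked example, assuming
 * EFX_TX_CB_ORDER == 7 and 4KiB pages: insert index 37 maps to page
 * 37 >> 5 == 1 at offset (37 << 7) & 4095 == 640, plus NET_IP_ALIGN.
 */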
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}

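/* Illustrative (hypothetical) interleaving that the stop-first ordering
 * below defends against:
 *
 *	xmit path			completion path
 *	read read_count (queue full)
 *					complete all descriptors;
 *					queue not stopped, so no wake
 *	stop queue			(nothing left to restart it)
 *
 * Stopping first and only then re-reading read_count (with a barrier in
 * between) closes this window.
 */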
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider all queues that the net core sees as one */
	struct efx_nic *efx = txq1->efx;
	struct efx_tx_queue *txq2;
	unsigned int fill_level;

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	efx_for_each_channel_tx_queue(txq2, txq1->channel)
		txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

#ifdef EFX_USE_PIO

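/* Staging buffer for PIO writes: remainders shorter than a cache line
 * are parked here so that every copy to the PIO aperture is a whole,
 * dword-aligned cache line, which keeps write-combining effective.
 */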
struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

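	/* block_len is len rounded down to a whole number of staging
	 * buffers (cache lines): e.g. with 64-byte cache lines,
	 * len == 300 gives block_len == 256, leaving 44 bytes to be
	 * parked in copy_buf.
	 */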
	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

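/* Write out any staged remainder.  Flushing the whole buffer, junk
 * included, is harmless: ESF_DZ_TX_PIO_BYTE_CNT in the option
 * descriptor (set to skb->len in efx_enqueue_skb_pio()) bounds what
 * the NIC actually transmits.
 */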
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments into PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_local_page(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_local(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}

static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

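	/* A PIO send is described by an option descriptor rather than a
	 * DMA descriptor: it carries the byte count and this queue's
	 * PIO buffer offset instead of a host DMA address.
	 */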
	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}

/* Decide whether we can use TX PIO, i.e. write packet data directly into
 * a buffer on the device.  This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty, including all queues for the channel.  This also ensures that
 * only one packet at a time can be using the PIO buffer.  If the xmit_more
 * flag is set then we don't use this - there'll be another packet along
 * shortly and we want to hold off the doorbell.
 */
static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_channel *channel = tx_queue->channel;

	if (!tx_queue->piobuf)
		return false;

	EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);

	efx_for_each_channel_tx_queue(tx_queue, channel)
		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
			return false;

	return true;
}
#endif /* EFX_USE_PIO */

/* Send any pending traffic for a channel. xmit_more is shared across all
 * queues for a channel, so we must check all of them.
 */
static void efx_tx_send_pending(struct efx_channel *channel)
{
	struct efx_tx_queue *q;

	efx_for_each_channel_tx_queue(q, channel) {
		if (q->xmit_pending)
			efx_nic_push_buffers(q);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */
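
	/* The skb now takes one of three fast paths, tried in order:
	 * TSO when segments != 0, PIO for a short packet on an empty
	 * queue, or the copy buffer for a short fragmented packet;
	 * anything else falls through to plain DMA mapping below.
	 */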

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		switch (tx_queue->tso_version) {
		case 1:
			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
			break;
		case 2:
			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
			break;
		case 0: /* No TSO on this queue, SW fallback needed */
		default:
			rc = -EINVAL;
			break;
		}
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_tx_may_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->xmit_pending = true;

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
		efx_tx_send_pending(tx_queue->channel);

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);

	return NETDEV_TX_OK;
}

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or in that of a
 * different NIC (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i = 0;

	if (unlikely(n && !xdpfs))
		return -EINVAL;
	if (unlikely(!n))
		return 0;

	cpu = raw_smp_processor_id();
	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (!tx_queue->initialised)
		return -EINVAL;

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* If we're borrowing net stack queues we have to handle stop-restart,
	 * or we might block the queue and it will be considered frozen.
	 */
	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
		if (netif_tx_queue_stopped(tx_queue->core_txq))
			goto unlock;
		efx_tx_maybe_stop_queue(tx_queue);
	}

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
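	/* Note: insert_count and read_count are free-running counters,
	 * masked only when used as ring indices, so the unsigned
	 * arithmetic below remains correct across wraparound: e.g.
	 * read_count == 0xffffffff, insert_count == 3 and 512 entries
	 * gives space == 508.
	 */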
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

unlock:
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);

	return i == 0 ? -EIO : i;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Should always return NETDEV_TX_OK and consume the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
	index = skb_get_queue_mapping(skb);
	type = efx_tx_csum_type_skb(skb);

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     unlikely(efx_ptp_is_ptp_tx(efx, skb)))) {
		/* There may be existing transmits on the channel that are
		 * waiting for this packet to trigger the doorbell write.
		 * We need to send the packets at this point.
		 */
		efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return efx_ptp_tx(efx, skb);
	}

	tx_queue = efx_get_tx_queue(efx, index, type);
	if (WARN_ON_ONCE(!tx_queue)) {
		/* We don't have a TXQ of the right type.
		 * This should never happen, as we don't advertise offload
		 * features unless we can support them.
		 */
		dev_kfree_skb_any(skb);
		/* If we're not expecting another transmit and we had something to push
		 * on this queue or a partner queue then we need to push here to get the
		 * previous packets out.
		 */
		if (!netdev_xmit_more())
			efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return NETDEV_TX_OK;
	}

	return __efx_enqueue_skb(tx_queue, skb);
}

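/* Complete exactly one packet's worth of descriptors.  This path is
 * used where each TX completion event corresponds to a single skb
 * (e.g. queues using MAC TX timestamping).  A minimal sketch of the
 * walk below, assuming a two-fragment packet:
 *
 *	buffer[read_ptr]     continuation buffer  -> dequeued
 *	buffer[read_ptr + 1] EFX_TX_BUF_SKB set   -> dequeued, loop ends
 */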
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int efv_pkts_compl = 0;
	unsigned int read_ptr;
	bool finished = false;

	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (!finished) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			struct efx_nic *efx = tx_queue->efx;

			netif_err(efx, hw, efx->net_dev,
				  "TX queue %d spurious single TX completion\n",
				  tx_queue->queue);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		/* Need to check the flag before dequeueing. */
		if (buffer->flags & EFX_TX_BUF_SKB)
			finished = true;
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
				   &efv_pkts_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}

	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);

	efx_xmit_done_check_empty(tx_queue);
}

void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->channel->channel);
}