// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for. When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS	128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)

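/* Return a pointer to the start of the buffer's data within its page. */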
static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

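/* Read the 32-bit RX hash from the packet prefix at @eh, coping with
 * platforms that lack efficient unaligned access.
 */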
static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

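/* Return the buffer that follows @rx_buf in the descriptor ring, wrapping
 * back to the start of the ring after the last entry.
 */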
static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

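/* Make @len bytes of a DMA-mapped RX buffer visible to the CPU before the
 * driver touches the packet data.
 */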
static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

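/* Work out how RX buffers are laid out within each page: the step between
 * buffers, how many buffers fit in a page once space is reserved for
 * struct ef4_rx_page_state, the truesize charged per buffer, and how many
 * pages a preferred refill batch requires.  As a rough illustration, with
 * 4KiB pages and a buffer step of about 2KiB, two buffers would share each
 * page; the exact split depends on rx_dma_len and rx_ip_align.
 */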
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EF4_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page;
	struct ef4_rx_page_state *state;
	unsigned index;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * ef4_init_rx_buffers - create EF4_RX_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 * @atomic: control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct ef4_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", ef4_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}

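/* Timer callback used after a failed fast fill: generate a fill event so
 * that NAPI runs and retries the refill.
 */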
void ef4_rx_slow_fill(struct timer_list *t)
{
	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	ef4_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

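/* Check that the completed length reported by the hardware fits within the
 * receive buffer; if not, mark the packet for discard and, for seriously
 * overlength events, schedule a reset.
 */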
static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  ef4_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
	}

	ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

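/* Handle a received packet. First half: validate the fragment count and
 * completed length, sync the DMA mappings and recycle the pages, then
 * record the packet on the channel so __ef4_rx_packet() can process it
 * later, pipelining receives to give the prefetched headers time to reach
 * the cache.
 */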
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   ef4_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so. Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(ef4_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

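/* Build an skb around the received fragments and pass it up the stack, or
 * hand it to the channel's own receive_skb handler when one is provided.
 */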
static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet. Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary. This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue;

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

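/* Allocate the software buffer ring and NIC descriptor resources for an
 * RX queue.
 */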
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = ef4_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

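/* Size and allocate the page recycle ring.  A larger ring is used when a
 * translating IOMMU is in use, since mapping and unmapping pages is then
 * comparatively expensive.
 */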
static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct iommu_domain __maybe_unused *domain;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
	domain = iommu_get_domain_for_dev(&efx->pci_dev->dev);
	if (domain && domain->type != IOMMU_DOMAIN_IDENTITY)
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

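/* Initialise an RX queue for use: reset the ring pointers, create the page
 * recycle ring, work out the fill limits and program the NIC descriptor
 * ring.
 */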
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	ef4_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	ef4_nic_init_rx(rx_queue);
}

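/* Shut down an RX queue: stop the slow-fill timer, release any outstanding
 * receive buffers and free the pages held in the recycle ring.
 */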
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
	int i;
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct ef4_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

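/* Release the NIC descriptor resources and the software buffer ring. */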
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

	ef4_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

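/* Accelerated RFS (ndo_rx_flow_steer): build a filter specification from
 * the flow keys of @skb and ask the NIC to steer that flow to @rxq_index.
 */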
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
		EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = ef4_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}

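/* Scan up to @quota ARFS filters and remove those whose flows have expired.
 * Returns false if the filter lock was contended and nothing was scanned.
 */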
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
	if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
	    (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}