// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)

#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

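/* ena_tx_timeout - netdev watchdog callback, invoked by the networking stack
 * when a TX queue appears to be stuck. Logs NAPI state for the queue and
 * schedules a device reset unless one is already in progress.
 */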
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	struct ena_adapter *adapter = netdev_priv(dev);
	unsigned int time_since_last_napi, threshold;
	struct ena_ring *tx_ring;
	int napi_scheduled;

	if (txqueue >= adapter->num_io_queues) {
		netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
		goto schedule_reset;
	}

	threshold = jiffies_to_usecs(dev->watchdog_timeo);
	tx_ring = &adapter->tx_ring[txqueue];

	time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
	napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);

	netdev_err(dev,
		   "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
		   txqueue,
		   threshold,
		   time_since_last_napi,
		   napi_scheduled);

	if (threshold < time_since_last_napi && napi_scheduled) {
		netdev_err(dev,
			   "napi handler hasn't been called for a long time but is scheduled\n");
		reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
	}
schedule_reset:
	/* Change the state of the device to trigger reset.
	 * Bail out if a reset has already been triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	ena_reset_device(adapter, reset_reason);
	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

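/* ena_xmit_common - transmission path shared by the skb and XDP xmit routines.
 * Writes the doorbell if the LLQ burst limit was reached, hands the packet's
 * descriptors to the device and updates ring statistics and next_to_use.
 */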
int ena_xmit_common(struct ena_adapter *adapter,
		    struct ena_ring *ring,
		    struct ena_tx_buffer *tx_info,
		    struct ena_com_tx_ctx *ena_tx_ctx,
		    u16 next_to_use,
		    u32 bytes)
{
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, adapter->netdev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_ring_tx_doorbell(ring);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, adapter->netdev,
			  "Failed to prepare tx bufs\n");
		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
		if (rc != -ENOMEM)
			ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->total_tx_size = bytes;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

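/* ena_init_rx_cpu_rmap - set up the aRFS reverse IRQ-to-CPU map (only when
 * CONFIG_RFS_ACCEL is enabled) so received flows can be steered to the CPU
 * consuming them.
 */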
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->numa_node = 0;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

void ena_init_io_rings(struct ena_adapter *adapter,
		       int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;
		spin_lock_init(&txr->xdp_tx_lock);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			rxr->rx_headroom = NET_SKB_PAD;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	tx_ring->numa_node = node;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
				    int first_index, int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;
	rx_ring->numa_node = node;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
				       dma_addr_t *dma)
{
	struct page *page;

	/* This would allocate the page on the same NUMA node the executing code
	 * is running on.
	 */
	page = dev_alloc_page();
	if (!page) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
		return ERR_PTR(-ENOSPC);
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);
		__free_page(page);
		return ERR_PTR(-EIO);
	}

	return page;
}

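/* ena_alloc_rx_buffer - make sure @rx_info holds a mapped page and describe it
 * to the device via rx_info->ena_buf, accounting for headroom and tailroom.
 */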
static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
			       struct ena_rx_buffer *rx_info)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;
	int tailroom;

	/* restore page offset value in case it has been changed by device */
	rx_info->buf_offset = headroom;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	/* We handle DMA here */
	page = ena_alloc_map_page(rx_ring, &dma);
	if (IS_ERR(page))
		return PTR_ERR(page);

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	rx_info->page = page;
	rx_info->dma_addr = dma;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;

	return 0;
}

static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info,
				    unsigned long attrs)
{
	dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
			     attrs);
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);

	__free_page(page);
	rx_info->page = NULL;
}

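/* ena_refill_rx_bufs - allocate and post up to @num RX buffers to the device.
 * Returns the number of buffers actually posted; the doorbell is written if
 * any descriptor was added.
 */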
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_buffer(rx_ring, rx_info);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
				  &rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

void ena_unmap_tx_buff(struct ena_ring *tx_ring,
		       struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	bool is_xdp_ring;
	u32 i;

	is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		if (is_xdp_ring)
			xdp_return_frame(tx_info->xdpf);
		else
			dev_kfree_skb_any(tx_info->skb);
	}

	if (!is_xdp_ring)
		netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
							   tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
			  struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s. qid %u req_id %u",
			  is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id %u in qid %u\n",
			  req_id, ring->qid);

	ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
	ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &tx_ring->tx_buffer_info[req_id];
	if (likely(tx_info->skb))
		return 0;

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

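/* ena_clean_tx_irq - reclaim completed TX descriptors, up to @budget packets.
 * Unmaps and frees the transmitted skbs and re-wakes the TX queue if it was
 * stopped and enough space became available.
 */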
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(tx_ring, req_id, NULL, false);
			break;
		}

		/* validate that the request id points to a valid skb */
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += tx_info->total_tx_size;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
{
	struct sk_buff *skb;

	if (!first_frag)
		skb = napi_alloc_skb(rx_ring->napi, len);
	else
		skb = napi_build_skb(first_frag, len);

	if (unlikely(!skb)) {
		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
				  &rx_ring->syncp);

		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. first_frag %s\n",
			  first_frag ? "provided" : "not provided");
	}

	return skb;
}

static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
				      u16 len, int pkt_offset)
{
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	/* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
	 * for data + headroom + tailroom.
	 */
	if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
		page_ref_inc(rx_info->page);
		rx_info->page_offset += buf_len;
		ena_buf->paddr += buf_len;
		ena_buf->len -= buf_len;
		return true;
	}

	return false;
}

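/* ena_rx_skb - build an skb for a received packet. Small packets (up to
 * rx_copybreak) are copied into a freshly allocated skb so the page can be
 * reused; larger packets use the RX page for the linear part and add any
 * remaining descriptors as frags.
 */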
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	int page_offset, pkt_offset;
	dma_addr_t pre_reuse_paddr;
	u16 len, req_id, buf = 0;
	bool reuse_rx_buf_page;
	struct sk_buff *skb;
	void *buf_addr;
	int buf_offset;
	u16 buf_len;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		adapter = rx_ring->adapter;
		netif_err(adapter, rx_err, rx_ring->netdev,
			  "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	buf_offset = rx_info->buf_offset;
	pkt_offset = buf_offset - rx_ring->rx_headroom;
	page_offset = rx_info->page_offset;
	buf_addr = page_address(rx_info->page) + page_offset;

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, NULL, len);
		if (unlikely(!skb))
			return NULL;

		skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX allocated small packet. len %d.\n", skb->len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);

	/* If XDP isn't loaded try to reuse part of the RX buffer */
	reuse_rx_buf_page = !is_xdp_loaded &&
			    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);

	if (!reuse_rx_buf_page)
		ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);

	skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
	if (unlikely(!skb))
		return NULL;

	/* Populate skb's linear part */
	skb_reserve(skb, buf_offset);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	do {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		if (!reuse_rx_buf_page)
			rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		/* rx_info->buf_offset includes rx_ring->rx_headroom */
		buf_offset = rx_info->buf_offset;
		pkt_offset = buf_offset - rx_ring->rx_headroom;
		buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
		page_offset = rx_info->page_offset;

		pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);

		reuse_rx_buf_page = !is_xdp_loaded &&
				    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);

		dma_sync_single_for_cpu(rx_ring->dev,
					pre_reuse_paddr + pkt_offset,
					len,
					DMA_FROM_DEVICE);

		if (!reuse_rx_buf_page)
			ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				page_offset + buf_offset, len, buf_len);

	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the ring from which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
				  &rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
					  &rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
					  &rx_ring->syncp);
		} else {
			ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
					  &rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))

			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

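/* ena_xdp_handle_buff - run the attached XDP program on a packet. Multi-buffer
 * packets are not supported and are dropped. On ENA_XDP_PASS, any header
 * adjustment made by the program is written back to the RX buffer metadata.
 */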
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	/* XDP multi-buffer packets not supported */
	if (unlikely(num_descs > 1)) {
		netdev_err_once(rx_ring->adapter->netdev,
				"xdp: dropped unsupported multi-buffer packets\n");
		ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
		return ENA_XDP_DROP;
	}

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp_prepare_buff(xdp, page_address(rx_info->page),
			 rx_info->buf_offset,
			 rx_ring->ena_bufs[0].len, false);

	ret = ena_xdp_execute(rx_ring, xdp);

	/* The xdp program might expand the headers */
	if (ret == ENA_XDP_PASS) {
		rx_info->buf_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int xdp_flags = 0;
	int total_len = 0;
	int xdp_verdict;
	u8 pkt_offset;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);

	do {
		xdp_verdict = ENA_XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		/* First descriptor might have an offset set by the device */
		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		pkt_offset = ena_rx_ctx.pkt_offset;
		rx_info->buf_offset += pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
					rx_ring->ena_bufs[0].len,
					DMA_FROM_DEVICE);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);

		/* allocate skb and fill it */
		if (xdp_verdict == ENA_XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				int req_id = rx_ring->ena_bufs[i].req_id;

				rx_ring->free_ids[next_to_clean] = req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);

				/* Packet was passed for transmission, unmap it
				 * from RX side.
				 */
				if (xdp_verdict & ENA_XDP_FORWARDED) {
					ena_unmap_rx_buff_attrs(rx_ring,
								&rx_ring->rx_buffer_info[req_id],
								DMA_ATTR_SKIP_CPU_SYNC);
					rx_ring->rx_buffer_info[req_id].page = NULL;
				}
			}
			if (xdp_verdict != ENA_XDP_PASS) {
				xdp_flags |= xdp_verdict;
				total_len += ena_rx_ctx.ena_bufs[0].len;
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
			rx_copybreak_pkt++;

		total_len += skb->len;

		napi_gro_receive(napi, skb);

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold)
		ena_refill_rx_bufs(rx_ring, refill_required);

	if (xdp_flags & ENA_XDP_REDIRECT)
		xdp_do_flush();

	return work_done;

error:
	if (xdp_flags & ENA_XDP_REDIRECT)
		xdp_do_flush();

	adapter = netdev_priv(rx_ring->netdev);

	if (rc == -ENOSPC) {
		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
	} else {
		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
				  &rx_ring->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
	}
	return 0;
}

static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}

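/* ena_unmask_interrupt - update the interrupt delay registers and unmask the
 * IO interrupt shared by the TX/RX ring pair.
 */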
void ena_unmask_interrupt(struct ena_ring *tx_ring,
			  struct ena_ring *rx_ring)
{
	u32 rx_interval = tx_ring->smoothed_interval;
	struct ena_eth_io_intr_reg intr_reg;

	/* Rx ring can be NULL for XDP tx queues, which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
			  &tx_ring->syncp);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}

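/* ena_update_ring_numa_node - if NAPI migrated to a different CPU, record the
 * new CPU/NUMA node and update the device's NUMA hint for the completion
 * queues.
 */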
void ena_update_ring_numa_node(struct ena_ring *tx_ring,
			       struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	numa_node = cpu_to_node(cpu);

	if (likely(tx_ring->numa_node == numa_node))
		goto out;

	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		tx_ring->numa_node = numa_node;
		if (rx_ring) {
			rx_ring->numa_node = numa_node;
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
		}
	}

	return;
out:
	put_cpu();
}

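/* ena_io_poll - NAPI poll handler for a TX/RX ring pair. Cleans TX completions
 * and RX packets within the given budget and re-enables the interrupt once the
 * work is done.
 */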
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_update_ring_numa_node(tx_ring, rx_ring);
			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}

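/* ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin queue and async events
 * @irq: interrupt number
 * @data: pointer to a network interface private structure
 */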
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	/* Used to check HW health */
	WRITE_ONCE(ena_napi->first_interrupt, true);

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq).
 * plus reserve one vector for each potential io queue.
 * the number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

1625 | static void ena_setup_io_intr(struct ena_adapter *adapter) |
1626 | { |
1627 | struct net_device *netdev; |
1628 | int irq_idx, i, cpu; |
1629 | int io_queue_count; |
1630 | |
1631 | netdev = adapter->netdev; |
1632 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1633 | |
1634 | for (i = 0; i < io_queue_count; i++) { |
1635 | irq_idx = ENA_IO_IRQ_IDX(i); |
1636 | cpu = i % num_online_cpus(); |
1637 | |
1638 | snprintf(buf: adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, |
1639 | fmt: "%s-Tx-Rx-%d" , netdev->name, i); |
1640 | adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; |
1641 | adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; |
1642 | adapter->irq_tbl[irq_idx].vector = |
1643 | pci_irq_vector(dev: adapter->pdev, nr: irq_idx); |
1644 | adapter->irq_tbl[irq_idx].cpu = cpu; |
1645 | |
1646 | cpumask_set_cpu(cpu, |
1647 | dstp: &adapter->irq_tbl[irq_idx].affinity_hint_mask); |
1648 | } |
1649 | } |
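
/* Example of the mapping above (hypothetical values): with 4 online CPUs and
* 6 I/O queues (no XDP), queues 0..5 get CPU hints 0, 1, 2, 3, 0, 1 from the
* i % num_online_cpus() round robin, and the IRQs are named
* "<netdev>-Tx-Rx-0" .. "<netdev>-Tx-Rx-5".
*/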
1650 | |
1651 | static int ena_request_mgmnt_irq(struct ena_adapter *adapter) |
1652 | { |
1653 | unsigned long flags = 0; |
1654 | struct ena_irq *irq; |
1655 | int rc; |
1656 | |
1657 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; |
1658 | rc = request_irq(irq: irq->vector, handler: irq->handler, flags, name: irq->name, |
1659 | dev: irq->data); |
1660 | if (rc) { |
1661 | netif_err(adapter, probe, adapter->netdev, |
1662 | "Failed to request admin irq\n" ); |
1663 | return rc; |
1664 | } |
1665 | |
1666 | netif_dbg(adapter, probe, adapter->netdev, |
"Set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n" ,
1668 | irq->affinity_hint_mask.bits[0], irq->vector); |
1669 | |
1670 | irq_set_affinity_hint(irq: irq->vector, m: &irq->affinity_hint_mask); |
1671 | |
1672 | return rc; |
1673 | } |
1674 | |
1675 | static int ena_request_io_irq(struct ena_adapter *adapter) |
1676 | { |
1677 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1678 | unsigned long flags = 0; |
1679 | struct ena_irq *irq; |
1680 | int rc = 0, i, k; |
1681 | |
1682 | if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { |
1683 | netif_err(adapter, ifup, adapter->netdev, |
1684 | "Failed to request I/O IRQ: MSI-X is not enabled\n" ); |
1685 | return -EINVAL; |
1686 | } |
1687 | |
1688 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1689 | irq = &adapter->irq_tbl[i]; |
1690 | rc = request_irq(irq: irq->vector, handler: irq->handler, flags, name: irq->name, |
1691 | dev: irq->data); |
1692 | if (rc) { |
1693 | netif_err(adapter, ifup, adapter->netdev, |
1694 | "Failed to request I/O IRQ. index %d rc %d\n" , |
1695 | i, rc); |
1696 | goto err; |
1697 | } |
1698 | |
1699 | netif_dbg(adapter, ifup, adapter->netdev, |
1700 | "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n" , |
1701 | i, irq->affinity_hint_mask.bits[0], irq->vector); |
1702 | |
1703 | irq_set_affinity_hint(irq: irq->vector, m: &irq->affinity_hint_mask); |
1704 | } |
1705 | |
1706 | return rc; |
1707 | |
1708 | err: |
1709 | for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { |
1710 | irq = &adapter->irq_tbl[k]; |
1711 | free_irq(irq->vector, irq->data); |
1712 | } |
1713 | |
1714 | return rc; |
1715 | } |
1716 | |
1717 | static void ena_free_mgmnt_irq(struct ena_adapter *adapter) |
1718 | { |
1719 | struct ena_irq *irq; |
1720 | |
1721 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; |
1722 | synchronize_irq(irq: irq->vector); |
1723 | irq_set_affinity_hint(irq: irq->vector, NULL); |
1724 | free_irq(irq->vector, irq->data); |
1725 | } |
1726 | |
1727 | static void ena_free_io_irq(struct ena_adapter *adapter) |
1728 | { |
1729 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1730 | struct ena_irq *irq; |
1731 | int i; |
1732 | |
1733 | #ifdef CONFIG_RFS_ACCEL |
1734 | if (adapter->msix_vecs >= 1) { |
1735 | free_irq_cpu_rmap(rmap: adapter->netdev->rx_cpu_rmap); |
1736 | adapter->netdev->rx_cpu_rmap = NULL; |
1737 | } |
1738 | #endif /* CONFIG_RFS_ACCEL */ |
1739 | |
1740 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1741 | irq = &adapter->irq_tbl[i]; |
1742 | irq_set_affinity_hint(irq: irq->vector, NULL); |
1743 | free_irq(irq->vector, irq->data); |
1744 | } |
1745 | } |
1746 | |
1747 | static void ena_disable_msix(struct ena_adapter *adapter) |
1748 | { |
1749 | if (test_and_clear_bit(nr: ENA_FLAG_MSIX_ENABLED, addr: &adapter->flags)) |
1750 | pci_free_irq_vectors(dev: adapter->pdev); |
1751 | } |
1752 | |
1753 | static void ena_disable_io_intr_sync(struct ena_adapter *adapter) |
1754 | { |
1755 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1756 | int i; |
1757 | |
1758 | if (!netif_running(dev: adapter->netdev)) |
1759 | return; |
1760 | |
1761 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) |
1762 | synchronize_irq(irq: adapter->irq_tbl[i].vector); |
1763 | } |
1764 | |
1765 | static void ena_del_napi_in_range(struct ena_adapter *adapter, |
1766 | int first_index, |
1767 | int count) |
1768 | { |
1769 | int i; |
1770 | |
1771 | for (i = first_index; i < first_index + count; i++) { |
1772 | netif_napi_del(napi: &adapter->ena_napi[i].napi); |
1773 | |
1774 | WARN_ON(ENA_IS_XDP_INDEX(adapter, i) && |
1775 | adapter->ena_napi[i].rx_ring); |
1776 | } |
1777 | } |
1778 | |
1779 | static void ena_init_napi_in_range(struct ena_adapter *adapter, |
1780 | int first_index, int count) |
1781 | { |
1782 | int (*napi_handler)(struct napi_struct *napi, int budget); |
1783 | int i; |
1784 | |
1785 | for (i = first_index; i < first_index + count; i++) { |
1786 | struct ena_napi *napi = &adapter->ena_napi[i]; |
1787 | struct ena_ring *rx_ring, *tx_ring; |
1788 | |
1789 | memset(napi, 0, sizeof(*napi)); |
1790 | |
1791 | rx_ring = &adapter->rx_ring[i]; |
1792 | tx_ring = &adapter->tx_ring[i]; |
1793 | |
1794 | napi_handler = ena_io_poll; |
1795 | if (ENA_IS_XDP_INDEX(adapter, i)) |
1796 | napi_handler = ena_xdp_io_poll; |
1797 | |
1798 | netif_napi_add(dev: adapter->netdev, napi: &napi->napi, poll: napi_handler); |
1799 | |
1800 | if (!ENA_IS_XDP_INDEX(adapter, i)) |
1801 | napi->rx_ring = rx_ring; |
1802 | |
1803 | napi->tx_ring = tx_ring; |
1804 | napi->qid = i; |
1805 | } |
1806 | } |
1807 | |
1808 | static void ena_napi_disable_in_range(struct ena_adapter *adapter, |
1809 | int first_index, |
1810 | int count) |
1811 | { |
1812 | int i; |
1813 | |
1814 | for (i = first_index; i < first_index + count; i++) |
1815 | napi_disable(n: &adapter->ena_napi[i].napi); |
1816 | } |
1817 | |
1818 | static void ena_napi_enable_in_range(struct ena_adapter *adapter, |
1819 | int first_index, |
1820 | int count) |
1821 | { |
1822 | int i; |
1823 | |
1824 | for (i = first_index; i < first_index + count; i++) |
1825 | napi_enable(n: &adapter->ena_napi[i].napi); |
1826 | } |
1827 | |
1828 | /* Configure the Rx forwarding */ |
static int ena_rss_configure(struct ena_adapter *adapter)
1830 | { |
1831 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
1832 | int rc; |
1833 | |
1834 | /* In case the RSS table wasn't initialized by probe */ |
1835 | if (!ena_dev->rss.tbl_log_size) { |
1836 | rc = ena_rss_init_default(adapter); |
1837 | if (rc && (rc != -EOPNOTSUPP)) { |
1838 | netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n" , rc); |
1839 | return rc; |
1840 | } |
1841 | } |
1842 | |
1843 | /* Set indirect table */ |
1844 | rc = ena_com_indirect_table_set(ena_dev); |
1845 | if (unlikely(rc && rc != -EOPNOTSUPP)) |
1846 | return rc; |
1847 | |
1848 | /* Configure hash function (if supported) */ |
1849 | rc = ena_com_set_hash_function(ena_dev); |
1850 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1851 | return rc; |
1852 | |
1853 | /* Configure hash inputs (if supported) */ |
1854 | rc = ena_com_set_hash_ctrl(ena_dev); |
1855 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1856 | return rc; |
1857 | |
1858 | return 0; |
1859 | } |
1860 | |
1861 | static int ena_up_complete(struct ena_adapter *adapter) |
1862 | { |
1863 | int rc; |
1864 | |
1865 | rc = ena_rss_configure(adapter); |
1866 | if (rc) |
1867 | return rc; |
1868 | |
1869 | ena_change_mtu(dev: adapter->netdev, new_mtu: adapter->netdev->mtu); |
1870 | |
1871 | ena_refill_all_rx_bufs(adapter); |
1872 | |
1873 | /* enable transmits */ |
1874 | netif_tx_start_all_queues(dev: adapter->netdev); |
1875 | |
1876 | ena_napi_enable_in_range(adapter, |
1877 | first_index: 0, |
1878 | count: adapter->xdp_num_queues + adapter->num_io_queues); |
1879 | |
1880 | return 0; |
1881 | } |
1882 | |
1883 | static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) |
1884 | { |
1885 | struct ena_com_create_io_ctx ctx; |
1886 | struct ena_com_dev *ena_dev; |
1887 | struct ena_ring *tx_ring; |
1888 | u32 msix_vector; |
1889 | u16 ena_qid; |
1890 | int rc; |
1891 | |
1892 | ena_dev = adapter->ena_dev; |
1893 | |
1894 | tx_ring = &adapter->tx_ring[qid]; |
1895 | msix_vector = ENA_IO_IRQ_IDX(qid); |
1896 | ena_qid = ENA_IO_TXQ_IDX(qid); |
1897 | |
1898 | memset(&ctx, 0x0, sizeof(ctx)); |
1899 | |
1900 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; |
1901 | ctx.qid = ena_qid; |
1902 | ctx.mem_queue_type = ena_dev->tx_mem_queue_type; |
1903 | ctx.msix_vector = msix_vector; |
1904 | ctx.queue_size = tx_ring->ring_size; |
1905 | ctx.numa_node = tx_ring->numa_node; |
1906 | |
1907 | rc = ena_com_create_io_queue(ena_dev, ctx: &ctx); |
1908 | if (rc) { |
1909 | netif_err(adapter, ifup, adapter->netdev, |
1910 | "Failed to create I/O TX queue num %d rc: %d\n" , |
1911 | qid, rc); |
1912 | return rc; |
1913 | } |
1914 | |
1915 | rc = ena_com_get_io_handlers(ena_dev, qid: ena_qid, |
1916 | io_sq: &tx_ring->ena_com_io_sq, |
1917 | io_cq: &tx_ring->ena_com_io_cq); |
1918 | if (rc) { |
1919 | netif_err(adapter, ifup, adapter->netdev, |
1920 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n" , |
1921 | qid, rc); |
1922 | ena_com_destroy_io_queue(ena_dev, qid: ena_qid); |
1923 | return rc; |
1924 | } |
1925 | |
1926 | ena_com_update_numa_node(io_cq: tx_ring->ena_com_io_cq, numa_node: ctx.numa_node); |
1927 | return rc; |
1928 | } |
1929 | |
1930 | int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, |
1931 | int first_index, int count) |
1932 | { |
1933 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
1934 | int rc, i; |
1935 | |
1936 | for (i = first_index; i < first_index + count; i++) { |
1937 | rc = ena_create_io_tx_queue(adapter, qid: i); |
1938 | if (rc) |
1939 | goto create_err; |
1940 | } |
1941 | |
1942 | return 0; |
1943 | |
1944 | create_err: |
1945 | while (i-- > first_index) |
1946 | ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); |
1947 | |
1948 | return rc; |
1949 | } |
1950 | |
1951 | static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) |
1952 | { |
1953 | struct ena_com_dev *ena_dev; |
1954 | struct ena_com_create_io_ctx ctx; |
1955 | struct ena_ring *rx_ring; |
1956 | u32 msix_vector; |
1957 | u16 ena_qid; |
1958 | int rc; |
1959 | |
1960 | ena_dev = adapter->ena_dev; |
1961 | |
1962 | rx_ring = &adapter->rx_ring[qid]; |
1963 | msix_vector = ENA_IO_IRQ_IDX(qid); |
1964 | ena_qid = ENA_IO_RXQ_IDX(qid); |
1965 | |
1966 | memset(&ctx, 0x0, sizeof(ctx)); |
1967 | |
1968 | ctx.qid = ena_qid; |
1969 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; |
1970 | ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
1971 | ctx.msix_vector = msix_vector; |
1972 | ctx.queue_size = rx_ring->ring_size; |
1973 | ctx.numa_node = rx_ring->numa_node; |
1974 | |
1975 | rc = ena_com_create_io_queue(ena_dev, ctx: &ctx); |
1976 | if (rc) { |
1977 | netif_err(adapter, ifup, adapter->netdev, |
1978 | "Failed to create I/O RX queue num %d rc: %d\n" , |
1979 | qid, rc); |
1980 | return rc; |
1981 | } |
1982 | |
1983 | rc = ena_com_get_io_handlers(ena_dev, qid: ena_qid, |
1984 | io_sq: &rx_ring->ena_com_io_sq, |
1985 | io_cq: &rx_ring->ena_com_io_cq); |
1986 | if (rc) { |
1987 | netif_err(adapter, ifup, adapter->netdev, |
1988 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n" , |
1989 | qid, rc); |
1990 | goto err; |
1991 | } |
1992 | |
1993 | ena_com_update_numa_node(io_cq: rx_ring->ena_com_io_cq, numa_node: ctx.numa_node); |
1994 | |
1995 | return rc; |
1996 | err: |
1997 | ena_com_destroy_io_queue(ena_dev, qid: ena_qid); |
1998 | return rc; |
1999 | } |
2000 | |
2001 | static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) |
2002 | { |
2003 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
2004 | int rc, i; |
2005 | |
2006 | for (i = 0; i < adapter->num_io_queues; i++) { |
2007 | rc = ena_create_io_rx_queue(adapter, qid: i); |
2008 | if (rc) |
2009 | goto create_err; |
2010 | INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); |
2011 | |
2012 | ena_xdp_register_rxq_info(rx_ring: &adapter->rx_ring[i]); |
2013 | } |
2014 | |
2015 | return 0; |
2016 | |
2017 | create_err: |
2018 | while (i--) { |
2019 | ena_xdp_unregister_rxq_info(rx_ring: &adapter->rx_ring[i]); |
2020 | cancel_work_sync(work: &adapter->ena_napi[i].dim.work); |
2021 | ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); |
2022 | } |
2023 | |
2024 | return rc; |
2025 | } |
2026 | |
2027 | static void set_io_rings_size(struct ena_adapter *adapter, |
2028 | int new_tx_size, |
2029 | int new_rx_size) |
2030 | { |
2031 | int i; |
2032 | |
2033 | for (i = 0; i < adapter->num_io_queues; i++) { |
2034 | adapter->tx_ring[i].ring_size = new_tx_size; |
2035 | adapter->rx_ring[i].ring_size = new_rx_size; |
2036 | } |
2037 | } |
2038 | |
2039 | /* This function allows queue allocation to backoff when the system is |
2040 | * low on memory. If there is not enough memory to allocate io queues |
2041 | * the driver will try to allocate smaller queues. |
2042 | * |
* The backoff algorithm is as follows (see the worked example below):
* 1. Try to allocate TX and RX; if successful,
*    1.1. return success.
* 2. Divide by 2 the size of the larger of the RX and TX queues (or both if
*    their sizes are the same).
* 3. If TX or RX is smaller than 256,
*    3.1. return failure.
* 4. Else,
*    4.1. go back to 1.
*/
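/* Worked example of the backoff (illustrative sizes): starting from
* TX=4096/RX=1024, repeated -ENOMEM failures walk through 4096/1024 ->
* 2048/1024 -> 1024/1024 -> 512/512 -> 256/256; halving once more would drop
* below ENA_MIN_RING_SIZE (256 per step 3 above), so the next failure is
* final.
*/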
2054 | static int create_queues_with_size_backoff(struct ena_adapter *adapter) |
2055 | { |
2056 | int rc, cur_rx_ring_size, cur_tx_ring_size; |
2057 | int new_rx_ring_size, new_tx_ring_size; |
2058 | |
2059 | /* current queue sizes might be set to smaller than the requested |
2060 | * ones due to past queue allocation failures. |
2061 | */ |
2062 | set_io_rings_size(adapter, new_tx_size: adapter->requested_tx_ring_size, |
2063 | new_rx_size: adapter->requested_rx_ring_size); |
2064 | |
2065 | while (1) { |
2066 | if (ena_xdp_present(adapter)) { |
2067 | rc = ena_setup_and_create_all_xdp_queues(adapter); |
2068 | |
2069 | if (rc) |
2070 | goto err_setup_tx; |
2071 | } |
2072 | rc = ena_setup_tx_resources_in_range(adapter, |
2073 | first_index: 0, |
2074 | count: adapter->num_io_queues); |
2075 | if (rc) |
2076 | goto err_setup_tx; |
2077 | |
2078 | rc = ena_create_io_tx_queues_in_range(adapter, |
2079 | first_index: 0, |
2080 | count: adapter->num_io_queues); |
2081 | if (rc) |
2082 | goto err_create_tx_queues; |
2083 | |
2084 | rc = ena_setup_all_rx_resources(adapter); |
2085 | if (rc) |
2086 | goto err_setup_rx; |
2087 | |
2088 | rc = ena_create_all_io_rx_queues(adapter); |
2089 | if (rc) |
2090 | goto err_create_rx_queues; |
2091 | |
2092 | return 0; |
2093 | |
2094 | err_create_rx_queues: |
2095 | ena_free_all_io_rx_resources(adapter); |
2096 | err_setup_rx: |
2097 | ena_destroy_all_tx_queues(adapter); |
2098 | err_create_tx_queues: |
2099 | ena_free_all_io_tx_resources(adapter); |
2100 | err_setup_tx: |
2101 | if (rc != -ENOMEM) { |
2102 | netif_err(adapter, ifup, adapter->netdev, |
2103 | "Queue creation failed with error code %d\n" , |
2104 | rc); |
2105 | return rc; |
2106 | } |
2107 | |
2108 | cur_tx_ring_size = adapter->tx_ring[0].ring_size; |
2109 | cur_rx_ring_size = adapter->rx_ring[0].ring_size; |
2110 | |
2111 | netif_err(adapter, ifup, adapter->netdev, |
2112 | "Not enough memory to create queues with sizes TX=%d, RX=%d\n" , |
2113 | cur_tx_ring_size, cur_rx_ring_size); |
2114 | |
2115 | new_tx_ring_size = cur_tx_ring_size; |
2116 | new_rx_ring_size = cur_rx_ring_size; |
2117 | |
2118 | /* Decrease the size of the larger queue, or |
2119 | * decrease both if they are the same size. |
2120 | */ |
2121 | if (cur_rx_ring_size <= cur_tx_ring_size) |
2122 | new_tx_ring_size = cur_tx_ring_size / 2; |
2123 | if (cur_rx_ring_size >= cur_tx_ring_size) |
2124 | new_rx_ring_size = cur_rx_ring_size / 2; |
2125 | |
2126 | if (new_tx_ring_size < ENA_MIN_RING_SIZE || |
2127 | new_rx_ring_size < ENA_MIN_RING_SIZE) { |
2128 | netif_err(adapter, ifup, adapter->netdev, |
2129 | "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n" , |
2130 | ENA_MIN_RING_SIZE); |
2131 | return rc; |
2132 | } |
2133 | |
2134 | netif_err(adapter, ifup, adapter->netdev, |
2135 | "Retrying queue creation with sizes TX=%d, RX=%d\n" , |
2136 | new_tx_ring_size, |
2137 | new_rx_ring_size); |
2138 | |
2139 | set_io_rings_size(adapter, new_tx_size: new_tx_ring_size, |
2140 | new_rx_size: new_rx_ring_size); |
2141 | } |
2142 | } |
2143 | |
2144 | int ena_up(struct ena_adapter *adapter) |
2145 | { |
2146 | int io_queue_count, rc, i; |
2147 | |
2148 | netif_dbg(adapter, ifup, adapter->netdev, "%s\n" , __func__); |
2149 | |
2150 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
2151 | ena_setup_io_intr(adapter); |
2152 | |
2153 | /* napi poll functions should be initialized before running |
2154 | * request_irq(), to handle a rare condition where there is a pending |
2155 | * interrupt, causing the ISR to fire immediately while the poll |
2156 | * function wasn't set yet, causing a null dereference |
2157 | */ |
2158 | ena_init_napi_in_range(adapter, first_index: 0, count: io_queue_count); |
2159 | |
2160 | /* Enabling DIM needs to happen before enabling IRQs since DIM |
2161 | * is run from napi routine |
2162 | */ |
2163 | if (ena_com_interrupt_moderation_supported(ena_dev: adapter->ena_dev)) |
2164 | ena_com_enable_adaptive_moderation(ena_dev: adapter->ena_dev); |
2165 | |
2166 | rc = ena_request_io_irq(adapter); |
2167 | if (rc) |
2168 | goto err_req_irq; |
2169 | |
2170 | rc = create_queues_with_size_backoff(adapter); |
2171 | if (rc) |
2172 | goto err_create_queues_with_backoff; |
2173 | |
2174 | rc = ena_up_complete(adapter); |
2175 | if (rc) |
2176 | goto err_up; |
2177 | |
2178 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) |
2179 | netif_carrier_on(dev: adapter->netdev); |
2180 | |
2181 | ena_increase_stat(statp: &adapter->dev_stats.interface_up, cnt: 1, |
2182 | syncp: &adapter->syncp); |
2183 | |
2184 | set_bit(nr: ENA_FLAG_DEV_UP, addr: &adapter->flags); |
2185 | |
2186 | /* Enable completion queues interrupt */ |
2187 | for (i = 0; i < adapter->num_io_queues; i++) |
2188 | ena_unmask_interrupt(tx_ring: &adapter->tx_ring[i], |
2189 | rx_ring: &adapter->rx_ring[i]); |
2190 | |
/* Schedule napi in case we had pending packets
* from the last time we disabled napi
*/
2194 | for (i = 0; i < io_queue_count; i++) |
2195 | napi_schedule(n: &adapter->ena_napi[i].napi); |
2196 | |
2197 | return rc; |
2198 | |
2199 | err_up: |
2200 | ena_destroy_all_tx_queues(adapter); |
2201 | ena_free_all_io_tx_resources(adapter); |
2202 | ena_destroy_all_rx_queues(adapter); |
2203 | ena_free_all_io_rx_resources(adapter); |
2204 | err_create_queues_with_backoff: |
2205 | ena_free_io_irq(adapter); |
2206 | err_req_irq: |
2207 | ena_del_napi_in_range(adapter, first_index: 0, count: io_queue_count); |
2208 | |
2209 | return rc; |
2210 | } |
2211 | |
2212 | void ena_down(struct ena_adapter *adapter) |
2213 | { |
2214 | int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
2215 | |
2216 | netif_dbg(adapter, ifdown, adapter->netdev, "%s\n" , __func__); |
2217 | |
2218 | clear_bit(nr: ENA_FLAG_DEV_UP, addr: &adapter->flags); |
2219 | |
2220 | ena_increase_stat(statp: &adapter->dev_stats.interface_down, cnt: 1, |
2221 | syncp: &adapter->syncp); |
2222 | |
2223 | netif_carrier_off(dev: adapter->netdev); |
2224 | netif_tx_disable(dev: adapter->netdev); |
2225 | |
2226 | /* After this point the napi handler won't enable the tx queue */ |
2227 | ena_napi_disable_in_range(adapter, first_index: 0, count: io_queue_count); |
2228 | |
2229 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { |
2230 | int rc; |
2231 | |
2232 | rc = ena_com_dev_reset(ena_dev: adapter->ena_dev, reset_reason: adapter->reset_reason); |
2233 | if (rc) |
2234 | netif_err(adapter, ifdown, adapter->netdev, |
2235 | "Device reset failed\n" ); |
2236 | /* stop submitting admin commands on a device that was reset */ |
2237 | ena_com_set_admin_running_state(ena_dev: adapter->ena_dev, state: false); |
2238 | } |
2239 | |
2240 | ena_destroy_all_io_queues(adapter); |
2241 | |
2242 | ena_disable_io_intr_sync(adapter); |
2243 | ena_free_io_irq(adapter); |
2244 | ena_del_napi_in_range(adapter, first_index: 0, count: io_queue_count); |
2245 | |
2246 | ena_free_all_tx_bufs(adapter); |
2247 | ena_free_all_rx_bufs(adapter); |
2248 | ena_free_all_io_tx_resources(adapter); |
2249 | ena_free_all_io_rx_resources(adapter); |
2250 | } |
2251 | |
2252 | /* ena_open - Called when a network interface is made active |
2253 | * @netdev: network interface device structure |
2254 | * |
2255 | * Returns 0 on success, negative value on failure |
2256 | * |
2257 | * The open entry point is called when a network interface is made |
2258 | * active by the system (IFF_UP). At this point all resources needed |
2259 | * for transmit and receive operations are allocated, the interrupt |
2260 | * handler is registered with the OS, the watchdog timer is started, |
2261 | * and the stack is notified that the interface is ready. |
2262 | */ |
2263 | static int ena_open(struct net_device *netdev) |
2264 | { |
2265 | struct ena_adapter *adapter = netdev_priv(dev: netdev); |
2266 | int rc; |
2267 | |
2268 | /* Notify the stack of the actual queue counts. */ |
2269 | rc = netif_set_real_num_tx_queues(dev: netdev, txq: adapter->num_io_queues); |
2270 | if (rc) { |
2271 | netif_err(adapter, ifup, netdev, "Can't set num tx queues\n" ); |
2272 | return rc; |
2273 | } |
2274 | |
2275 | rc = netif_set_real_num_rx_queues(dev: netdev, rxq: adapter->num_io_queues); |
2276 | if (rc) { |
2277 | netif_err(adapter, ifup, netdev, "Can't set num rx queues\n" ); |
2278 | return rc; |
2279 | } |
2280 | |
2281 | rc = ena_up(adapter); |
2282 | if (rc) |
2283 | return rc; |
2284 | |
2285 | return rc; |
2286 | } |
2287 | |
2288 | /* ena_close - Disables a network interface |
2289 | * @netdev: network interface device structure |
2290 | * |
2291 | * Returns 0, this is not allowed to fail |
2292 | * |
2293 | * The close entry point is called when an interface is de-activated |
2294 | * by the OS. The hardware is still under the drivers control, but |
2295 | * needs to be disabled. A global MAC reset is issued to stop the |
2296 | * hardware, and all transmit and receive resources are freed. |
2297 | */ |
2298 | static int ena_close(struct net_device *netdev) |
2299 | { |
2300 | struct ena_adapter *adapter = netdev_priv(dev: netdev); |
2301 | |
2302 | netif_dbg(adapter, ifdown, netdev, "%s\n" , __func__); |
2303 | |
2304 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
2305 | return 0; |
2306 | |
2307 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
2308 | ena_down(adapter); |
2309 | |
/* Check for device status and issue reset if needed */
2311 | check_for_admin_com_state(adapter); |
2312 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
2313 | netif_err(adapter, ifdown, adapter->netdev, |
2314 | "Destroy failure, restarting device\n" ); |
2315 | ena_dump_stats_to_dmesg(adapter); |
2316 | /* rtnl lock already obtained in dev_ioctl() layer */ |
2317 | ena_destroy_device(adapter, graceful: false); |
2318 | ena_restore_device(adapter); |
2319 | } |
2320 | |
2321 | return 0; |
2322 | } |
2323 | |
2324 | int ena_update_queue_params(struct ena_adapter *adapter, |
2325 | u32 new_tx_size, |
2326 | u32 new_rx_size, |
u32 new_llq_header_len)
2328 | { |
2329 | bool dev_was_up, large_llq_changed = false; |
2330 | int rc = 0; |
2331 | |
2332 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
2333 | ena_close(netdev: adapter->netdev); |
2334 | adapter->requested_tx_ring_size = new_tx_size; |
2335 | adapter->requested_rx_ring_size = new_rx_size; |
2336 | ena_init_io_rings(adapter, |
2337 | first_index: 0, |
2338 | count: adapter->xdp_num_queues + |
2339 | adapter->num_io_queues); |
2340 | |
2341 | large_llq_changed = adapter->ena_dev->tx_mem_queue_type == |
2342 | ENA_ADMIN_PLACEMENT_POLICY_DEV; |
2343 | large_llq_changed &= |
2344 | new_llq_header_len != adapter->ena_dev->tx_max_header_size; |
2345 | |
/* A check that the configuration is valid is done by the caller */
2347 | if (large_llq_changed) { |
2348 | adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled; |
2349 | |
2350 | ena_destroy_device(adapter, graceful: false); |
2351 | rc = ena_restore_device(adapter); |
2352 | } |
2353 | |
2354 | return dev_was_up && !rc ? ena_up(adapter) : rc; |
2355 | } |
2356 | |
2357 | int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak) |
2358 | { |
2359 | struct ena_ring *rx_ring; |
2360 | int i; |
2361 | |
2362 | if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) |
2363 | return -EINVAL; |
2364 | |
2365 | adapter->rx_copybreak = rx_copybreak; |
2366 | |
2367 | for (i = 0; i < adapter->num_io_queues; i++) { |
2368 | rx_ring = &adapter->rx_ring[i]; |
2369 | rx_ring->rx_copybreak = rx_copybreak; |
2370 | } |
2371 | |
2372 | return 0; |
2373 | } |
2374 | |
2375 | int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) |
2376 | { |
2377 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
2378 | int prev_channel_count; |
2379 | bool dev_was_up; |
2380 | |
2381 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
2382 | ena_close(netdev: adapter->netdev); |
2383 | prev_channel_count = adapter->num_io_queues; |
2384 | adapter->num_io_queues = new_channel_count; |
2385 | if (ena_xdp_present(adapter) && |
2386 | ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) { |
2387 | adapter->xdp_first_ring = new_channel_count; |
2388 | adapter->xdp_num_queues = new_channel_count; |
2389 | if (prev_channel_count > new_channel_count) |
2390 | ena_xdp_exchange_program_rx_in_range(adapter, |
2391 | NULL, |
2392 | first: new_channel_count, |
2393 | count: prev_channel_count); |
2394 | else |
2395 | ena_xdp_exchange_program_rx_in_range(adapter, |
2396 | prog: adapter->xdp_bpf_prog, |
2397 | first: prev_channel_count, |
2398 | count: new_channel_count); |
2399 | } |
2400 | |
2401 | /* We need to destroy the rss table so that the indirection |
2402 | * table will be reinitialized by ena_up() |
2403 | */ |
2404 | ena_com_rss_destroy(ena_dev); |
2405 | ena_init_io_rings(adapter, |
2406 | first_index: 0, |
2407 | count: adapter->xdp_num_queues + |
2408 | adapter->num_io_queues); |
2409 | return dev_was_up ? ena_open(netdev: adapter->netdev) : 0; |
2410 | } |
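
/* Example of the XDP remapping above (assuming an XDP program is attached and
* allowed): shrinking from 8 to 4 channels detaches the program from RX queues
* 4..7 (the NULL exchange over the removed range), while growing from 4 to 8
* attaches the existing xdp_bpf_prog to the newly added RX queues 4..7.
*/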
2411 | |
2412 | static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, |
2413 | struct sk_buff *skb, |
2414 | bool disable_meta_caching) |
2415 | { |
2416 | u32 mss = skb_shinfo(skb)->gso_size; |
2417 | struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; |
2418 | u8 l4_protocol = 0; |
2419 | |
2420 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { |
2421 | ena_tx_ctx->l4_csum_enable = 1; |
2422 | if (mss) { |
2423 | ena_tx_ctx->tso_enable = 1; |
2424 | ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; |
2425 | ena_tx_ctx->l4_csum_partial = 0; |
2426 | } else { |
2427 | ena_tx_ctx->tso_enable = 0; |
2428 | ena_meta->l4_hdr_len = 0; |
2429 | ena_tx_ctx->l4_csum_partial = 1; |
2430 | } |
2431 | |
2432 | switch (ip_hdr(skb)->version) { |
2433 | case IPVERSION: |
2434 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; |
2435 | if (ip_hdr(skb)->frag_off & htons(IP_DF)) |
2436 | ena_tx_ctx->df = 1; |
2437 | if (mss) |
2438 | ena_tx_ctx->l3_csum_enable = 1; |
2439 | l4_protocol = ip_hdr(skb)->protocol; |
2440 | break; |
2441 | case 6: |
2442 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; |
2443 | l4_protocol = ipv6_hdr(skb)->nexthdr; |
2444 | break; |
2445 | default: |
2446 | break; |
2447 | } |
2448 | |
2449 | if (l4_protocol == IPPROTO_TCP) |
2450 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; |
2451 | else |
2452 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; |
2453 | |
2454 | ena_meta->mss = mss; |
2455 | ena_meta->l3_hdr_len = skb_network_header_len(skb); |
2456 | ena_meta->l3_hdr_offset = skb_network_offset(skb); |
2457 | ena_tx_ctx->meta_valid = 1; |
2458 | } else if (disable_meta_caching) { |
2459 | memset(ena_meta, 0, sizeof(*ena_meta)); |
2460 | ena_tx_ctx->meta_valid = 1; |
2461 | } else { |
2462 | ena_tx_ctx->meta_valid = 0; |
2463 | } |
2464 | } |
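
/* Illustrative examples of the paths above (not exhaustive): a TSO IPv4/TCP
* skb with a non-zero gso_size sets tso_enable, takes l4_hdr_len from the TCP
* header's doff field and enables l3_csum_enable; a plain CHECKSUM_PARTIAL
* packet instead sets l4_csum_partial with tso_enable = 0. A packet that is
* neither offloaded nor TSO only marks meta_valid when disable_meta_caching
* requires the meta descriptor to be sent anyway.
*/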
2465 | |
2466 | static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, |
2467 | struct sk_buff *skb) |
2468 | { |
int num_frags, header_len, rc;
2470 | |
2471 | num_frags = skb_shinfo(skb)->nr_frags; |
2472 | header_len = skb_headlen(skb); |
2473 | |
2474 | if (num_frags < tx_ring->sgl_size) |
2475 | return 0; |
2476 | |
2477 | if ((num_frags == tx_ring->sgl_size) && |
2478 | (header_len < tx_ring->tx_max_header_size)) |
2479 | return 0; |
2480 | |
2481 | ena_increase_stat(statp: &tx_ring->tx_stats.linearize, cnt: 1, syncp: &tx_ring->syncp); |
2482 | |
2483 | rc = skb_linearize(skb); |
2484 | if (unlikely(rc)) { |
2485 | ena_increase_stat(statp: &tx_ring->tx_stats.linearize_failed, cnt: 1, |
2486 | syncp: &tx_ring->syncp); |
2487 | } |
2488 | |
2489 | return rc; |
2490 | } |
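
/* Example of the check above (hypothetical sgl_size of 17): an skb with up to
* 16 frags always passes; one with exactly 17 frags passes only if its linear
* header is shorter than tx_max_header_size; anything beyond that is
* linearized, with the linearize/linearize_failed counters tracking the cost.
*/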
2491 | |
2492 | static int ena_tx_map_skb(struct ena_ring *tx_ring, |
2493 | struct ena_tx_buffer *tx_info, |
2494 | struct sk_buff *skb, |
2495 | void **push_hdr, |
u16 *header_len)
2497 | { |
2498 | struct ena_adapter *adapter = tx_ring->adapter; |
2499 | struct ena_com_buf *ena_buf; |
2500 | dma_addr_t dma; |
2501 | u32 skb_head_len, frag_len, last_frag; |
2502 | u16 push_len = 0; |
2503 | u16 delta = 0; |
2504 | int i = 0; |
2505 | |
2506 | skb_head_len = skb_headlen(skb); |
2507 | tx_info->skb = skb; |
2508 | ena_buf = tx_info->bufs; |
2509 | |
2510 | if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
/* When the device is in LLQ mode, the driver will copy
* the header into the device memory space.
* The ena_com layer assumes the header is in a linear
* memory space.
* This assumption might be wrong since part of the header
* can be in the fragmented buffers.
* Use skb_header_pointer() to make sure the header is in a
* linear memory space.
*/
2520 | |
2521 | push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); |
2522 | *push_hdr = skb_header_pointer(skb, offset: 0, len: push_len, |
2523 | buffer: tx_ring->push_buf_intermediate_buf); |
2524 | *header_len = push_len; |
2525 | if (unlikely(skb->data != *push_hdr)) { |
2526 | ena_increase_stat(statp: &tx_ring->tx_stats.llq_buffer_copy, cnt: 1, |
2527 | syncp: &tx_ring->syncp); |
2528 | |
2529 | delta = push_len - skb_head_len; |
2530 | } |
2531 | } else { |
2532 | *push_hdr = NULL; |
2533 | *header_len = min_t(u32, skb_head_len, |
2534 | tx_ring->tx_max_header_size); |
2535 | } |
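
/* Worked example of the push/delta logic above (illustrative numbers): with
* tx_max_header_size = 96, a longer packet whose linear part holds only 64
* bytes gets push_len = 96, skb_header_pointer() assembles the header in
* push_buf_intermediate_buf (64 linear bytes + 32 frag bytes) and delta
* becomes 32, so the frag-mapping loop below skips the 32 bytes that were
* already pushed to the device.
*/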
2536 | |
2537 | netif_dbg(adapter, tx_queued, adapter->netdev, |
2538 | "skb: %p header_buf->vaddr: %p push_len: %d\n" , skb, |
2539 | *push_hdr, push_len); |
2540 | |
2541 | if (skb_head_len > push_len) { |
2542 | dma = dma_map_single(tx_ring->dev, skb->data + push_len, |
2543 | skb_head_len - push_len, DMA_TO_DEVICE); |
2544 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) |
2545 | goto error_report_dma_error; |
2546 | |
2547 | ena_buf->paddr = dma; |
2548 | ena_buf->len = skb_head_len - push_len; |
2549 | |
2550 | ena_buf++; |
2551 | tx_info->num_of_bufs++; |
2552 | tx_info->map_linear_data = 1; |
2553 | } else { |
2554 | tx_info->map_linear_data = 0; |
2555 | } |
2556 | |
2557 | last_frag = skb_shinfo(skb)->nr_frags; |
2558 | |
2559 | for (i = 0; i < last_frag; i++) { |
2560 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2561 | |
2562 | frag_len = skb_frag_size(frag); |
2563 | |
2564 | if (unlikely(delta >= frag_len)) { |
2565 | delta -= frag_len; |
2566 | continue; |
2567 | } |
2568 | |
2569 | dma = skb_frag_dma_map(dev: tx_ring->dev, frag, offset: delta, |
2570 | size: frag_len - delta, dir: DMA_TO_DEVICE); |
2571 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) |
2572 | goto error_report_dma_error; |
2573 | |
2574 | ena_buf->paddr = dma; |
2575 | ena_buf->len = frag_len - delta; |
2576 | ena_buf++; |
2577 | tx_info->num_of_bufs++; |
2578 | delta = 0; |
2579 | } |
2580 | |
2581 | return 0; |
2582 | |
2583 | error_report_dma_error: |
2584 | ena_increase_stat(statp: &tx_ring->tx_stats.dma_mapping_err, cnt: 1, |
2585 | syncp: &tx_ring->syncp); |
2586 | netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n" ); |
2587 | |
2588 | tx_info->skb = NULL; |
2589 | |
2590 | tx_info->num_of_bufs += i; |
2591 | ena_unmap_tx_buff(tx_ring, tx_info); |
2592 | |
2593 | return -EINVAL; |
2594 | } |
2595 | |
2596 | /* Called with netif_tx_lock. */ |
2597 | static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2598 | { |
2599 | struct ena_adapter *adapter = netdev_priv(dev); |
2600 | struct ena_tx_buffer *tx_info; |
2601 | struct ena_com_tx_ctx ena_tx_ctx; |
2602 | struct ena_ring *tx_ring; |
2603 | struct netdev_queue *txq; |
2604 | void *push_hdr; |
u16 next_to_use, req_id, header_len;
2606 | int qid, rc; |
2607 | |
2608 | netif_dbg(adapter, tx_queued, dev, "%s skb %p\n" , __func__, skb); |
2609 | /* Determine which tx ring we will be placed on */ |
2610 | qid = skb_get_queue_mapping(skb); |
2611 | tx_ring = &adapter->tx_ring[qid]; |
2612 | txq = netdev_get_tx_queue(dev, index: qid); |
2613 | |
2614 | rc = ena_check_and_linearize_skb(tx_ring, skb); |
2615 | if (unlikely(rc)) |
2616 | goto error_drop_packet; |
2617 | |
2618 | next_to_use = tx_ring->next_to_use; |
2619 | req_id = tx_ring->free_ids[next_to_use]; |
2620 | tx_info = &tx_ring->tx_buffer_info[req_id]; |
2621 | tx_info->num_of_bufs = 0; |
2622 | |
2623 | WARN(tx_info->skb, "SKB isn't NULL req_id %d\n" , req_id); |
2624 | |
2625 | rc = ena_tx_map_skb(tx_ring, tx_info, skb, push_hdr: &push_hdr, header_len: &header_len); |
2626 | if (unlikely(rc)) |
2627 | goto error_drop_packet; |
2628 | |
2629 | memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); |
2630 | ena_tx_ctx.ena_bufs = tx_info->bufs; |
2631 | ena_tx_ctx.push_header = push_hdr; |
2632 | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; |
2633 | ena_tx_ctx.req_id = req_id; |
2634 | ena_tx_ctx.header_len = header_len; |
2635 | |
2636 | /* set flags and meta data */ |
2637 | ena_tx_csum(ena_tx_ctx: &ena_tx_ctx, skb, disable_meta_caching: tx_ring->disable_meta_caching); |
2638 | |
2639 | rc = ena_xmit_common(adapter, |
2640 | ring: tx_ring, |
2641 | tx_info, |
2642 | ena_tx_ctx: &ena_tx_ctx, |
2643 | next_to_use, |
2644 | bytes: skb->len); |
2645 | if (rc) |
2646 | goto error_unmap_dma; |
2647 | |
2648 | netdev_tx_sent_queue(dev_queue: txq, bytes: skb->len); |
2649 | |
/* Stop the queue when no more space is available; the packet can take up
* to sgl_size + 2 descriptors: one for the meta descriptor and one for the
* header (if the header is larger than tx_max_header_size).
*/
2654 | if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
2655 | tx_ring->sgl_size + 2))) { |
2656 | netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n" , |
2657 | __func__, qid); |
2658 | |
2659 | netif_tx_stop_queue(dev_queue: txq); |
2660 | ena_increase_stat(statp: &tx_ring->tx_stats.queue_stop, cnt: 1, |
2661 | syncp: &tx_ring->syncp); |
2662 | |
/* There is a rare condition where this function decides to
* stop the queue but meanwhile clean_tx_irq updates
* next_to_completion and terminates.
* The queue will remain stopped forever.
* To solve this issue add an smp_mb() to make sure that
* the netif_tx_stop_queue() write is visible before checking if
* there is additional space in the queue.
*/
2671 | smp_mb(); |
2672 | |
2673 | if (ena_com_sq_have_enough_space(io_sq: tx_ring->ena_com_io_sq, |
2674 | ENA_TX_WAKEUP_THRESH)) { |
2675 | netif_tx_wake_queue(dev_queue: txq); |
2676 | ena_increase_stat(statp: &tx_ring->tx_stats.queue_wakeup, cnt: 1, |
2677 | syncp: &tx_ring->syncp); |
2678 | } |
2679 | } |
2680 | |
2681 | skb_tx_timestamp(skb); |
2682 | |
2683 | if (netif_xmit_stopped(dev_queue: txq) || !netdev_xmit_more()) |
2684 | /* trigger the dma engine. ena_ring_tx_doorbell() |
2685 | * calls a memory barrier inside it. |
2686 | */ |
2687 | ena_ring_tx_doorbell(tx_ring); |
2688 | |
2689 | return NETDEV_TX_OK; |
2690 | |
2691 | error_unmap_dma: |
2692 | ena_unmap_tx_buff(tx_ring, tx_info); |
2693 | tx_info->skb = NULL; |
2694 | |
2695 | error_drop_packet: |
2696 | dev_kfree_skb(skb); |
2697 | return NETDEV_TX_OK; |
2698 | } |
2699 | |
2700 | static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
2701 | { |
2702 | struct device *dev = &pdev->dev; |
2703 | struct ena_admin_host_info *host_info; |
2704 | int rc; |
2705 | |
2706 | /* Allocate only the host info */ |
2707 | rc = ena_com_allocate_host_info(ena_dev); |
2708 | if (rc) { |
2709 | dev_err(dev, "Cannot allocate host info\n" ); |
2710 | return; |
2711 | } |
2712 | |
2713 | host_info = ena_dev->host_attr.host_info; |
2714 | |
2715 | host_info->bdf = pci_dev_id(dev: pdev); |
2716 | host_info->os_type = ENA_ADMIN_OS_LINUX; |
2717 | host_info->kernel_ver = LINUX_VERSION_CODE; |
2718 | strscpy(host_info->kernel_ver_str, utsname()->version, |
2719 | sizeof(host_info->kernel_ver_str) - 1); |
2720 | host_info->os_dist = 0; |
2721 | strscpy(host_info->os_dist_str, utsname()->release, |
2722 | sizeof(host_info->os_dist_str)); |
2723 | host_info->driver_version = |
2724 | (DRV_MODULE_GEN_MAJOR) | |
2725 | (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | |
2726 | (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | |
2727 | ("K" [0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); |
2728 | host_info->num_cpus = num_online_cpus(); |
2729 | |
2730 | host_info->driver_supported_features = |
2731 | ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | |
2732 | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | |
2733 | ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | |
2734 | ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK | |
2735 | ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK; |
2736 | |
2737 | rc = ena_com_set_host_attributes(ena_dev); |
2738 | if (rc) { |
2739 | if (rc == -EOPNOTSUPP) |
2740 | dev_warn(dev, "Cannot set host attributes\n" ); |
2741 | else |
2742 | dev_err(dev, "Cannot set host attributes\n" ); |
2743 | |
2744 | goto err; |
2745 | } |
2746 | |
2747 | return; |
2748 | |
2749 | err: |
2750 | ena_com_delete_host_info(ena_dev); |
2751 | } |
2752 | |
2753 | static void ena_config_debug_area(struct ena_adapter *adapter) |
2754 | { |
2755 | u32 debug_area_size; |
2756 | int rc, ss_count; |
2757 | |
2758 | ss_count = ena_get_sset_count(netdev: adapter->netdev, sset: ETH_SS_STATS); |
2759 | if (ss_count <= 0) { |
2760 | netif_err(adapter, drv, adapter->netdev, |
2761 | "SS count is negative\n" ); |
2762 | return; |
2763 | } |
2764 | |
/* Allocate 32 bytes for each string and 64 bits for the value */
2766 | debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; |
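/* E.g. 100 stat strings would yield 100 * ETH_GSTRING_LEN (32) +
* 100 * sizeof(u64) = 4000 bytes of debug area (illustrative count).
*/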
2767 | |
2768 | rc = ena_com_allocate_debug_area(ena_dev: adapter->ena_dev, debug_area_size); |
2769 | if (rc) { |
2770 | netif_err(adapter, drv, adapter->netdev, |
2771 | "Cannot allocate debug area\n" ); |
2772 | return; |
2773 | } |
2774 | |
2775 | rc = ena_com_set_host_attributes(ena_dev: adapter->ena_dev); |
2776 | if (rc) { |
2777 | if (rc == -EOPNOTSUPP) |
2778 | netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n" ); |
2779 | else |
2780 | netif_err(adapter, drv, adapter->netdev, |
2781 | "Cannot set host attributes\n" ); |
2782 | goto err; |
2783 | } |
2784 | |
2785 | return; |
2786 | err: |
2787 | ena_com_delete_debug_area(ena_dev: adapter->ena_dev); |
2788 | } |
2789 | |
2790 | int ena_update_hw_stats(struct ena_adapter *adapter) |
2791 | { |
2792 | int rc; |
2793 | |
2794 | rc = ena_com_get_eni_stats(ena_dev: adapter->ena_dev, stats: &adapter->eni_stats); |
2795 | if (rc) { |
2796 | netdev_err(dev: adapter->netdev, format: "Failed to get ENI stats\n" ); |
2797 | return rc; |
2798 | } |
2799 | |
2800 | return 0; |
2801 | } |
2802 | |
2803 | static void ena_get_stats64(struct net_device *netdev, |
2804 | struct rtnl_link_stats64 *stats) |
2805 | { |
2806 | struct ena_adapter *adapter = netdev_priv(dev: netdev); |
2807 | struct ena_ring *rx_ring, *tx_ring; |
2808 | u64 total_xdp_rx_drops = 0; |
2809 | unsigned int start; |
2810 | u64 rx_drops; |
2811 | u64 tx_drops; |
2812 | int i; |
2813 | |
2814 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
2815 | return; |
2816 | |
2817 | for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { |
2818 | u64 bytes, packets, xdp_rx_drops; |
2819 | |
2820 | tx_ring = &adapter->tx_ring[i]; |
2821 | |
2822 | do { |
2823 | start = u64_stats_fetch_begin(syncp: &tx_ring->syncp); |
2824 | packets = tx_ring->tx_stats.cnt; |
2825 | bytes = tx_ring->tx_stats.bytes; |
2826 | } while (u64_stats_fetch_retry(syncp: &tx_ring->syncp, start)); |
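/* The begin/retry pair re-reads the counters if a writer touched the
* ring's u64_stats syncp mid-read, so packets and bytes stay mutually
* consistent (this matters mainly on 32-bit systems where 64-bit reads
* are not atomic).
*/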
2827 | |
2828 | stats->tx_packets += packets; |
2829 | stats->tx_bytes += bytes; |
2830 | |
2831 | /* In XDP there isn't an RX queue counterpart */ |
2832 | if (ENA_IS_XDP_INDEX(adapter, i)) |
2833 | continue; |
2834 | |
2835 | rx_ring = &adapter->rx_ring[i]; |
2836 | |
2837 | do { |
2838 | start = u64_stats_fetch_begin(syncp: &rx_ring->syncp); |
2839 | packets = rx_ring->rx_stats.cnt; |
2840 | bytes = rx_ring->rx_stats.bytes; |
2841 | xdp_rx_drops = rx_ring->rx_stats.xdp_drop; |
2842 | } while (u64_stats_fetch_retry(syncp: &rx_ring->syncp, start)); |
2843 | |
2844 | stats->rx_packets += packets; |
2845 | stats->rx_bytes += bytes; |
2846 | total_xdp_rx_drops += xdp_rx_drops; |
2847 | } |
2848 | |
2849 | do { |
2850 | start = u64_stats_fetch_begin(syncp: &adapter->syncp); |
2851 | rx_drops = adapter->dev_stats.rx_drops; |
2852 | tx_drops = adapter->dev_stats.tx_drops; |
2853 | } while (u64_stats_fetch_retry(syncp: &adapter->syncp, start)); |
2854 | |
2855 | stats->rx_dropped = rx_drops + total_xdp_rx_drops; |
2856 | stats->tx_dropped = tx_drops; |
2857 | |
2858 | stats->multicast = 0; |
2859 | stats->collisions = 0; |
2860 | |
2861 | stats->rx_length_errors = 0; |
2862 | stats->rx_crc_errors = 0; |
2863 | stats->rx_frame_errors = 0; |
2864 | stats->rx_fifo_errors = 0; |
2865 | stats->rx_missed_errors = 0; |
2866 | stats->tx_window_errors = 0; |
2867 | |
2868 | stats->rx_errors = 0; |
2869 | stats->tx_errors = 0; |
2870 | } |
2871 | |
2872 | static const struct net_device_ops ena_netdev_ops = { |
2873 | .ndo_open = ena_open, |
2874 | .ndo_stop = ena_close, |
2875 | .ndo_start_xmit = ena_start_xmit, |
2876 | .ndo_get_stats64 = ena_get_stats64, |
2877 | .ndo_tx_timeout = ena_tx_timeout, |
2878 | .ndo_change_mtu = ena_change_mtu, |
2879 | .ndo_validate_addr = eth_validate_addr, |
2880 | .ndo_bpf = ena_xdp, |
2881 | .ndo_xdp_xmit = ena_xdp_xmit, |
2882 | }; |
2883 | |
2884 | static int ena_calc_io_queue_size(struct ena_adapter *adapter, |
2885 | struct ena_com_dev_get_features_ctx *get_feat_ctx) |
2886 | { |
2887 | struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; |
2888 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
2889 | u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; |
2890 | u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; |
2891 | u32 max_tx_queue_size; |
2892 | u32 max_rx_queue_size; |
2893 | |
2894 | /* If this function is called after driver load, the ring sizes have already |
2895 | * been configured. Take it into account when recalculating ring size. |
2896 | */ |
2897 | if (adapter->tx_ring->ring_size) |
2898 | tx_queue_size = adapter->tx_ring->ring_size; |
2899 | |
2900 | if (adapter->rx_ring->ring_size) |
2901 | rx_queue_size = adapter->rx_ring->ring_size; |
2902 | |
2903 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
2904 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = |
2905 | &get_feat_ctx->max_queue_ext.max_queue_ext; |
2906 | max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, |
2907 | max_queue_ext->max_rx_sq_depth); |
2908 | max_tx_queue_size = max_queue_ext->max_tx_cq_depth; |
2909 | |
2910 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
2911 | max_tx_queue_size = min_t(u32, max_tx_queue_size, |
2912 | llq->max_llq_depth); |
2913 | else |
2914 | max_tx_queue_size = min_t(u32, max_tx_queue_size, |
2915 | max_queue_ext->max_tx_sq_depth); |
2916 | |
2917 | adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, |
2918 | max_queue_ext->max_per_packet_tx_descs); |
2919 | adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, |
2920 | max_queue_ext->max_per_packet_rx_descs); |
2921 | } else { |
2922 | struct ena_admin_queue_feature_desc *max_queues = |
2923 | &get_feat_ctx->max_queues; |
2924 | max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, |
2925 | max_queues->max_sq_depth); |
2926 | max_tx_queue_size = max_queues->max_cq_depth; |
2927 | |
2928 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
2929 | max_tx_queue_size = min_t(u32, max_tx_queue_size, |
2930 | llq->max_llq_depth); |
2931 | else |
2932 | max_tx_queue_size = min_t(u32, max_tx_queue_size, |
2933 | max_queues->max_sq_depth); |
2934 | |
2935 | adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, |
2936 | max_queues->max_packet_tx_descs); |
2937 | adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, |
2938 | max_queues->max_packet_rx_descs); |
2939 | } |
2940 | |
2941 | max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); |
2942 | max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size); |
2943 | |
2944 | if (max_tx_queue_size < ENA_MIN_RING_SIZE) { |
2945 | netdev_err(dev: adapter->netdev, format: "Device max TX queue size: %d < minimum: %d\n" , |
2946 | max_tx_queue_size, ENA_MIN_RING_SIZE); |
2947 | return -EINVAL; |
2948 | } |
2949 | |
2950 | if (max_rx_queue_size < ENA_MIN_RING_SIZE) { |
2951 | netdev_err(dev: adapter->netdev, format: "Device max RX queue size: %d < minimum: %d\n" , |
2952 | max_rx_queue_size, ENA_MIN_RING_SIZE); |
2953 | return -EINVAL; |
2954 | } |
2955 | |
2956 | /* When forcing large headers, we multiply the entry size by 2, and therefore divide |
2957 | * the queue size by 2, leaving the amount of memory used by the queues unchanged. |
2958 | */ |
2959 | if (adapter->large_llq_header_enabled) { |
2960 | if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && |
2961 | ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
2962 | max_tx_queue_size /= 2; |
2963 | dev_info(&adapter->pdev->dev, |
2964 | "Forcing large headers and decreasing maximum TX queue size to %d\n" , |
2965 | max_tx_queue_size); |
2966 | } else { |
2967 | dev_err(&adapter->pdev->dev, |
2968 | "Forcing large headers failed: LLQ is disabled or device does not support large headers\n" ); |
2969 | |
2970 | adapter->large_llq_header_enabled = false; |
2971 | } |
2972 | } |
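
/* Worked example of the trade-off above (illustrative depth): a 1024-entry
* LLQ ring of 128-byte entries occupies 128 KiB of device memory; forcing
* 256-byte entries halves max_tx_queue_size to 512 so the footprint stays
* 128 KiB.
*/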
2973 | |
2974 | tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, |
2975 | max_tx_queue_size); |
2976 | rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, |
2977 | max_rx_queue_size); |
2978 | |
2979 | tx_queue_size = rounddown_pow_of_two(tx_queue_size); |
2980 | rx_queue_size = rounddown_pow_of_two(rx_queue_size); |
2981 | |
2982 | adapter->max_tx_ring_size = max_tx_queue_size; |
2983 | adapter->max_rx_ring_size = max_rx_queue_size; |
2984 | adapter->requested_tx_ring_size = tx_queue_size; |
2985 | adapter->requested_rx_ring_size = rx_queue_size; |
2986 | |
2987 | return 0; |
2988 | } |
2989 | |
2990 | static int ena_device_validate_params(struct ena_adapter *adapter, |
2991 | struct ena_com_dev_get_features_ctx *get_feat_ctx) |
2992 | { |
2993 | struct net_device *netdev = adapter->netdev; |
2994 | int rc; |
2995 | |
2996 | rc = ether_addr_equal(addr1: get_feat_ctx->dev_attr.mac_addr, |
2997 | addr2: adapter->mac_addr); |
2998 | if (!rc) { |
2999 | netif_err(adapter, drv, netdev, |
"Error, MAC addresses are different\n" );
3001 | return -EINVAL; |
3002 | } |
3003 | |
3004 | if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { |
3005 | netif_err(adapter, drv, netdev, |
3006 | "Error, device max mtu is smaller than netdev MTU\n" ); |
3007 | return -EINVAL; |
3008 | } |
3009 | |
3010 | return 0; |
3011 | } |
3012 | |
3013 | static void set_default_llq_configurations(struct ena_adapter *adapter, |
3014 | struct ena_llq_configurations *llq_config, |
3015 | struct ena_admin_feature_llq_desc *llq) |
3016 | { |
3017 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3018 | |
3019 | llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; |
3020 | llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; |
3021 | llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
3022 | |
3023 | adapter->large_llq_header_supported = |
3024 | !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ)); |
3025 | adapter->large_llq_header_supported &= |
3026 | !!(llq->entry_size_ctrl_supported & |
3027 | ENA_ADMIN_LIST_ENTRY_SIZE_256B); |
3028 | |
3029 | if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && |
3030 | adapter->large_llq_header_enabled) { |
3031 | llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; |
3032 | llq_config->llq_ring_entry_size_value = 256; |
3033 | } else { |
3034 | llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; |
3035 | llq_config->llq_ring_entry_size_value = 128; |
3036 | } |
3037 | } |
3038 | |
3039 | static int ena_set_queues_placement_policy(struct pci_dev *pdev, |
3040 | struct ena_com_dev *ena_dev, |
3041 | struct ena_admin_feature_llq_desc *llq, |
3042 | struct ena_llq_configurations *llq_default_configurations) |
3043 | { |
3044 | int rc; |
3045 | u32 llq_feature_mask; |
3046 | |
3047 | llq_feature_mask = 1 << ENA_ADMIN_LLQ; |
3048 | if (!(ena_dev->supported_features & llq_feature_mask)) { |
3049 | dev_warn(&pdev->dev, |
"LLQ is not supported. Fallback to host mode policy.\n" );
3051 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
3052 | return 0; |
3053 | } |
3054 | |
3055 | if (!ena_dev->mem_bar) { |
3056 | netdev_err(dev: ena_dev->net_device, |
3057 | format: "LLQ is advertised as supported but device doesn't expose mem bar\n" ); |
3058 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
3059 | return 0; |
3060 | } |
3061 | |
3062 | rc = ena_com_config_dev_mode(ena_dev, llq_features: llq, llq_default_config: llq_default_configurations); |
3063 | if (unlikely(rc)) { |
3064 | dev_err(&pdev->dev, |
3065 | "Failed to configure the device mode. Fallback to host mode policy.\n" ); |
3066 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
3067 | } |
3068 | |
3069 | return 0; |
3070 | } |
3071 | |
3072 | static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, |
3073 | int bars) |
3074 | { |
3075 | bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); |
3076 | |
3077 | if (!has_mem_bar) |
3078 | return 0; |
3079 | |
3080 | ena_dev->mem_bar = devm_ioremap_wc(dev: &pdev->dev, |
3081 | pci_resource_start(pdev, ENA_MEM_BAR), |
3082 | pci_resource_len(pdev, ENA_MEM_BAR)); |
3083 | |
3084 | if (!ena_dev->mem_bar) |
3085 | return -EFAULT; |
3086 | |
3087 | return 0; |
3088 | } |
3089 | |
3090 | static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, |
3091 | struct ena_com_dev_get_features_ctx *get_feat_ctx, |
3092 | bool *wd_state) |
3093 | { |
3094 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3095 | struct net_device *netdev = adapter->netdev; |
3096 | struct ena_llq_configurations llq_config; |
3097 | struct device *dev = &pdev->dev; |
3098 | bool readless_supported; |
3099 | u32 aenq_groups; |
3100 | int dma_width; |
3101 | int rc; |
3102 | |
3103 | rc = ena_com_mmio_reg_read_request_init(ena_dev); |
3104 | if (rc) { |
3105 | dev_err(dev, "Failed to init mmio read less\n" ); |
3106 | return rc; |
3107 | } |
3108 | |
/* The PCIe configuration space revision id indicates whether mmio reg
* read is disabled
*/
3112 | readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); |
3113 | ena_com_set_mmio_read_mode(ena_dev, readless_supported); |
3114 | |
3115 | rc = ena_com_dev_reset(ena_dev, reset_reason: ENA_REGS_RESET_NORMAL); |
3116 | if (rc) { |
3117 | dev_err(dev, "Can not reset device\n" ); |
3118 | goto err_mmio_read_less; |
3119 | } |
3120 | |
3121 | rc = ena_com_validate_version(ena_dev); |
3122 | if (rc) { |
3123 | dev_err(dev, "Device version is too low\n" ); |
3124 | goto err_mmio_read_less; |
3125 | } |
3126 | |
3127 | dma_width = ena_com_get_dma_width(ena_dev); |
3128 | if (dma_width < 0) { |
3129 | dev_err(dev, "Invalid dma width value %d" , dma_width); |
3130 | rc = dma_width; |
3131 | goto err_mmio_read_less; |
3132 | } |
3133 | |
3134 | rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); |
3135 | if (rc) { |
3136 | dev_err(dev, "dma_set_mask_and_coherent failed %d\n" , rc); |
3137 | goto err_mmio_read_less; |
3138 | } |
3139 | |
3140 | /* ENA admin level init */ |
3141 | rc = ena_com_admin_init(ena_dev, aenq_handlers: &aenq_handlers); |
3142 | if (rc) { |
3143 | dev_err(dev, |
3144 | "Can not initialize ena admin queue with device\n" ); |
3145 | goto err_mmio_read_less; |
3146 | } |
3147 | |
/* To enable the MSI-X interrupts, the driver needs to know the number
* of queues, so it uses polling mode to retrieve this
* information
*/
3152 | ena_com_set_admin_polling_mode(ena_dev, polling: true); |
3153 | |
3154 | ena_config_host_info(ena_dev, pdev); |
3155 | |
/* Get Device Attributes */
3157 | rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); |
3158 | if (rc) { |
3159 | dev_err(dev, "Cannot get attribute for ena device rc=%d\n" , rc); |
3160 | goto err_admin_init; |
3161 | } |
3162 | |
/* Try to turn on all the available aenq groups */
3164 | aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | |
3165 | BIT(ENA_ADMIN_FATAL_ERROR) | |
3166 | BIT(ENA_ADMIN_WARNING) | |
3167 | BIT(ENA_ADMIN_NOTIFICATION) | |
3168 | BIT(ENA_ADMIN_KEEP_ALIVE); |
3169 | |
3170 | aenq_groups &= get_feat_ctx->aenq.supported_groups; |
3171 | |
3172 | rc = ena_com_set_aenq_config(ena_dev, groups_flag: aenq_groups); |
3173 | if (rc) { |
3174 | dev_err(dev, "Cannot configure aenq groups rc= %d\n" , rc); |
3175 | goto err_admin_init; |
3176 | } |
3177 | |
3178 | *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); |
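/* wd_state ends up true only when the keep-alive AENQ group was both
* requested and supported by the device; presumably the keep-alive watchdog
* elsewhere in the driver is armed only in that case.
*/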
3179 | |
3180 | set_default_llq_configurations(adapter, llq_config: &llq_config, llq: &get_feat_ctx->llq); |
3181 | |
3182 | rc = ena_set_queues_placement_policy(pdev, ena_dev, llq: &get_feat_ctx->llq, |
3183 | llq_default_configurations: &llq_config); |
3184 | if (rc) { |
3185 | netdev_err(dev: netdev, format: "Cannot set queues placement policy rc= %d\n" , rc); |
3186 | goto err_admin_init; |
3187 | } |
3188 | |
3189 | rc = ena_calc_io_queue_size(adapter, get_feat_ctx); |
3190 | if (unlikely(rc)) |
3191 | goto err_admin_init; |
3192 | |
3193 | return 0; |
3194 | |
3195 | err_admin_init: |
3196 | ena_com_abort_admin_commands(ena_dev); |
3197 | ena_com_wait_for_abort_completion(ena_dev); |
3198 | ena_com_delete_host_info(ena_dev); |
3199 | ena_com_admin_destroy(ena_dev); |
3200 | err_mmio_read_less: |
3201 | ena_com_mmio_reg_read_request_destroy(ena_dev); |
3202 | |
3203 | return rc; |
3204 | } |
3205 | |
3206 | static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) |
3207 | { |
3208 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3209 | struct device *dev = &adapter->pdev->dev; |
3210 | int rc; |
3211 | |
3212 | rc = ena_enable_msix(adapter); |
3213 | if (rc) { |
3214 | dev_err(dev, "Can not reserve msix vectors\n" ); |
3215 | return rc; |
3216 | } |
3217 | |
3218 | ena_setup_mgmnt_intr(adapter); |
3219 | |
3220 | rc = ena_request_mgmnt_irq(adapter); |
3221 | if (rc) { |
3222 | dev_err(dev, "Can not setup management interrupts\n" ); |
3223 | goto err_disable_msix; |
3224 | } |
3225 | |
3226 | ena_com_set_admin_polling_mode(ena_dev, polling: false); |
3227 | |
3228 | ena_com_admin_aenq_enable(ena_dev); |
3229 | |
3230 | return 0; |
3231 | |
3232 | err_disable_msix: |
3233 | ena_disable_msix(adapter); |
3234 | |
3235 | return rc; |
3236 | } |
3237 | |
3238 | static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) |
3239 | { |
3240 | struct net_device *netdev = adapter->netdev; |
3241 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3242 | bool dev_up; |
3243 | |
3244 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
3245 | return; |
3246 | |
3247 | netif_carrier_off(dev: netdev); |
3248 | |
3249 | del_timer_sync(timer: &adapter->timer_service); |
3250 | |
3251 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
3252 | adapter->dev_up_before_reset = dev_up; |
3253 | if (!graceful) |
3254 | ena_com_set_admin_running_state(ena_dev, state: false); |
3255 | |
3256 | if (dev_up) |
3257 | ena_down(adapter); |
3258 | |
3259 | /* Stop the device from sending AENQ events (if the reset flag is set |
3260 | * and the device is up, ena_down() has already reset the device). |
3261 | */ |
3262 | if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) |
3263 | ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
3264 | |
3265 | ena_free_mgmnt_irq(adapter); |
3266 | |
3267 | ena_disable_msix(adapter); |
3268 | |
3269 | ena_com_abort_admin_commands(ena_dev); |
3270 | |
3271 | ena_com_wait_for_abort_completion(ena_dev); |
3272 | |
3273 | ena_com_admin_destroy(ena_dev); |
3274 | |
3275 | ena_com_mmio_reg_read_request_destroy(ena_dev); |
3276 | |
3277 | /* return reset reason to default value */ |
3278 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
3279 | |
3280 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
3281 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
3282 | } |
3283 | |
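/* ena_restore_device - Re-initialize the device after ena_destroy_device()
 * @adapter: ENA adapter private structure
 *
 * Re-runs device initialization, refreshes the per-ring LLQ placement
 * parameters, validates the reported device parameters, re-enables MSI-X and
 * the admin interrupts, and brings the interface back up if it was up before
 * the reset. On success the timer service and keep-alive tracking are
 * re-armed.
 */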
3284 | static int ena_restore_device(struct ena_adapter *adapter) |
3285 | { |
3286 | struct ena_com_dev_get_features_ctx get_feat_ctx; |
3287 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3288 | struct pci_dev *pdev = adapter->pdev; |
3289 | struct ena_ring *txr; |
3290 | int rc, count, i; |
3291 | bool wd_state; |
3292 | |
3293 | set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
3294 | rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state); |
3295 | if (rc) { |
3296 | dev_err(&pdev->dev, "Can not initialize device\n" ); |
3297 | goto err; |
3298 | } |
3299 | adapter->wd_state = wd_state; |
3300 | |
3301 | count = adapter->xdp_num_queues + adapter->num_io_queues; |
3302 | for (i = 0 ; i < count; i++) { |
3303 | txr = &adapter->tx_ring[i]; |
3304 | txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; |
3305 | txr->tx_max_header_size = ena_dev->tx_max_header_size; |
3306 | } |
3307 | |
3308 | rc = ena_device_validate_params(adapter, &get_feat_ctx); |
3309 | if (rc) { |
3310 | dev_err(&pdev->dev, "Validation of device parameters failed\n" ); |
3311 | goto err_device_destroy; |
3312 | } |
3313 | |
3314 | rc = ena_enable_msix_and_set_admin_interrupts(adapter); |
3315 | if (rc) { |
3316 | dev_err(&pdev->dev, "Enable MSI-X failed\n" ); |
3317 | goto err_device_destroy; |
3318 | } |
3319 | /* If the interface was up before the reset bring it up */ |
3320 | if (adapter->dev_up_before_reset) { |
3321 | rc = ena_up(adapter); |
3322 | if (rc) { |
3323 | dev_err(&pdev->dev, "Failed to create I/O queues\n" ); |
3324 | goto err_disable_msix; |
3325 | } |
3326 | } |
3327 | |
3328 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
3329 | |
3330 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
3331 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) |
3332 | netif_carrier_on(adapter->netdev); |
3333 | |
3334 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
3335 | adapter->last_keep_alive_jiffies = jiffies; |
3336 | |
3337 | return rc; |
3338 | err_disable_msix: |
3339 | ena_free_mgmnt_irq(adapter); |
3340 | ena_disable_msix(adapter); |
3341 | err_device_destroy: |
3342 | ena_com_abort_admin_commands(ena_dev); |
3343 | ena_com_wait_for_abort_completion(ena_dev); |
3344 | ena_com_admin_destroy(ena_dev); |
3345 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); |
3346 | ena_com_mmio_reg_read_request_destroy(ena_dev); |
3347 | err: |
3348 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
3349 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
3350 | dev_err(&pdev->dev, |
3351 | "Reset attempt failed. Can not reset the device\n" ); |
3352 | |
3353 | return rc; |
3354 | } |
3355 | |
3356 | static void ena_fw_reset_device(struct work_struct *work) |
3357 | { |
3358 | struct ena_adapter *adapter = |
3359 | container_of(work, struct ena_adapter, reset_task); |
3360 | |
3361 | rtnl_lock(); |
3362 | |
3363 | if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
3364 | ena_destroy_device(adapter, false); |
3365 | ena_restore_device(adapter); |
3366 | |
3367 | dev_err(&adapter->pdev->dev, "Device reset completed successfully\n" ); |
3368 | } |
3369 | |
3370 | rtnl_unlock(); |
3371 | } |
3372 | |
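/* check_for_rx_interrupt_queue - Detect a missed Rx interrupt on a queue
 *
 * If no interrupt has ever been seen on this queue while its completion
 * queue is not empty, count the event; after ENA_MAX_NO_INTERRUPT_ITERATIONS
 * consecutive detections a device reset is scheduled and -EIO is returned.
 */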
3373 | static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, |
3374 | struct ena_ring *rx_ring) |
3375 | { |
3376 | struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); |
3377 | |
3378 | if (likely(READ_ONCE(ena_napi->first_interrupt))) |
3379 | return 0; |
3380 | |
3381 | if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) |
3382 | return 0; |
3383 | |
3384 | rx_ring->no_interrupt_event_cnt++; |
3385 | |
3386 | if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { |
3387 | netif_err(adapter, rx_err, adapter->netdev, |
3388 | "Potential MSIX issue on Rx side Queue = %d. Reset the device\n" , |
3389 | rx_ring->qid); |
3390 | |
3391 | ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); |
3392 | return -EIO; |
3393 | } |
3394 | |
3395 | return 0; |
3396 | } |
3397 | |
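/* check_missing_comp_in_tx_queue - Scan a Tx ring for stuck completions
 *
 * Walks tx_buffer_info and compares each packet's last_jiffies against the
 * missing-completion timeout. A packet still pending after twice the timeout
 * with no interrupt ever received triggers an immediate MISS_INTERRUPT
 * reset. Packets pending past the timeout are counted as missed; when the
 * count exceeds missing_tx_completion_threshold the device is reset (poll
 * starvation is reported if napi is scheduled but has not run).
 */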
3398 | static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, |
3399 | struct ena_ring *tx_ring) |
3400 | { |
3401 | struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); |
3402 | enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; |
3403 | unsigned int time_since_last_napi; |
3404 | unsigned int missing_tx_comp_to; |
3405 | bool is_tx_comp_time_expired; |
3406 | struct ena_tx_buffer *tx_buf; |
3407 | unsigned long last_jiffies; |
3408 | int napi_scheduled; |
3409 | u32 missed_tx = 0; |
3410 | int i, rc = 0; |
3411 | |
3412 | missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); |
3413 | |
3414 | for (i = 0; i < tx_ring->ring_size; i++) { |
3415 | tx_buf = &tx_ring->tx_buffer_info[i]; |
3416 | last_jiffies = tx_buf->last_jiffies; |
3417 | |
3418 | if (last_jiffies == 0) |
3419 | /* no pending Tx at this location */ |
3420 | continue; |
3421 | |
3422 | is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + |
3423 | 2 * adapter->missing_tx_completion_to); |
3424 | |
3425 | if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { |
3426 | /* If after graceful period interrupt is still not |
3427 | * received, we schedule a reset |
3428 | */ |
3429 | netif_err(adapter, tx_err, adapter->netdev, |
3430 | "Potential MSIX issue on Tx side Queue = %d. Reset the device\n" , |
3431 | tx_ring->qid); |
3432 | ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); |
3433 | return -EIO; |
3434 | } |
3435 | |
3436 | is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + |
3437 | adapter->missing_tx_completion_to); |
3438 | |
3439 | if (unlikely(is_tx_comp_time_expired)) { |
3440 | time_since_last_napi = |
3441 | jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); |
3442 | napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED); |
3443 | |
3444 | if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) { |
3445 | /* We suspect napi isn't called because the |
3446 | * bottom half is not run. Require a bigger |
3447 | * timeout for these cases |
3448 | */ |
3449 | if (!time_is_before_jiffies(last_jiffies + |
3450 | 2 * adapter->missing_tx_completion_to)) |
3451 | continue; |
3452 | |
3453 | reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION; |
3454 | } |
3455 | |
3456 | missed_tx++; |
3457 | |
3458 | if (tx_buf->print_once) |
3459 | continue; |
3460 | |
3461 | netif_notice(adapter, tx_err, adapter->netdev, |
3462 | "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n" , |
3463 | tx_ring->qid, i, time_since_last_napi, napi_scheduled); |
3464 | |
3465 | tx_buf->print_once = 1; |
3466 | } |
3467 | } |
3468 | |
3469 | if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { |
3470 | netif_err(adapter, tx_err, adapter->netdev, |
3471 | "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n" , |
3472 | missed_tx, |
3473 | adapter->missing_tx_completion_threshold, |
3474 | missing_tx_comp_to); |
3475 | netif_err(adapter, tx_err, adapter->netdev, |
3476 | "Resetting the device\n" ); |
3477 | |
3478 | ena_reset_device(adapter, reset_reason); |
3479 | rc = -EIO; |
3480 | } |
3481 | |
3482 | ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, |
3483 | &tx_ring->syncp); |
3484 | |
3485 | return rc; |
3486 | } |
3487 | |
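/* check_for_missing_completions - Periodic missing-completion monitor
 *
 * Called from the timer service. Examines up to ENA_MONITORED_TX_QUEUES
 * queues per invocation in a round-robin fashion (resuming from
 * last_monitored_tx_qid), checking each Tx ring for stuck completions and
 * each non-XDP queue for missed Rx interrupts.
 */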
3488 | static void check_for_missing_completions(struct ena_adapter *adapter) |
3489 | { |
3490 | struct ena_ring *tx_ring; |
3491 | struct ena_ring *rx_ring; |
3492 | int qid, budget, rc; |
3493 | int io_queue_count; |
3494 | |
3495 | io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; |
3496 | |
3497 | /* Make sure device state flag updates made in other contexts (device up/down, reset) are observed before the checks below */ |
3498 | smp_rmb(); |
3499 | |
3500 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
3501 | return; |
3502 | |
3503 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) |
3504 | return; |
3505 | |
3506 | if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) |
3507 | return; |
3508 | |
3509 | budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES); |
3510 | |
3511 | qid = adapter->last_monitored_tx_qid; |
3512 | |
3513 | while (budget) { |
3514 | qid = (qid + 1) % io_queue_count; |
3515 | |
3516 | tx_ring = &adapter->tx_ring[qid]; |
3517 | rx_ring = &adapter->rx_ring[qid]; |
3518 | |
3519 | rc = check_missing_comp_in_tx_queue(adapter, tx_ring); |
3520 | if (unlikely(rc)) |
3521 | return; |
3522 | |
3523 | rc = !ENA_IS_XDP_INDEX(adapter, qid) ? |
3524 | check_for_rx_interrupt_queue(adapter, rx_ring) : 0; |
3525 | if (unlikely(rc)) |
3526 | return; |
3527 | |
3528 | budget--; |
3529 | } |
3530 | |
3531 | adapter->last_monitored_tx_qid = qid; |
3532 | } |
3533 | |
3534 | /* trigger napi schedule after 2 consecutive detections */ |
3535 | #define EMPTY_RX_REFILL 2 |
3536 | /* For the rare case where the device runs out of Rx descriptors and the |
3537 | * napi handler failed to refill new Rx descriptors (due to a lack of memory |
3538 | * for example). |
3539 | * This case will lead to a deadlock: |
3540 | * The device won't send interrupts since all the new Rx packets will be dropped |
3541 | * The napi handler won't allocate new Rx descriptors so the device won't be |
3542 | * able to send new packets. |
3543 | * |
3544 | * This scenario can happen when the kernel's vm.min_free_kbytes is too small. |
3545 | * It is recommended to have at least 512MB, with a minimum of 128MB for |
3546 | * constrained environments. |
3547 | * |
3548 | * When such a situation is detected - Reschedule napi |
3549 | */ |
3550 | static void check_for_empty_rx_ring(struct ena_adapter *adapter) |
3551 | { |
3552 | struct ena_ring *rx_ring; |
3553 | int i, refill_required; |
3554 | |
3555 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
3556 | return; |
3557 | |
3558 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) |
3559 | return; |
3560 | |
3561 | for (i = 0; i < adapter->num_io_queues; i++) { |
3562 | rx_ring = &adapter->rx_ring[i]; |
3563 | |
3564 | refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); |
3565 | if (unlikely(refill_required == (rx_ring->ring_size - 1))) { |
3566 | rx_ring->empty_rx_queue++; |
3567 | |
3568 | if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { |
3569 | ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, |
3570 | &rx_ring->syncp); |
3571 | |
3572 | netif_err(adapter, drv, adapter->netdev, |
3573 | "Trigger refill for ring %d\n" , i); |
3574 | |
3575 | napi_schedule(rx_ring->napi); |
3576 | rx_ring->empty_rx_queue = 0; |
3577 | } |
3578 | } else { |
3579 | rx_ring->empty_rx_queue = 0; |
3580 | } |
3581 | } |
3582 | } |
3583 | |
3584 | /* Check for keep alive expiration */ |
3585 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) |
3586 | { |
3587 | unsigned long keep_alive_expired; |
3588 | |
3589 | if (!adapter->wd_state) |
3590 | return; |
3591 | |
3592 | if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) |
3593 | return; |
3594 | |
3595 | keep_alive_expired = adapter->last_keep_alive_jiffies + |
3596 | adapter->keep_alive_timeout; |
3597 | if (unlikely(time_is_before_jiffies(keep_alive_expired))) { |
3598 | netif_err(adapter, drv, adapter->netdev, |
3599 | "Keep alive watchdog timeout.\n" ); |
3600 | ena_increase_stat(&adapter->dev_stats.wd_expired, 1, |
3601 | &adapter->syncp); |
3602 | ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); |
3603 | } |
3604 | } |
3605 | |
3606 | static void check_for_admin_com_state(struct ena_adapter *adapter) |
3607 | { |
3608 | if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { |
3609 | netif_err(adapter, drv, adapter->netdev, |
3610 | "ENA admin queue is not in running state!\n" ); |
3611 | ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, |
3612 | &adapter->syncp); |
3613 | ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO); |
3614 | } |
3615 | } |
3616 | |
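/* ena_update_hints - Apply hardware hints received from the device
 *
 * Updates the admin completion and MMIO read timeouts, the missing Tx
 * completion threshold/timeout, the netdev watchdog timeout and the
 * keep-alive timeout according to the hints delivered via an AENQ
 * notification or at probe time.
 */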
3617 | static void ena_update_hints(struct ena_adapter *adapter, |
3618 | struct ena_admin_ena_hw_hints *hints) |
3619 | { |
3620 | struct net_device *netdev = adapter->netdev; |
3621 | |
3622 | if (hints->admin_completion_tx_timeout) |
3623 | adapter->ena_dev->admin_queue.completion_timeout = |
3624 | hints->admin_completion_tx_timeout * 1000; |
3625 | |
3626 | if (hints->mmio_read_timeout) |
3627 | /* convert to usec */ |
3628 | adapter->ena_dev->mmio_read.reg_read_to = |
3629 | hints->mmio_read_timeout * 1000; |
3630 | |
3631 | if (hints->missed_tx_completion_count_threshold_to_reset) |
3632 | adapter->missing_tx_completion_threshold = |
3633 | hints->missed_tx_completion_count_threshold_to_reset; |
3634 | |
3635 | if (hints->missing_tx_completion_timeout) { |
3636 | if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) |
3637 | adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; |
3638 | else |
3639 | adapter->missing_tx_completion_to = |
3640 | msecs_to_jiffies(hints->missing_tx_completion_timeout); |
3641 | } |
3642 | |
3643 | if (hints->netdev_wd_timeout) |
3644 | netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); |
3645 | |
3646 | if (hints->driver_watchdog_timeout) { |
3647 | if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) |
3648 | adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; |
3649 | else |
3650 | adapter->keep_alive_timeout = |
3651 | msecs_to_jiffies(hints->driver_watchdog_timeout); |
3652 | } |
3653 | } |
3654 | |
3655 | static void ena_update_host_info(struct ena_admin_host_info *host_info, |
3656 | struct net_device *netdev) |
3657 | { |
3658 | host_info->supported_network_features[0] = |
3659 | netdev->features & GENMASK_ULL(31, 0); |
3660 | host_info->supported_network_features[1] = |
3661 | (netdev->features & GENMASK_ULL(63, 32)) >> 32; |
3662 | } |
3663 | |
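/* ena_timer_service - Per-second watchdog for device health
 *
 * Checks keep-alive expiration, the admin queue state, missing completions
 * and empty Rx rings, refreshes the debug area and host info, and queues the
 * reset task instead of re-arming itself when a reset has been triggered.
 */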
3664 | static void ena_timer_service(struct timer_list *t) |
3665 | { |
3666 | struct ena_adapter *adapter = from_timer(adapter, t, timer_service); |
3667 | u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; |
3668 | struct ena_admin_host_info *host_info = |
3669 | adapter->ena_dev->host_attr.host_info; |
3670 | |
3671 | check_for_missing_keep_alive(adapter); |
3672 | |
3673 | check_for_admin_com_state(adapter); |
3674 | |
3675 | check_for_missing_completions(adapter); |
3676 | |
3677 | check_for_empty_rx_ring(adapter); |
3678 | |
3679 | if (debug_area) |
3680 | ena_dump_stats_to_buf(adapter, debug_area); |
3681 | |
3682 | if (host_info) |
3683 | ena_update_host_info(host_info, adapter->netdev); |
3684 | |
3685 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
3686 | netif_err(adapter, drv, adapter->netdev, |
3687 | "Trigger reset is on\n" ); |
3688 | ena_dump_stats_to_dmesg(adapter); |
3689 | queue_work(ena_wq, &adapter->reset_task); |
3690 | return; |
3691 | } |
3692 | |
3693 | /* Reset the timer */ |
3694 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
3695 | } |
3696 | |
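/* ena_calc_max_io_queue_num - Compute the number of usable IO queues
 *
 * Takes the minimum of the device's Rx/Tx SQ and CQ limits (using the LLQ
 * limit for Tx SQs when LLQ placement is used), the number of online CPUs,
 * ENA_MAX_NUM_IO_QUEUES and the available MSI-X vectors minus the one
 * reserved for management.
 */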
3697 | static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, |
3698 | struct ena_com_dev *ena_dev, |
3699 | struct ena_com_dev_get_features_ctx *get_feat_ctx) |
3700 | { |
3701 | u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; |
3702 | |
3703 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
3704 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = |
3705 | &get_feat_ctx->max_queue_ext.max_queue_ext; |
3706 | io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, |
3707 | max_queue_ext->max_rx_cq_num); |
3708 | |
3709 | io_tx_sq_num = max_queue_ext->max_tx_sq_num; |
3710 | io_tx_cq_num = max_queue_ext->max_tx_cq_num; |
3711 | } else { |
3712 | struct ena_admin_queue_feature_desc *max_queues = |
3713 | &get_feat_ctx->max_queues; |
3714 | io_tx_sq_num = max_queues->max_sq_num; |
3715 | io_tx_cq_num = max_queues->max_cq_num; |
3716 | io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num); |
3717 | } |
3718 | |
3719 | /* In case of LLQ use the llq fields for the tx SQ/CQ */ |
3720 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
3721 | io_tx_sq_num = get_feat_ctx->llq.max_llq_num; |
3722 | |
3723 | max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
3724 | max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); |
3725 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); |
3726 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); |
3727 | /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */ |
3728 | max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); |
3729 | |
3730 | return max_num_io_queues; |
3731 | } |
3732 | |
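/* ena_set_dev_offloads - Translate device offload capabilities to netdev features
 *
 * Maps the Tx/Rx checksum and TSO capability bits reported by the device to
 * NETIF_F_* flags and adds the always-supported SG, RXHASH and HIGHDMA
 * features.
 */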
3733 | static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, |
3734 | struct net_device *netdev) |
3735 | { |
3736 | netdev_features_t dev_features = 0; |
3737 | |
3738 | /* Set offload features */ |
3739 | if (feat->offload.tx & |
3740 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) |
3741 | dev_features |= NETIF_F_IP_CSUM; |
3742 | |
3743 | if (feat->offload.tx & |
3744 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) |
3745 | dev_features |= NETIF_F_IPV6_CSUM; |
3746 | |
3747 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) |
3748 | dev_features |= NETIF_F_TSO; |
3749 | |
3750 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) |
3751 | dev_features |= NETIF_F_TSO6; |
3752 | |
3753 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) |
3754 | dev_features |= NETIF_F_TSO_ECN; |
3755 | |
3756 | if (feat->offload.rx_supported & |
3757 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) |
3758 | dev_features |= NETIF_F_RXCSUM; |
3759 | |
3760 | if (feat->offload.rx_supported & |
3761 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) |
3762 | dev_features |= NETIF_F_RXCSUM; |
3763 | |
3764 | netdev->features = |
3765 | dev_features | |
3766 | NETIF_F_SG | |
3767 | NETIF_F_RXHASH | |
3768 | NETIF_F_HIGHDMA; |
3769 | |
3770 | netdev->hw_features |= netdev->features; |
3771 | netdev->vlan_features |= netdev->features; |
3772 | } |
3773 | |
3774 | static void ena_set_conf_feat_params(struct ena_adapter *adapter, |
3775 | struct ena_com_dev_get_features_ctx *feat) |
3776 | { |
3777 | struct net_device *netdev = adapter->netdev; |
3778 | |
3779 | /* Copy mac address */ |
3780 | if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { |
3781 | eth_hw_addr_random(netdev); |
3782 | ether_addr_copy(adapter->mac_addr, netdev->dev_addr); |
3783 | } else { |
3784 | ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); |
3785 | eth_hw_addr_set(netdev, adapter->mac_addr); |
3786 | } |
3787 | |
3788 | /* Set offload features */ |
3789 | ena_set_dev_offloads(feat, netdev); |
3790 | |
3791 | adapter->max_mtu = feat->dev_attr.max_mtu; |
3792 | netdev->max_mtu = adapter->max_mtu; |
3793 | netdev->min_mtu = ENA_MIN_MTU; |
3794 | } |
3795 | |
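/* ena_rss_init_default - Configure the default RSS settings
 *
 * Initializes the RSS indirection table, fills it with the default
 * round-robin queue mapping, programs the Toeplitz hash function and applies
 * the default hash control. -EOPNOTSUPP from the hash configuration calls is
 * tolerated for devices that do not expose these features.
 */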
3796 | static int ena_rss_init_default(struct ena_adapter *adapter) |
3797 | { |
3798 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3799 | struct device *dev = &adapter->pdev->dev; |
3800 | int rc, i; |
3801 | u32 val; |
3802 | |
3803 | rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); |
3804 | if (unlikely(rc)) { |
3805 | dev_err(dev, "Cannot init indirect table\n" ); |
3806 | goto err_rss_init; |
3807 | } |
3808 | |
3809 | for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { |
3810 | val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); |
3811 | rc = ena_com_indirect_table_fill_entry(ena_dev, i, |
3812 | ENA_IO_RXQ_IDX(val)); |
3813 | if (unlikely(rc)) { |
3814 | dev_err(dev, "Cannot fill indirect table\n" ); |
3815 | goto err_fill_indir; |
3816 | } |
3817 | } |
3818 | |
3819 | rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE, |
3820 | 0xFFFFFFFF); |
3821 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
3822 | dev_err(dev, "Cannot fill hash function\n" ); |
3823 | goto err_fill_indir; |
3824 | } |
3825 | |
3826 | rc = ena_com_set_default_hash_ctrl(ena_dev); |
3827 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
3828 | dev_err(dev, "Cannot fill hash control\n" ); |
3829 | goto err_fill_indir; |
3830 | } |
3831 | |
3832 | return 0; |
3833 | |
3834 | err_fill_indir: |
3835 | ena_com_rss_destroy(ena_dev); |
3836 | err_rss_init: |
3837 | |
3838 | return rc; |
3839 | } |
3840 | |
3841 | static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
3842 | { |
3843 | int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
3844 | |
3845 | pci_release_selected_regions(pdev, release_bars); |
3846 | } |
3847 | |
3848 | /* ena_probe - Device Initialization Routine |
3849 | * @pdev: PCI device information struct |
3850 | * @ent: entry in ena_pci_tbl |
3851 | * |
3852 | * Returns 0 on success, negative on failure |
3853 | * |
3854 | * ena_probe initializes an adapter identified by a pci_dev structure. |
3855 | * The OS initialization, configuring of the adapter private structure, |
3856 | * and a hardware reset occur. |
3857 | */ |
3858 | static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
3859 | { |
3860 | struct ena_com_dev_get_features_ctx get_feat_ctx; |
3861 | struct ena_com_dev *ena_dev = NULL; |
3862 | struct ena_adapter *adapter; |
3863 | struct net_device *netdev; |
3864 | static int adapters_found; |
3865 | u32 max_num_io_queues; |
3866 | bool wd_state; |
3867 | int bars, rc; |
3868 | |
3869 | dev_dbg(&pdev->dev, "%s\n" , __func__); |
3870 | |
3871 | rc = pci_enable_device_mem(pdev); |
3872 | if (rc) { |
3873 | dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n" ); |
3874 | return rc; |
3875 | } |
3876 | |
3877 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); |
3878 | if (rc) { |
3879 | dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n" , rc); |
3880 | goto err_disable_device; |
3881 | } |
3882 | |
3883 | pci_set_master(pdev); |
3884 | |
3885 | ena_dev = vzalloc(sizeof(*ena_dev)); |
3886 | if (!ena_dev) { |
3887 | rc = -ENOMEM; |
3888 | goto err_disable_device; |
3889 | } |
3890 | |
3891 | bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
3892 | rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); |
3893 | if (rc) { |
3894 | dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n" , |
3895 | rc); |
3896 | goto err_free_ena_dev; |
3897 | } |
3898 | |
3899 | ena_dev->reg_bar = devm_ioremap(&pdev->dev, |
3900 | pci_resource_start(pdev, ENA_REG_BAR), |
3901 | pci_resource_len(pdev, ENA_REG_BAR)); |
3902 | if (!ena_dev->reg_bar) { |
3903 | dev_err(&pdev->dev, "Failed to remap regs bar\n" ); |
3904 | rc = -EFAULT; |
3905 | goto err_free_region; |
3906 | } |
3907 | |
3908 | ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; |
3909 | |
3910 | ena_dev->dmadev = &pdev->dev; |
3911 | |
3912 | netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS); |
3913 | if (!netdev) { |
3914 | dev_err(&pdev->dev, "alloc_etherdev_mq failed\n" ); |
3915 | rc = -ENOMEM; |
3916 | goto err_free_region; |
3917 | } |
3918 | |
3919 | SET_NETDEV_DEV(netdev, &pdev->dev); |
3920 | adapter = netdev_priv(netdev); |
3921 | adapter->ena_dev = ena_dev; |
3922 | adapter->netdev = netdev; |
3923 | adapter->pdev = pdev; |
3924 | adapter->msg_enable = DEFAULT_MSG_ENABLE; |
3925 | |
3926 | ena_dev->net_device = netdev; |
3927 | |
3928 | pci_set_drvdata(pdev, adapter); |
3929 | |
3930 | rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); |
3931 | if (rc) { |
3932 | dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n" ); |
3933 | goto err_netdev_destroy; |
3934 | } |
3935 | |
3936 | rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state); |
3937 | if (rc) { |
3938 | dev_err(&pdev->dev, "ENA device init failed\n" ); |
3939 | if (rc == -ETIME) |
3940 | rc = -EPROBE_DEFER; |
3941 | goto err_netdev_destroy; |
3942 | } |
3943 | |
3944 | /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. |
3945 | * Updated during device initialization with the real granularity |
3946 | */ |
3947 | ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; |
3948 | ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; |
3949 | ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; |
3950 | max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); |
3951 | if (unlikely(!max_num_io_queues)) { |
3952 | rc = -EFAULT; |
3953 | goto err_device_destroy; |
3954 | } |
3955 | |
3956 | ena_set_conf_feat_params(adapter, &get_feat_ctx); |
3957 | |
3958 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
3959 | |
3960 | adapter->num_io_queues = max_num_io_queues; |
3961 | adapter->max_num_io_queues = max_num_io_queues; |
3962 | adapter->last_monitored_tx_qid = 0; |
3963 | |
3964 | adapter->xdp_first_ring = 0; |
3965 | adapter->xdp_num_queues = 0; |
3966 | |
3967 | adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; |
3968 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
3969 | adapter->disable_meta_caching = |
3970 | !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & |
3971 | BIT(ENA_ADMIN_DISABLE_META_CACHING)); |
3972 | |
3973 | adapter->wd_state = wd_state; |
3974 | |
3975 | snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); |
3976 | |
3977 | rc = ena_com_init_interrupt_moderation(adapter->ena_dev); |
3978 | if (rc) { |
3979 | dev_err(&pdev->dev, |
3980 | "Failed to query interrupt moderation feature\n" ); |
3981 | goto err_device_destroy; |
3982 | } |
3983 | |
3984 | ena_init_io_rings(adapter, |
3985 | 0, |
3986 | adapter->xdp_num_queues + |
3987 | adapter->num_io_queues); |
3988 | |
3989 | netdev->netdev_ops = &ena_netdev_ops; |
3990 | netdev->watchdog_timeo = TX_TIMEOUT; |
3991 | ena_set_ethtool_ops(netdev); |
3992 | |
3993 | netdev->priv_flags |= IFF_UNICAST_FLT; |
3994 | |
3995 | u64_stats_init(&adapter->syncp); |
3996 | |
3997 | rc = ena_enable_msix_and_set_admin_interrupts(adapter); |
3998 | if (rc) { |
3999 | dev_err(&pdev->dev, |
4000 | "Failed to enable and set the admin interrupts\n" ); |
4001 | goto err_worker_destroy; |
4002 | } |
4003 | rc = ena_rss_init_default(adapter); |
4004 | if (rc && (rc != -EOPNOTSUPP)) { |
4005 | dev_err(&pdev->dev, "Cannot init RSS rc: %d\n" , rc); |
4006 | goto err_free_msix; |
4007 | } |
4008 | |
4009 | ena_config_debug_area(adapter); |
4010 | |
4011 | if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) |
4012 | netdev->xdp_features = NETDEV_XDP_ACT_BASIC | |
4013 | NETDEV_XDP_ACT_REDIRECT; |
4014 | |
4015 | memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); |
4016 | |
4017 | netif_carrier_off(netdev); |
4018 | |
4019 | rc = register_netdev(netdev); |
4020 | if (rc) { |
4021 | dev_err(&pdev->dev, "Cannot register net device\n" ); |
4022 | goto err_rss; |
4023 | } |
4024 | |
4025 | INIT_WORK(&adapter->reset_task, ena_fw_reset_device); |
4026 | |
4027 | adapter->last_keep_alive_jiffies = jiffies; |
4028 | adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; |
4029 | adapter->missing_tx_completion_to = TX_TIMEOUT; |
4030 | adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; |
4031 | |
4032 | ena_update_hints(adapter, &get_feat_ctx.hw_hints); |
4033 | |
4034 | timer_setup(&adapter->timer_service, ena_timer_service, 0); |
4035 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
4036 | |
4037 | dev_info(&pdev->dev, |
4038 | "%s found at mem %lx, mac addr %pM\n" , |
4039 | DEVICE_NAME, (long)pci_resource_start(pdev, 0), |
4040 | netdev->dev_addr); |
4041 | |
4042 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
4043 | |
4044 | adapters_found++; |
4045 | |
4046 | return 0; |
4047 | |
4048 | err_rss: |
4049 | ena_com_delete_debug_area(ena_dev); |
4050 | ena_com_rss_destroy(ena_dev); |
4051 | err_free_msix: |
4052 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); |
4053 | /* stop submitting admin commands on a device that was reset */ |
4054 | ena_com_set_admin_running_state(ena_dev, false); |
4055 | ena_free_mgmnt_irq(adapter); |
4056 | ena_disable_msix(adapter); |
4057 | err_worker_destroy: |
4058 | del_timer(&adapter->timer_service); |
4059 | err_device_destroy: |
4060 | ena_com_delete_host_info(ena_dev); |
4061 | ena_com_admin_destroy(ena_dev); |
4062 | err_netdev_destroy: |
4063 | free_netdev(netdev); |
4064 | err_free_region: |
4065 | ena_release_bars(ena_dev, pdev); |
4066 | err_free_ena_dev: |
4067 | vfree(ena_dev); |
4068 | err_disable_device: |
4069 | pci_disable_device(pdev); |
4070 | return rc; |
4071 | } |
4072 | |
4073 | /*****************************************************************************/ |
4074 | |
4075 | /* __ena_shutoff - Helper used in both PCI remove/shutdown routines |
4076 | * @pdev: PCI device information struct |
4077 | * @shutdown: Is it a shutdown operation? If false, means it is a removal |
4078 | * |
4079 | * __ena_shutoff is a helper routine that does the real work on the shutdown |
4080 | * and removal paths; the difference between those paths is whether the |
4081 | * netdevice is detached or unregistered. |
4082 | */ |
4083 | static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) |
4084 | { |
4085 | struct ena_adapter *adapter = pci_get_drvdata(pdev); |
4086 | struct ena_com_dev *ena_dev; |
4087 | struct net_device *netdev; |
4088 | |
4089 | ena_dev = adapter->ena_dev; |
4090 | netdev = adapter->netdev; |
4091 | |
4092 | #ifdef CONFIG_RFS_ACCEL |
4093 | if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { |
4094 | free_irq_cpu_rmap(netdev->rx_cpu_rmap); |
4095 | netdev->rx_cpu_rmap = NULL; |
4096 | } |
4097 | |
4098 | #endif /* CONFIG_RFS_ACCEL */ |
4099 | /* Make sure timer and reset routine won't be called after |
4100 | * freeing device resources. |
4101 | */ |
4102 | del_timer_sync(&adapter->timer_service); |
4103 | cancel_work_sync(&adapter->reset_task); |
4104 | |
4105 | rtnl_lock(); /* lock released inside the below if-else block */ |
4106 | adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; |
4107 | ena_destroy_device(adapter, true); |
4108 | |
4109 | if (shutdown) { |
4110 | netif_device_detach(netdev); |
4111 | dev_close(netdev); |
4112 | rtnl_unlock(); |
4113 | } else { |
4114 | rtnl_unlock(); |
4115 | unregister_netdev(netdev); |
4116 | free_netdev(netdev); |
4117 | } |
4118 | |
4119 | ena_com_rss_destroy(ena_dev); |
4120 | |
4121 | ena_com_delete_debug_area(ena_dev); |
4122 | |
4123 | ena_com_delete_host_info(ena_dev); |
4124 | |
4125 | ena_release_bars(ena_dev, pdev); |
4126 | |
4127 | pci_disable_device(pdev); |
4128 | |
4129 | vfree(ena_dev); |
4130 | } |
4131 | |
4132 | /* ena_remove - Device Removal Routine |
4133 | * @pdev: PCI device information struct |
4134 | * |
4135 | * ena_remove is called by the PCI subsystem to alert the driver |
4136 | * that it should release a PCI device. |
4137 | */ |
4138 | |
4139 | static void ena_remove(struct pci_dev *pdev) |
4140 | { |
4141 | __ena_shutoff(pdev, false); |
4142 | } |
4143 | |
4144 | /* ena_shutdown - Device Shutdown Routine |
4145 | * @pdev: PCI device information struct |
4146 | * |
4147 | * ena_shutdown is called by the PCI subsystem to alert the driver that |
4148 | * a shutdown/reboot (or kexec) is happening and device must be disabled. |
4149 | */ |
4150 | |
4151 | static void ena_shutdown(struct pci_dev *pdev) |
4152 | { |
4153 | __ena_shutoff(pdev, true); |
4154 | } |
4155 | |
4156 | /* ena_suspend - PM suspend callback |
4157 | * @dev_d: Device information struct |
4158 | */ |
4159 | static int __maybe_unused ena_suspend(struct device *dev_d) |
4160 | { |
4161 | struct pci_dev *pdev = to_pci_dev(dev_d); |
4162 | struct ena_adapter *adapter = pci_get_drvdata(pdev); |
4163 | |
4164 | ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); |
4165 | |
4166 | rtnl_lock(); |
4167 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
4168 | dev_err(&pdev->dev, |
4169 | "Ignoring device reset request as the device is being suspended\n" ); |
4170 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
4171 | } |
4172 | ena_destroy_device(adapter, true); |
4173 | rtnl_unlock(); |
4174 | return 0; |
4175 | } |
4176 | |
4177 | /* ena_resume - PM resume callback |
4178 | * @dev_d: Device information struct |
4179 | */ |
4180 | static int __maybe_unused ena_resume(struct device *dev_d) |
4181 | { |
4182 | struct ena_adapter *adapter = dev_get_drvdata(dev_d); |
4183 | int rc; |
4184 | |
4185 | ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); |
4186 | |
4187 | rtnl_lock(); |
4188 | rc = ena_restore_device(adapter); |
4189 | rtnl_unlock(); |
4190 | return rc; |
4191 | } |
4192 | |
4193 | static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume); |
4194 | |
4195 | static struct pci_driver ena_pci_driver = { |
4196 | .name = DRV_MODULE_NAME, |
4197 | .id_table = ena_pci_tbl, |
4198 | .probe = ena_probe, |
4199 | .remove = ena_remove, |
4200 | .shutdown = ena_shutdown, |
4201 | .driver.pm = &ena_pm_ops, |
4202 | .sriov_configure = pci_sriov_configure_simple, |
4203 | }; |
4204 | |
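/* Module init/exit: create the single-threaded workqueue used by the reset
 * task before registering the PCI driver, and tear both down on exit.
 */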
4205 | static int __init ena_init(void) |
4206 | { |
4207 | int ret; |
4208 | |
4209 | ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); |
4210 | if (!ena_wq) { |
4211 | pr_err("Failed to create workqueue\n" ); |
4212 | return -ENOMEM; |
4213 | } |
4214 | |
4215 | ret = pci_register_driver(&ena_pci_driver); |
4216 | if (ret) |
4217 | destroy_workqueue(ena_wq); |
4218 | |
4219 | return ret; |
4220 | } |
4221 | |
4222 | static void __exit ena_cleanup(void) |
4223 | { |
4224 | pci_unregister_driver(&ena_pci_driver); |
4225 | |
4226 | if (ena_wq) { |
4227 | destroy_workqueue(ena_wq); |
4228 | ena_wq = NULL; |
4229 | } |
4230 | } |
4231 | |
4232 | /****************************************************************************** |
4233 | ******************************** AENQ Handlers ******************************* |
4234 | *****************************************************************************/ |
4235 | /* ena_update_on_link_change: |
4236 | * Notify the network interface about the change in link status |
4237 | */ |
4238 | static void ena_update_on_link_change(void *adapter_data, |
4239 | struct ena_admin_aenq_entry *aenq_e) |
4240 | { |
4241 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; |
4242 | struct ena_admin_aenq_link_change_desc *aenq_desc = |
4243 | (struct ena_admin_aenq_link_change_desc *)aenq_e; |
4244 | int status = aenq_desc->flags & |
4245 | ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; |
4246 | |
4247 | if (status) { |
4248 | netif_dbg(adapter, ifup, adapter->netdev, "%s\n" , __func__); |
4249 | set_bit(ENA_FLAG_LINK_UP, &adapter->flags); |
4250 | if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) |
4251 | netif_carrier_on(adapter->netdev); |
4252 | } else { |
4253 | clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); |
4254 | netif_carrier_off(adapter->netdev); |
4255 | } |
4256 | } |
4257 | |
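/* ena_keep_alive_wd - Keep-alive AENQ handler
 *
 * Refreshes last_keep_alive_jiffies so the watchdog in the timer service
 * doesn't expire, and records the accumulated Rx/Tx drop counters reported
 * by the device.
 */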
4258 | static void ena_keep_alive_wd(void *adapter_data, |
4259 | struct ena_admin_aenq_entry *aenq_e) |
4260 | { |
4261 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; |
4262 | struct ena_admin_aenq_keep_alive_desc *desc; |
4263 | u64 rx_drops; |
4264 | u64 tx_drops; |
4265 | |
4266 | desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; |
4267 | adapter->last_keep_alive_jiffies = jiffies; |
4268 | |
4269 | rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; |
4270 | tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; |
4271 | |
4272 | u64_stats_update_begin(&adapter->syncp); |
4273 | /* These stats are accumulated by the device, so the counters indicate |
4274 | * all drops since last reset. |
4275 | */ |
4276 | adapter->dev_stats.rx_drops = rx_drops; |
4277 | adapter->dev_stats.tx_drops = tx_drops; |
4278 | u64_stats_update_end(&adapter->syncp); |
4279 | } |
4280 | |
4281 | static void ena_notification(void *adapter_data, |
4282 | struct ena_admin_aenq_entry *aenq_e) |
4283 | { |
4284 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; |
4285 | struct ena_admin_ena_hw_hints *hints; |
4286 | |
4287 | WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, |
4288 | "Invalid group(%x) expected %x\n" , |
4289 | aenq_e->aenq_common_desc.group, |
4290 | ENA_ADMIN_NOTIFICATION); |
4291 | |
4292 | switch (aenq_e->aenq_common_desc.syndrome) { |
4293 | case ENA_ADMIN_UPDATE_HINTS: |
4294 | hints = (struct ena_admin_ena_hw_hints *) |
4295 | (&aenq_e->inline_data_w4); |
4296 | ena_update_hints(adapter, hints); |
4297 | break; |
4298 | default: |
4299 | netif_err(adapter, drv, adapter->netdev, |
4300 | "Invalid aenq notification link state %d\n" , |
4301 | aenq_e->aenq_common_desc.syndrome); |
4302 | } |
4303 | } |
4304 | |
4305 | /* This handler will be called for an unknown event group or unimplemented handlers */ |
4306 | static void unimplemented_aenq_handler(void *data, |
4307 | struct ena_admin_aenq_entry *aenq_e) |
4308 | { |
4309 | struct ena_adapter *adapter = (struct ena_adapter *)data; |
4310 | |
4311 | netif_err(adapter, drv, adapter->netdev, |
4312 | "Unknown event was received or event with unimplemented handler\n" ); |
4313 | } |
4314 | |
4315 | static struct ena_aenq_handlers aenq_handlers = { |
4316 | .handlers = { |
4317 | [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, |
4318 | [ENA_ADMIN_NOTIFICATION] = ena_notification, |
4319 | [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, |
4320 | }, |
4321 | .unimplemented_handler = unimplemented_aenq_handler |
4322 | }; |
4323 | |
4324 | module_init(ena_init); |
4325 | module_exit(ena_cleanup); |
4326 | |