1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /**************************************************************************** |
3 | * Driver for Solarflare network controllers and boards |
4 | * Copyright 2005-2006 Fen Systems Ltd. |
5 | * Copyright 2006-2013 Solarflare Communications Inc. |
6 | */ |
7 | |
8 | #include <linux/bitops.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/pci.h> |
12 | #include <linux/module.h> |
13 | #include <linux/seq_file.h> |
14 | #include <linux/crc32.h> |
15 | #include "net_driver.h" |
16 | #include "bitfield.h" |
17 | #include "efx.h" |
18 | #include "nic.h" |
19 | #include "farch_regs.h" |
20 | #include "io.h" |
21 | #include "workarounds.h" |
22 | |
23 | /* Falcon-architecture (SFC4000) support */ |
24 | |
25 | /************************************************************************** |
26 | * |
27 | * Configurable values |
28 | * |
29 | ************************************************************************** |
30 | */ |
31 | |
32 | /* This is set to 16 for a good reason. In summary, if larger than |
33 | * 16, the descriptor cache holds more than a default socket |
34 | * buffer's worth of packets (for UDP we can only have at most one |
35 | * socket buffer's worth outstanding). This combined with the fact |
36 | * that we only get 1 TX event per descriptor cache means the NIC |
37 | * goes idle. |
38 | */ |
39 | #define TX_DC_ENTRIES 16 |
40 | #define TX_DC_ENTRIES_ORDER 1 |
41 | |
42 | #define RX_DC_ENTRIES 64 |
43 | #define RX_DC_ENTRIES_ORDER 3 |
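
/* The _ORDER values appear to encode the descriptor cache size in
 * units of eight descriptors: 8 << TX_DC_ENTRIES_ORDER == 16 and
 * 8 << RX_DC_ENTRIES_ORDER == 64, matching the entry counts above.
 */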
44 | |
45 | /* If EF4_MAX_INT_ERRORS internal errors occur within |
46 | * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and |
47 | * disable it. |
48 | */ |
49 | #define EF4_INT_ERROR_EXPIRE 3600 |
50 | #define EF4_MAX_INT_ERRORS 5 |
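
/* With the values above: five internal errors inside one hour
 * (3600 seconds) and the NIC is disabled.
 */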
51 | |
52 | /* Depth of RX flush request fifo */ |
53 | #define EF4_RX_FLUSH_COUNT 4 |
54 | |
55 | /* Driver generated events */ |
56 | #define _EF4_CHANNEL_MAGIC_TEST 0x000101 |
57 | #define _EF4_CHANNEL_MAGIC_FILL 0x000102 |
58 | #define _EF4_CHANNEL_MAGIC_RX_DRAIN 0x000103 |
59 | #define _EF4_CHANNEL_MAGIC_TX_DRAIN 0x000104 |
60 | |
61 | #define _EF4_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) |
62 | #define _EF4_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) |
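
/* Example: EF4_CHANNEL_MAGIC_TEST() on channel 3 packs to
 * (0x000101 << 8) | 3 == 0x010103, and _EF4_CHANNEL_MAGIC_CODE()
 * recovers 0x000101 when the event is handled.
 */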
63 | |
64 | #define EF4_CHANNEL_MAGIC_TEST(_channel) \ |
65 | _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel) |
66 | #define EF4_CHANNEL_MAGIC_FILL(_rx_queue) \ |
67 | _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL, \ |
68 | ef4_rx_queue_index(_rx_queue)) |
69 | #define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ |
70 | _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN, \ |
71 | ef4_rx_queue_index(_rx_queue)) |
72 | #define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ |
73 | _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN, \ |
74 | (_tx_queue)->queue) |
75 | |
76 | static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic); |
77 | |
78 | /************************************************************************** |
79 | * |
80 | * Hardware access |
81 | * |
82 | **************************************************************************/ |
83 | |
84 | static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value, |
85 | unsigned int index) |
86 | { |
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
89 | } |
90 | |
91 | static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b, |
92 | const ef4_oword_t *mask) |
93 | { |
94 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || |
95 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); |
96 | } |
97 | |
98 | int ef4_farch_test_registers(struct ef4_nic *efx, |
99 | const struct ef4_farch_register_test *regs, |
100 | size_t n_regs) |
101 | { |
102 | unsigned address = 0; |
103 | int i, j; |
104 | ef4_oword_t mask, imask, original, reg, buf; |
105 | |
106 | for (i = 0; i < n_regs; ++i) { |
107 | address = regs[i].address; |
108 | mask = imask = regs[i].mask; |
109 | EF4_INVERT_OWORD(imask); |
110 | |
		ef4_reado(efx, &original, address);
112 | |
113 | /* bit sweep on and off */ |
114 | for (j = 0; j < 128; j++) { |
115 | if (!EF4_EXTRACT_OWORD32(mask, j, j)) |
116 | continue; |
117 | |
118 | /* Test this testable bit can be set in isolation */ |
119 | EF4_AND_OWORD(reg, original, mask); |
120 | EF4_SET_OWORD32(reg, j, j, 1); |
121 | |
122 | ef4_writeo(efx, value: ®, reg: address); |
123 | ef4_reado(efx, value: &buf, reg: address); |
124 | |
125 | if (ef4_masked_compare_oword(a: ®, b: &buf, mask: &mask)) |
126 | goto fail; |
127 | |
128 | /* Test this testable bit can be cleared in isolation */ |
129 | EF4_OR_OWORD(reg, original, mask); |
130 | EF4_SET_OWORD32(reg, j, j, 0); |
131 | |
132 | ef4_writeo(efx, value: ®, reg: address); |
133 | ef4_reado(efx, value: &buf, reg: address); |
134 | |
135 | if (ef4_masked_compare_oword(a: ®, b: &buf, mask: &mask)) |
136 | goto fail; |
137 | } |
138 | |
		ef4_writeo(efx, &original, address);
140 | } |
141 | |
142 | return 0; |
143 | |
144 | fail: |
145 | netif_err(efx, hw, efx->net_dev, |
146 | "wrote " EF4_OWORD_FMT" read " EF4_OWORD_FMT |
147 | " at address 0x%x mask " EF4_OWORD_FMT"\n" , EF4_OWORD_VAL(reg), |
148 | EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask)); |
149 | return -EIO; |
150 | } |
151 | |
152 | /************************************************************************** |
153 | * |
154 | * Special buffer handling |
155 | * Special buffers are used for event queues and the TX and RX |
156 | * descriptor rings. |
157 | * |
158 | *************************************************************************/ |
159 | |
160 | /* |
161 | * Initialise a special buffer |
162 | * |
163 | * This will define a buffer (previously allocated via |
164 | * ef4_alloc_special_buffer()) in the buffer table, allowing |
165 | * it to be used for event queues, descriptor rings etc. |
166 | */ |
167 | static void |
168 | ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) |
169 | { |
170 | ef4_qword_t buf_desc; |
171 | unsigned int index; |
172 | dma_addr_t dma_addr; |
173 | int i; |
174 | |
175 | EF4_BUG_ON_PARANOID(!buffer->buf.addr); |
176 | |
177 | /* Write buffer descriptors to NIC */ |
178 | for (i = 0; i < buffer->entries; i++) { |
179 | index = buffer->index + i; |
180 | dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE); |
181 | netif_dbg(efx, probe, efx->net_dev, |
182 | "mapping special buffer %d at %llx\n" , |
183 | index, (unsigned long long)dma_addr); |
184 | EF4_POPULATE_QWORD_3(buf_desc, |
185 | FRF_AZ_BUF_ADR_REGION, 0, |
186 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, |
187 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); |
		ef4_write_buf_tbl(efx, &buf_desc, index);
189 | } |
190 | } |
191 | |
192 | /* Unmaps a buffer and clears the buffer table entries */ |
193 | static void |
194 | ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) |
195 | { |
196 | ef4_oword_t buf_tbl_upd; |
197 | unsigned int start = buffer->index; |
198 | unsigned int end = (buffer->index + buffer->entries - 1); |
199 | |
200 | if (!buffer->entries) |
201 | return; |
202 | |
203 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n" , |
204 | buffer->index, buffer->index + buffer->entries - 1); |
205 | |
206 | EF4_POPULATE_OWORD_4(buf_tbl_upd, |
207 | FRF_AZ_BUF_UPD_CMD, 0, |
208 | FRF_AZ_BUF_CLR_CMD, 1, |
209 | FRF_AZ_BUF_CLR_END_ID, end, |
210 | FRF_AZ_BUF_CLR_START_ID, start); |
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
212 | } |
213 | |
214 | /* |
215 | * Allocate a new special buffer |
216 | * |
217 | * This allocates memory for a new buffer, clears it and allocates a |
218 | * new buffer ID range. It does not write into the buffer table. |
219 | * |
220 | * This call will allocate 4KB buffers, since 8KB buffers can't be |
221 | * used for event queues and descriptor rings. |
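 *
 * For example, with 4KB buffers a 512-entry ring of 8-byte (qword)
 * descriptors needs ALIGN(512 * 8, EF4_BUF_SIZE) == 4096 bytes,
 * i.e. exactly one buffer table entry.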
222 | */ |
223 | static int ef4_alloc_special_buffer(struct ef4_nic *efx, |
224 | struct ef4_special_buffer *buffer, |
225 | unsigned int len) |
226 | { |
227 | len = ALIGN(len, EF4_BUF_SIZE); |
228 | |
	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
230 | return -ENOMEM; |
231 | buffer->entries = len / EF4_BUF_SIZE; |
232 | BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1)); |
233 | |
234 | /* Select new buffer ID */ |
235 | buffer->index = efx->next_buffer_table; |
236 | efx->next_buffer_table += buffer->entries; |
237 | |
238 | netif_dbg(efx, probe, efx->net_dev, |
239 | "allocating special buffers %d-%d at %llx+%x " |
240 | "(virt %p phys %llx)\n" , buffer->index, |
241 | buffer->index + buffer->entries - 1, |
242 | (u64)buffer->buf.dma_addr, len, |
243 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); |
244 | |
245 | return 0; |
246 | } |
247 | |
248 | static void |
249 | ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) |
250 | { |
251 | if (!buffer->buf.addr) |
252 | return; |
253 | |
254 | netif_dbg(efx, hw, efx->net_dev, |
255 | "deallocating special buffers %d-%d at %llx+%x " |
256 | "(virt %p phys %llx)\n" , buffer->index, |
257 | buffer->index + buffer->entries - 1, |
258 | (u64)buffer->buf.dma_addr, buffer->buf.len, |
259 | buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); |
260 | |
	ef4_nic_free_buffer(efx, &buffer->buf);
262 | buffer->entries = 0; |
263 | } |
264 | |
265 | /************************************************************************** |
266 | * |
267 | * TX path |
268 | * |
269 | **************************************************************************/ |
270 | |
271 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
272 | static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue) |
273 | { |
274 | unsigned write_ptr; |
275 | ef4_dword_t reg; |
276 | |
277 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
278 | EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); |
279 | ef4_writed_page(tx_queue->efx, ®, |
280 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
281 | } |
282 | |
283 | /* Write pointer and first descriptor for TX descriptor ring */ |
284 | static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue, |
285 | const ef4_qword_t *txd) |
286 | { |
287 | unsigned write_ptr; |
288 | ef4_oword_t reg; |
289 | |
290 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); |
291 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); |
292 | |
293 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
294 | EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, |
295 | FRF_AZ_TX_DESC_WPTR, write_ptr); |
296 | reg.qword[0] = *txd; |
297 | ef4_writeo_page(tx_queue->efx, ®, |
298 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); |
299 | } |
300 | |
301 | |
302 | /* For each entry inserted into the software descriptor ring, create a |
303 | * descriptor in the hardware TX descriptor ring (in host memory), and |
304 | * write a doorbell. |
305 | */ |
306 | void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue) |
307 | { |
308 | struct ef4_tx_buffer *buffer; |
309 | ef4_qword_t *txd; |
310 | unsigned write_ptr; |
311 | unsigned old_write_count = tx_queue->write_count; |
312 | |
313 | tx_queue->xmit_more_available = false; |
314 | if (unlikely(tx_queue->write_count == tx_queue->insert_count)) |
315 | return; |
316 | |
317 | do { |
318 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
319 | buffer = &tx_queue->buffer[write_ptr]; |
		txd = ef4_tx_desc(tx_queue, write_ptr);
321 | ++tx_queue->write_count; |
322 | |
323 | EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION); |
324 | |
325 | /* Create TX descriptor ring entry */ |
326 | BUILD_BUG_ON(EF4_TX_BUF_CONT != 1); |
327 | EF4_POPULATE_QWORD_4(*txd, |
328 | FSF_AZ_TX_KER_CONT, |
329 | buffer->flags & EF4_TX_BUF_CONT, |
330 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, |
331 | FSF_AZ_TX_KER_BUF_REGION, 0, |
332 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
333 | } while (tx_queue->write_count != tx_queue->insert_count); |
334 | |
335 | wmb(); /* Ensure descriptors are written before they are fetched */ |
336 | |
	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
340 | ef4_farch_push_tx_desc(tx_queue, txd); |
341 | ++tx_queue->pushes; |
342 | } else { |
343 | ef4_farch_notify_tx_desc(tx_queue); |
344 | } |
345 | } |
346 | |
347 | unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue, |
348 | dma_addr_t dma_addr, unsigned int len) |
349 | { |
350 | /* Don't cross 4K boundaries with descriptors. */ |
351 | unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1; |
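	/* e.g. with a 4K EF4_PAGE_SIZE and a dma_addr ending in 0xf80,
	 * limit == (~0xf80 & 0xfff) + 1 == 0x80: only 128 bytes remain
	 * before the boundary.
	 */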
352 | |
353 | len = min(limit, len); |
354 | |
355 | if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf)) |
356 | len = min_t(unsigned int, len, 512 - (dma_addr & 0xf)); |
357 | |
358 | return len; |
359 | } |
360 | |
361 | |
362 | /* Allocate hardware resources for a TX queue */ |
363 | int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue) |
364 | { |
365 | struct ef4_nic *efx = tx_queue->efx; |
366 | unsigned entries; |
367 | |
368 | entries = tx_queue->ptr_mask + 1; |
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
371 | } |
372 | |
373 | void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue) |
374 | { |
375 | struct ef4_nic *efx = tx_queue->efx; |
376 | ef4_oword_t reg; |
377 | |
378 | /* Pin TX descriptor ring */ |
	ef4_init_special_buffer(efx, &tx_queue->txd);
380 | |
381 | /* Push TX descriptor ring to card */ |
382 | EF4_POPULATE_OWORD_10(reg, |
383 | FRF_AZ_TX_DESCQ_EN, 1, |
384 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, |
385 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, |
386 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, |
387 | FRF_AZ_TX_DESCQ_EVQ_ID, |
388 | tx_queue->channel->channel, |
389 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, |
390 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, |
391 | FRF_AZ_TX_DESCQ_SIZE, |
392 | __ffs(tx_queue->txd.entries), |
393 | FRF_AZ_TX_DESCQ_TYPE, 0, |
394 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); |
395 | |
396 | if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { |
397 | int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD; |
398 | EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); |
399 | EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, |
400 | !csum); |
401 | } |
402 | |
403 | ef4_writeo_table(efx, value: ®, reg: efx->type->txd_ptr_tbl_base, |
404 | index: tx_queue->queue); |
405 | |
406 | if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) { |
407 | /* Only 128 bits in this register */ |
408 | BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128); |
409 | |
410 | ef4_reado(efx, value: ®, FR_AA_TX_CHKSM_CFG); |
411 | if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD) |
412 | __clear_bit_le(nr: tx_queue->queue, addr: ®); |
413 | else |
414 | __set_bit_le(nr: tx_queue->queue, addr: ®); |
415 | ef4_writeo(efx, value: ®, FR_AA_TX_CHKSM_CFG); |
416 | } |
417 | |
418 | if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { |
419 | EF4_POPULATE_OWORD_1(reg, |
420 | FRF_BZ_TX_PACE, |
421 | (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ? |
422 | FFE_BZ_TX_PACE_OFF : |
423 | FFE_BZ_TX_PACE_RESERVED); |
424 | ef4_writeo_table(efx, value: ®, FR_BZ_TX_PACE_TBL, |
425 | index: tx_queue->queue); |
426 | } |
427 | } |
428 | |
429 | static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue) |
430 | { |
431 | struct ef4_nic *efx = tx_queue->efx; |
432 | ef4_oword_t tx_flush_descq; |
433 | |
434 | WARN_ON(atomic_read(&tx_queue->flush_outstanding)); |
	atomic_set(&tx_queue->flush_outstanding, 1);
436 | |
437 | EF4_POPULATE_OWORD_2(tx_flush_descq, |
438 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, |
439 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); |
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
441 | } |
442 | |
443 | void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue) |
444 | { |
445 | struct ef4_nic *efx = tx_queue->efx; |
446 | ef4_oword_t tx_desc_ptr; |
447 | |
448 | /* Remove TX descriptor ring from card */ |
449 | EF4_ZERO_OWORD(tx_desc_ptr); |
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);
452 | |
453 | /* Unpin TX descriptor ring */ |
	ef4_fini_special_buffer(efx, &tx_queue->txd);
455 | } |
456 | |
457 | /* Free buffers backing TX queue */ |
458 | void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue) |
459 | { |
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
461 | } |
462 | |
463 | /************************************************************************** |
464 | * |
465 | * RX path |
466 | * |
467 | **************************************************************************/ |
468 | |
469 | /* This creates an entry in the RX descriptor queue */ |
470 | static inline void |
471 | ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index) |
472 | { |
473 | struct ef4_rx_buffer *rx_buf; |
474 | ef4_qword_t *rxd; |
475 | |
476 | rxd = ef4_rx_desc(rx_queue, index); |
477 | rx_buf = ef4_rx_buffer(rx_queue, index); |
478 | EF4_POPULATE_QWORD_3(*rxd, |
479 | FSF_AZ_RX_KER_BUF_SIZE, |
480 | rx_buf->len - |
481 | rx_queue->efx->type->rx_buffer_padding, |
482 | FSF_AZ_RX_KER_BUF_REGION, 0, |
483 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
484 | } |
485 | |
486 | /* This writes to the RX_DESC_WPTR register for the specified receive |
487 | * descriptor ring. |
488 | */ |
489 | void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue) |
490 | { |
491 | struct ef4_nic *efx = rx_queue->efx; |
492 | ef4_dword_t reg; |
493 | unsigned write_ptr; |
494 | |
495 | while (rx_queue->notified_count != rx_queue->added_count) { |
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
499 | ++rx_queue->notified_count; |
500 | } |
501 | |
	wmb(); /* Ensure descriptors are written before the write pointer */
503 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; |
504 | EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); |
505 | ef4_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, |
506 | ef4_rx_queue_index(rx_queue)); |
507 | } |
508 | |
509 | int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue) |
510 | { |
511 | struct ef4_nic *efx = rx_queue->efx; |
512 | unsigned entries; |
513 | |
514 | entries = rx_queue->ptr_mask + 1; |
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
517 | } |
518 | |
519 | void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue) |
520 | { |
521 | ef4_oword_t rx_desc_ptr; |
522 | struct ef4_nic *efx = rx_queue->efx; |
523 | bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0; |
524 | bool iscsi_digest_en = is_b0; |
525 | bool jumbo_en; |
526 | |
527 | /* For kernel-mode queues in Falcon A1, the JUMBO flag enables |
528 | * DMA to continue after a PCIe page boundary (and scattering |
529 | * is not possible). In Falcon B0 and Siena, it enables |
530 | * scatter. |
531 | */ |
532 | jumbo_en = !is_b0 || efx->rx_scatter; |
533 | |
534 | netif_dbg(efx, hw, efx->net_dev, |
535 | "RX queue %d ring in special buffers %d-%d\n" , |
536 | ef4_rx_queue_index(rx_queue), rx_queue->rxd.index, |
537 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); |
538 | |
539 | rx_queue->scatter_n = 0; |
540 | |
541 | /* Pin RX descriptor ring */ |
	ef4_init_special_buffer(efx, &rx_queue->rxd);
543 | |
544 | /* Push RX descriptor ring to card */ |
545 | EF4_POPULATE_OWORD_10(rx_desc_ptr, |
546 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, |
547 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, |
548 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, |
549 | FRF_AZ_RX_DESCQ_EVQ_ID, |
550 | ef4_rx_queue_channel(rx_queue)->channel, |
551 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, |
552 | FRF_AZ_RX_DESCQ_LABEL, |
553 | ef4_rx_queue_index(rx_queue), |
554 | FRF_AZ_RX_DESCQ_SIZE, |
555 | __ffs(rx_queue->rxd.entries), |
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
557 | FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, |
558 | FRF_AZ_RX_DESCQ_EN, 1); |
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
561 | } |
562 | |
563 | static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue) |
564 | { |
565 | struct ef4_nic *efx = rx_queue->efx; |
566 | ef4_oword_t rx_flush_descq; |
567 | |
568 | EF4_POPULATE_OWORD_2(rx_flush_descq, |
569 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, |
570 | FRF_AZ_RX_FLUSH_DESCQ, |
571 | ef4_rx_queue_index(rx_queue)); |
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
573 | } |
574 | |
575 | void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue) |
576 | { |
577 | ef4_oword_t rx_desc_ptr; |
578 | struct ef4_nic *efx = rx_queue->efx; |
579 | |
580 | /* Remove RX descriptor ring from card */ |
581 | EF4_ZERO_OWORD(rx_desc_ptr); |
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
584 | |
585 | /* Unpin RX descriptor ring */ |
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
587 | } |
588 | |
589 | /* Free buffers backing RX queue */ |
590 | void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue) |
591 | { |
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
593 | } |
594 | |
595 | /************************************************************************** |
596 | * |
597 | * Flush handling |
598 | * |
599 | **************************************************************************/ |
600 | |
601 | /* ef4_farch_flush_queues() must be woken up when all flushes are completed, |
602 | * or more RX flushes can be kicked off. |
603 | */ |
604 | static bool ef4_farch_flush_wake(struct ef4_nic *efx) |
605 | { |
606 | /* Ensure that all updates are visible to ef4_farch_flush_queues() */ |
607 | smp_mb(); |
608 | |
	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
612 | } |
613 | |
614 | static bool ef4_check_tx_flush_complete(struct ef4_nic *efx) |
615 | { |
616 | bool i = true; |
617 | ef4_oword_t txd_ptr_tbl; |
618 | struct ef4_channel *channel; |
619 | struct ef4_tx_queue *tx_queue; |
620 | |
621 | ef4_for_each_channel(channel, efx) { |
622 | ef4_for_each_channel_tx_queue(tx_queue, channel) { |
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
625 | if (EF4_OWORD_FIELD(txd_ptr_tbl, |
626 | FRF_AZ_TX_DESCQ_FLUSH) || |
627 | EF4_OWORD_FIELD(txd_ptr_tbl, |
628 | FRF_AZ_TX_DESCQ_EN)) { |
629 | netif_dbg(efx, hw, efx->net_dev, |
630 | "flush did not complete on TXQ %d\n" , |
631 | tx_queue->queue); |
632 | i = false; |
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
635 | /* The flush is complete, but we didn't |
636 | * receive a flush completion event |
637 | */ |
638 | netif_dbg(efx, hw, efx->net_dev, |
639 | "flush complete on TXQ %d, so drain " |
640 | "the queue\n" , tx_queue->queue); |
641 | /* Don't need to increment active_queues as it |
642 | * has already been incremented for the queues |
643 | * which did not drain |
644 | */ |
645 | ef4_farch_magic_event(channel, |
646 | EF4_CHANNEL_MAGIC_TX_DRAIN( |
647 | tx_queue)); |
648 | } |
649 | } |
650 | } |
651 | |
652 | return i; |
653 | } |
654 | |
655 | /* Flush all the transmit queues, and continue flushing receive queues until |
656 | * they're all flushed. Wait for the DRAIN events to be received so that there |
657 | * are no more RX and TX events left on any channel. */ |
658 | static int ef4_farch_do_flush(struct ef4_nic *efx) |
659 | { |
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
661 | struct ef4_channel *channel; |
662 | struct ef4_rx_queue *rx_queue; |
663 | struct ef4_tx_queue *tx_queue; |
664 | int rc = 0; |
665 | |
666 | ef4_for_each_channel(channel, efx) { |
667 | ef4_for_each_channel_tx_queue(tx_queue, channel) { |
668 | ef4_farch_flush_tx_queue(tx_queue); |
669 | } |
670 | ef4_for_each_channel_rx_queue(rx_queue, channel) { |
671 | rx_queue->flush_pending = true; |
			atomic_inc(&efx->rxq_flush_pending);
673 | } |
674 | } |
675 | |
	while (timeout && atomic_read(&efx->active_queues) > 0) {
677 | /* The hardware supports four concurrent rx flushes, each of |
678 | * which may need to be retried if there is an outstanding |
679 | * descriptor fetch |
680 | */ |
681 | ef4_for_each_channel(channel, efx) { |
682 | ef4_for_each_channel_rx_queue(rx_queue, channel) { |
				if (atomic_read(&efx->rxq_flush_outstanding) >=
684 | EF4_RX_FLUSH_COUNT) |
685 | break; |
686 | |
687 | if (rx_queue->flush_pending) { |
688 | rx_queue->flush_pending = false; |
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
691 | ef4_farch_flush_rx_queue(rx_queue); |
692 | } |
693 | } |
694 | } |
695 | |
696 | timeout = wait_event_timeout(efx->flush_wq, |
697 | ef4_farch_flush_wake(efx), |
698 | timeout); |
699 | } |
700 | |
	if (atomic_read(&efx->active_queues) &&
702 | !ef4_check_tx_flush_complete(efx)) { |
703 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " |
704 | "(rx %d+%d)\n" , atomic_read(&efx->active_queues), |
705 | atomic_read(&efx->rxq_flush_outstanding), |
706 | atomic_read(&efx->rxq_flush_pending)); |
707 | rc = -ETIMEDOUT; |
708 | |
		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
712 | } |
713 | |
714 | return rc; |
715 | } |
716 | |
717 | int ef4_farch_fini_dmaq(struct ef4_nic *efx) |
718 | { |
719 | struct ef4_channel *channel; |
720 | struct ef4_tx_queue *tx_queue; |
721 | struct ef4_rx_queue *rx_queue; |
722 | int rc = 0; |
723 | |
724 | /* Do not attempt to write to the NIC during EEH recovery */ |
725 | if (efx->state != STATE_RECOVERY) { |
726 | /* Only perform flush if DMA is enabled */ |
727 | if (efx->pci_dev->is_busmaster) { |
728 | efx->type->prepare_flush(efx); |
729 | rc = ef4_farch_do_flush(efx); |
730 | efx->type->finish_flush(efx); |
731 | } |
732 | |
733 | ef4_for_each_channel(channel, efx) { |
734 | ef4_for_each_channel_rx_queue(rx_queue, channel) |
735 | ef4_farch_rx_fini(rx_queue); |
736 | ef4_for_each_channel_tx_queue(tx_queue, channel) |
737 | ef4_farch_tx_fini(tx_queue); |
738 | } |
739 | } |
740 | |
741 | return rc; |
742 | } |
743 | |
744 | /* Reset queue and flush accounting after FLR |
745 | * |
746 | * One possible cause of FLR recovery is that DMA may be failing (eg. if bus |
747 | * mastering was disabled), in which case we don't receive (RXQ) flush |
748 | * completion events. This means that efx->rxq_flush_outstanding remained at 4 |
749 | * after the FLR; also, efx->active_queues was non-zero (as no flush completion |
 * events were received, and we didn't go through ef4_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
752 | * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 |
753 | * for batched flush requests; and the efx->active_queues gets messed up because |
754 | * we keep incrementing for the newly initialised queues, but it never went to |
755 | * zero previously. Then we get a timeout every time we try to restart the |
756 | * queues, as it doesn't go back to zero when we should be flushing the queues. |
757 | */ |
758 | void ef4_farch_finish_flr(struct ef4_nic *efx) |
759 | { |
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
763 | } |
764 | |
765 | |
766 | /************************************************************************** |
767 | * |
768 | * Event queue processing |
769 | * Event queues are processed by per-channel tasklets. |
770 | * |
771 | **************************************************************************/ |
772 | |
773 | /* Update a channel's event queue's read pointer (RPTR) register |
774 | * |
775 | * This writes the EVQ_RPTR_REG register for the specified channel's |
776 | * event queue. |
777 | */ |
778 | void ef4_farch_ev_read_ack(struct ef4_channel *channel) |
779 | { |
780 | ef4_dword_t reg; |
781 | struct ef4_nic *efx = channel->efx; |
782 | |
783 | EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, |
784 | channel->eventq_read_ptr & channel->eventq_mask); |
785 | |
786 | /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size |
787 | * of 4 bytes, but it is really 16 bytes just like later revisions. |
788 | */ |
789 | ef4_writed(efx, value: ®, |
790 | reg: efx->type->evq_rptr_tbl_base + |
791 | FR_BZ_EVQ_RPTR_STEP * channel->channel); |
792 | } |
793 | |
794 | /* Use HW to insert a SW defined event */ |
795 | void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq, |
796 | ef4_qword_t *event) |
797 | { |
798 | ef4_oword_t drv_ev_reg; |
799 | |
800 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || |
801 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); |
802 | drv_ev_reg.u32[0] = event->u32[0]; |
803 | drv_ev_reg.u32[1] = event->u32[1]; |
804 | drv_ev_reg.u32[2] = 0; |
805 | drv_ev_reg.u32[3] = 0; |
806 | EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); |
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
808 | } |
809 | |
810 | static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic) |
811 | { |
812 | ef4_qword_t event; |
813 | |
814 | EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, |
815 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
816 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
818 | } |
819 | |
820 | /* Handle a transmit completion event |
821 | * |
822 | * The NIC batches TX completion events; the message we receive is of |
823 | * the form "complete all TX events up to this index". |
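 *
 * For example, with read_count == 10 and an event DESC_PTR of 14,
 * (14 - 10) & ptr_mask == 4 descriptors are completed by this one
 * event.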
824 | */ |
825 | static int |
826 | ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) |
827 | { |
828 | unsigned int tx_ev_desc_ptr; |
829 | unsigned int tx_ev_q_label; |
830 | struct ef4_tx_queue *tx_queue; |
831 | struct ef4_nic *efx = channel->efx; |
832 | int tx_packets = 0; |
833 | |
834 | if (unlikely(READ_ONCE(efx->reset_pending))) |
835 | return 0; |
836 | |
837 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
838 | /* Transmit completion */ |
839 | tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); |
840 | tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
846 | } else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { |
847 | /* Rewrite the FIFO write pointer */ |
848 | tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
851 | |
		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
857 | } else { |
858 | netif_err(efx, tx_err, efx->net_dev, |
859 | "channel %d unexpected TX event " |
860 | EF4_QWORD_FMT"\n" , channel->channel, |
861 | EF4_QWORD_VAL(*event)); |
862 | } |
863 | |
864 | return tx_packets; |
865 | } |
866 | |
/* Detect errors included in the rx_ev_pkt_ok bit. */
868 | static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue, |
869 | const ef4_qword_t *event) |
870 | { |
871 | struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue); |
872 | struct ef4_nic *efx = rx_queue->efx; |
873 | bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; |
874 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; |
875 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; |
876 | bool rx_ev_pause_frm; |
877 | |
878 | rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); |
879 | rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event, |
880 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); |
881 | rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event, |
882 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); |
883 | rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event, |
884 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); |
885 | rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); |
886 | rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); |
887 | rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ? |
888 | 0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); |
889 | rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); |
890 | |
891 | |
892 | /* Count errors that are not in MAC stats. Ignore expected |
893 | * checksum errors during self-test. */ |
894 | if (rx_ev_frm_trunc) |
895 | ++channel->n_rx_frm_trunc; |
896 | else if (rx_ev_tobe_disc) |
897 | ++channel->n_rx_tobe_disc; |
898 | else if (!efx->loopback_selftest) { |
899 | if (rx_ev_ip_hdr_chksum_err) |
900 | ++channel->n_rx_ip_hdr_chksum_err; |
901 | else if (rx_ev_tcp_udp_chksum_err) |
902 | ++channel->n_rx_tcp_udp_chksum_err; |
903 | } |
904 | |
905 | /* TOBE_DISC is expected on unicast mismatches; don't print out an |
906 | * error message. FRM_TRUNC indicates RXDP dropped the packet due |
907 | * to a FIFO overflow. |
908 | */ |
909 | #ifdef DEBUG |
910 | { |
911 | /* Every error apart from tobe_disc and pause_frm */ |
912 | |
913 | bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | |
914 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | |
915 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); |
916 | |
917 | if (rx_ev_other_err && net_ratelimit()) { |
918 | netif_dbg(efx, rx_err, efx->net_dev, |
919 | " RX queue %d unexpected RX event " |
920 | EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n" , |
921 | ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event), |
922 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "" , |
923 | rx_ev_ip_hdr_chksum_err ? |
924 | " [IP_HDR_CHKSUM_ERR]" : "" , |
925 | rx_ev_tcp_udp_chksum_err ? |
926 | " [TCP_UDP_CHKSUM_ERR]" : "" , |
927 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "" , |
928 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "" , |
929 | rx_ev_drib_nib ? " [DRIB_NIB]" : "" , |
930 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "" , |
931 | rx_ev_pause_frm ? " [PAUSE]" : "" ); |
932 | } |
933 | } |
934 | #endif |
935 | |
936 | /* The frame must be discarded if any of these are true. */ |
937 | return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | |
938 | rx_ev_tobe_disc | rx_ev_pause_frm) ? |
939 | EF4_RX_PKT_DISCARD : 0; |
940 | } |
941 | |
942 | /* Handle receive events that are not in-order. Return true if this |
943 | * can be handled as a partial packet discard, false if it's more |
944 | * serious. |
945 | */ |
946 | static bool |
947 | ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index) |
948 | { |
949 | struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue); |
950 | struct ef4_nic *efx = rx_queue->efx; |
951 | unsigned expected, dropped; |
952 | |
953 | if (rx_queue->scatter_n && |
954 | index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & |
955 | rx_queue->ptr_mask)) { |
956 | ++channel->n_rx_nodesc_trunc; |
957 | return true; |
958 | } |
959 | |
960 | expected = rx_queue->removed_count & rx_queue->ptr_mask; |
961 | dropped = (index - expected) & rx_queue->ptr_mask; |
962 | netif_info(efx, rx_err, efx->net_dev, |
963 | "dropped %d events (index=%d expected=%d)\n" , |
964 | dropped, index, expected); |
965 | |
966 | ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ? |
967 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
968 | return false; |
969 | } |
970 | |
971 | /* Handle a packet received event |
972 | * |
973 | * The NIC gives a "discard" flag if it's a unicast packet with the |
974 | * wrong destination address |
975 | * Also "is multicast" and "matches multicast filter" flags can be used to |
976 | * discard non-matching multicast packets. |
977 | */ |
978 | static void |
979 | ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) |
980 | { |
981 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; |
982 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; |
983 | unsigned expected_ptr; |
984 | bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; |
985 | u16 flags; |
986 | struct ef4_rx_queue *rx_queue; |
987 | struct ef4_nic *efx = channel->efx; |
988 | |
989 | if (unlikely(READ_ONCE(efx->reset_pending))) |
990 | return; |
991 | |
992 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
993 | rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); |
994 | WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != |
995 | channel->channel); |
996 | |
997 | rx_queue = ef4_channel_get_rx_queue(channel); |
998 | |
999 | rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); |
1000 | expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & |
1001 | rx_queue->ptr_mask); |
1002 | |
1003 | /* Check for partial drops and other errors */ |
1004 | if (unlikely(rx_ev_desc_ptr != expected_ptr) || |
1005 | unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { |
1006 | if (rx_ev_desc_ptr != expected_ptr && |
		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1008 | return; |
1009 | |
1010 | /* Discard all pending fragments */ |
1011 | if (rx_queue->scatter_n) { |
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
1016 | rx_queue->removed_count += rx_queue->scatter_n; |
1017 | rx_queue->scatter_n = 0; |
1018 | } |
1019 | |
1020 | /* Return if there is no new fragment */ |
1021 | if (rx_ev_desc_ptr != expected_ptr) |
1022 | return; |
1023 | |
1024 | /* Discard new fragment if not SOP */ |
1025 | if (!rx_ev_sop) { |
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EF4_RX_PKT_DISCARD);
1030 | ++rx_queue->removed_count; |
1031 | return; |
1032 | } |
1033 | } |
1034 | |
1035 | ++rx_queue->scatter_n; |
1036 | if (rx_ev_cont) |
1037 | return; |
1038 | |
1039 | rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); |
1040 | rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); |
1041 | rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
1042 | |
1043 | if (likely(rx_ev_pkt_ok)) { |
1044 | /* If packet is marked as OK then we can rely on the |
1045 | * hardware checksum and classification. |
1046 | */ |
1047 | flags = 0; |
1048 | switch (rx_ev_hdr_type) { |
1049 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: |
1050 | flags |= EF4_RX_PKT_TCP; |
1051 | fallthrough; |
1052 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: |
1053 | flags |= EF4_RX_PKT_CSUMMED; |
1054 | fallthrough; |
1055 | case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: |
1056 | case FSE_AZ_RX_EV_HDR_TYPE_OTHER: |
1057 | break; |
1058 | } |
1059 | } else { |
1060 | flags = ef4_farch_handle_rx_not_ok(rx_queue, event); |
1061 | } |
1062 | |
1063 | /* Detect multicast packets that didn't match the filter */ |
1064 | rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
1065 | if (rx_ev_mcast_pkt) { |
1066 | unsigned int rx_ev_mcast_hash_match = |
1067 | EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); |
1068 | |
1069 | if (unlikely(!rx_ev_mcast_hash_match)) { |
1070 | ++channel->n_rx_mcast_mismatch; |
1071 | flags |= EF4_RX_PKT_DISCARD; |
1072 | } |
1073 | } |
1074 | |
1075 | channel->irq_mod_score += 2; |
1076 | |
1077 | /* Handle received packet */ |
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1081 | rx_queue->removed_count += rx_queue->scatter_n; |
1082 | rx_queue->scatter_n = 0; |
1083 | } |
1084 | |
1085 | /* If this flush done event corresponds to a &struct ef4_tx_queue, then |
1086 | * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue |
1087 | * of all transmit completions. |
1088 | */ |
1089 | static void |
1090 | ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) |
1091 | { |
1092 | struct ef4_tx_queue *tx_queue; |
1093 | int qid; |
1094 | |
1095 | qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); |
1096 | if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) { |
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1102 | } |
1103 | } |
1104 | } |
1105 | |
1106 | /* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush |
1107 | * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add |
1108 | * the RX queue back to the mask of RX queues in need of flushing. |
1109 | */ |
1110 | static void |
1111 | ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) |
1112 | { |
1113 | struct ef4_channel *channel; |
1114 | struct ef4_rx_queue *rx_queue; |
1115 | int qid; |
1116 | bool failed; |
1117 | |
1118 | qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); |
1119 | failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); |
1120 | if (qid >= efx->n_channels) |
1121 | return; |
	channel = ef4_get_channel(efx, qid);
1123 | if (!ef4_channel_has_rx_queue(channel)) |
1124 | return; |
1125 | rx_queue = ef4_channel_get_rx_queue(channel); |
1126 | |
1127 | if (failed) { |
1128 | netif_info(efx, hw, efx->net_dev, |
1129 | "RXQ %d flush retry\n" , qid); |
1130 | rx_queue->flush_pending = true; |
		atomic_inc(&efx->rxq_flush_pending);
1132 | } else { |
		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1135 | } |
	atomic_dec(&efx->rxq_flush_outstanding);
1137 | if (ef4_farch_flush_wake(efx)) |
1138 | wake_up(&efx->flush_wq); |
1139 | } |
1140 | |
1141 | static void |
1142 | ef4_farch_handle_drain_event(struct ef4_channel *channel) |
1143 | { |
1144 | struct ef4_nic *efx = channel->efx; |
1145 | |
1146 | WARN_ON(atomic_read(&efx->active_queues) == 0); |
	atomic_dec(&efx->active_queues);
1148 | if (ef4_farch_flush_wake(efx)) |
1149 | wake_up(&efx->flush_wq); |
1150 | } |
1151 | |
1152 | static void ef4_farch_handle_generated_event(struct ef4_channel *channel, |
1153 | ef4_qword_t *event) |
1154 | { |
1155 | struct ef4_nic *efx = channel->efx; |
1156 | struct ef4_rx_queue *rx_queue = |
1157 | ef4_channel_has_rx_queue(channel) ? |
1158 | ef4_channel_get_rx_queue(channel) : NULL; |
1159 | unsigned magic, code; |
1160 | |
1161 | magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); |
1162 | code = _EF4_CHANNEL_MAGIC_CODE(magic); |
1163 | |
1164 | if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) { |
1165 | channel->event_test_cpu = raw_smp_processor_id(); |
1166 | } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) { |
1167 | /* The queue must be empty, so we won't receive any rx |
1168 | * events, so ef4_process_channel() won't refill the |
1169 | * queue. Refill it here */ |
		ef4_fast_push_rx_descriptors(rx_queue, true);
1171 | } else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { |
1172 | ef4_farch_handle_drain_event(channel); |
1173 | } else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) { |
1174 | ef4_farch_handle_drain_event(channel); |
1175 | } else { |
1176 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " |
1177 | "generated event " EF4_QWORD_FMT"\n" , |
1178 | channel->channel, EF4_QWORD_VAL(*event)); |
1179 | } |
1180 | } |
1181 | |
1182 | static void |
1183 | ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event) |
1184 | { |
1185 | struct ef4_nic *efx = channel->efx; |
1186 | unsigned int ev_sub_code; |
1187 | unsigned int ev_sub_data; |
1188 | |
1189 | ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); |
1190 | ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); |
1191 | |
1192 | switch (ev_sub_code) { |
1193 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: |
1194 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n" , |
1195 | channel->channel, ev_sub_data); |
1196 | ef4_farch_handle_tx_flush_done(efx, event); |
1197 | break; |
1198 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
1199 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n" , |
1200 | channel->channel, ev_sub_data); |
1201 | ef4_farch_handle_rx_flush_done(efx, event); |
1202 | break; |
1203 | case FSE_AZ_EVQ_INIT_DONE_EV: |
1204 | netif_dbg(efx, hw, efx->net_dev, |
1205 | "channel %d EVQ %d initialised\n" , |
1206 | channel->channel, ev_sub_data); |
1207 | break; |
1208 | case FSE_AZ_SRM_UPD_DONE_EV: |
1209 | netif_vdbg(efx, hw, efx->net_dev, |
1210 | "channel %d SRAM update done\n" , channel->channel); |
1211 | break; |
1212 | case FSE_AZ_WAKE_UP_EV: |
1213 | netif_vdbg(efx, hw, efx->net_dev, |
1214 | "channel %d RXQ %d wakeup event\n" , |
1215 | channel->channel, ev_sub_data); |
1216 | break; |
1217 | case FSE_AZ_TIMER_EV: |
1218 | netif_vdbg(efx, hw, efx->net_dev, |
1219 | "channel %d RX queue %d timer expired\n" , |
1220 | channel->channel, ev_sub_data); |
1221 | break; |
1222 | case FSE_AA_RX_RECOVER_EV: |
1223 | netif_err(efx, rx_err, efx->net_dev, |
1224 | "channel %d seen DRIVER RX_RESET event. " |
1225 | "Resetting.\n" , channel->channel); |
1226 | atomic_inc(v: &efx->rx_reset); |
1227 | ef4_schedule_reset(efx, |
1228 | EF4_WORKAROUND_6555(efx) ? |
1229 | RESET_TYPE_RX_RECOVERY : |
1230 | RESET_TYPE_DISABLE); |
1231 | break; |
1232 | case FSE_BZ_RX_DSC_ERROR_EV: |
1233 | netif_err(efx, rx_err, efx->net_dev, |
1234 | "RX DMA Q %d reports descriptor fetch error." |
1235 | " RX Q %d is disabled.\n" , ev_sub_data, |
1236 | ev_sub_data); |
1237 | ef4_schedule_reset(efx, type: RESET_TYPE_DMA_ERROR); |
1238 | break; |
1239 | case FSE_BZ_TX_DSC_ERROR_EV: |
1240 | netif_err(efx, tx_err, efx->net_dev, |
1241 | "TX DMA Q %d reports descriptor fetch error." |
1242 | " TX Q %d is disabled.\n" , ev_sub_data, |
1243 | ev_sub_data); |
1244 | ef4_schedule_reset(efx, type: RESET_TYPE_DMA_ERROR); |
1245 | break; |
1246 | default: |
1247 | netif_vdbg(efx, hw, efx->net_dev, |
1248 | "channel %d unknown driver event code %d " |
1249 | "data %04x\n" , channel->channel, ev_sub_code, |
1250 | ev_sub_data); |
1251 | break; |
1252 | } |
1253 | } |
1254 | |
1255 | int ef4_farch_ev_process(struct ef4_channel *channel, int budget) |
1256 | { |
1257 | struct ef4_nic *efx = channel->efx; |
1258 | unsigned int read_ptr; |
1259 | ef4_qword_t event, *p_event; |
1260 | int ev_code; |
1261 | int tx_packets = 0; |
1262 | int spent = 0; |
1263 | |
1264 | if (budget <= 0) |
1265 | return spent; |
1266 | |
1267 | read_ptr = channel->eventq_read_ptr; |
1268 | |
1269 | for (;;) { |
		p_event = ef4_event(channel, read_ptr);
1271 | event = *p_event; |
1272 | |
		if (!ef4_event_present(&event))
1274 | /* End of events */ |
1275 | break; |
1276 | |
1277 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
1278 | "channel %d event is " EF4_QWORD_FMT"\n" , |
1279 | channel->channel, EF4_QWORD_VAL(event)); |
1280 | |
1281 | /* Clear this event by marking it all ones */ |
1282 | EF4_SET_QWORD(*p_event); |
1283 | |
1284 | ++read_ptr; |
1285 | |
1286 | ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
1287 | |
1288 | switch (ev_code) { |
1289 | case FSE_AZ_EV_CODE_RX_EV: |
			ef4_farch_handle_rx_event(channel, &event);
1291 | if (++spent == budget) |
1292 | goto out; |
1293 | break; |
1294 | case FSE_AZ_EV_CODE_TX_EV: |
			tx_packets += ef4_farch_handle_tx_event(channel,
								&event);
1297 | if (tx_packets > efx->txq_entries) { |
1298 | spent = budget; |
1299 | goto out; |
1300 | } |
1301 | break; |
1302 | case FSE_AZ_EV_CODE_DRV_GEN_EV: |
			ef4_farch_handle_generated_event(channel, &event);
1304 | break; |
1305 | case FSE_AZ_EV_CODE_DRIVER_EV: |
			ef4_farch_handle_driver_event(channel, &event);
1307 | break; |
1308 | case FSE_AZ_EV_CODE_GLOBAL_EV: |
1309 | if (efx->type->handle_global_event && |
1310 | efx->type->handle_global_event(channel, &event)) |
1311 | break; |
1312 | fallthrough; |
1313 | default: |
1314 | netif_err(channel->efx, hw, channel->efx->net_dev, |
1315 | "channel %d unknown event type %d (data " |
1316 | EF4_QWORD_FMT ")\n" , channel->channel, |
1317 | ev_code, EF4_QWORD_VAL(event)); |
1318 | } |
1319 | } |
1320 | |
1321 | out: |
1322 | channel->eventq_read_ptr = read_ptr; |
1323 | return spent; |
1324 | } |
1325 | |
1326 | /* Allocate buffer table entries for event queue */ |
1327 | int ef4_farch_ev_probe(struct ef4_channel *channel) |
1328 | { |
1329 | struct ef4_nic *efx = channel->efx; |
1330 | unsigned entries; |
1331 | |
1332 | entries = channel->eventq_mask + 1; |
	return ef4_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(ef4_qword_t));
1335 | } |
1336 | |
1337 | int ef4_farch_ev_init(struct ef4_channel *channel) |
1338 | { |
1339 | ef4_oword_t reg; |
1340 | struct ef4_nic *efx = channel->efx; |
1341 | |
1342 | netif_dbg(efx, hw, efx->net_dev, |
1343 | "channel %d event queue in special buffers %d-%d\n" , |
1344 | channel->channel, channel->eventq.index, |
1345 | channel->eventq.index + channel->eventq.entries - 1); |
1346 | |
1347 | /* Pin event queue buffer */ |
	ef4_init_special_buffer(efx, &channel->eventq);
1349 | |
1350 | /* Fill event queue with all ones (i.e. empty events) */ |
1351 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); |
1352 | |
1353 | /* Push event queue to card */ |
1354 | EF4_POPULATE_OWORD_3(reg, |
1355 | FRF_AZ_EVQ_EN, 1, |
1356 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), |
1357 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); |
1358 | ef4_writeo_table(efx, value: ®, reg: efx->type->evq_ptr_tbl_base, |
1359 | index: channel->channel); |
1360 | |
1361 | return 0; |
1362 | } |
1363 | |
1364 | void ef4_farch_ev_fini(struct ef4_channel *channel) |
1365 | { |
1366 | ef4_oword_t reg; |
1367 | struct ef4_nic *efx = channel->efx; |
1368 | |
1369 | /* Remove event queue from card */ |
1370 | EF4_ZERO_OWORD(reg); |
1371 | ef4_writeo_table(efx, value: ®, reg: efx->type->evq_ptr_tbl_base, |
1372 | index: channel->channel); |
1373 | |
1374 | /* Unpin event queue */ |
	ef4_fini_special_buffer(efx, &channel->eventq);
1376 | } |
1377 | |
1378 | /* Free buffers backing event queue */ |
1379 | void ef4_farch_ev_remove(struct ef4_channel *channel) |
1380 | { |
	ef4_free_special_buffer(channel->efx, &channel->eventq);
1382 | } |
1383 | |
1384 | |
1385 | void ef4_farch_ev_test_generate(struct ef4_channel *channel) |
1386 | { |
1387 | ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel)); |
1388 | } |
1389 | |
1390 | void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue) |
1391 | { |
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
1394 | } |
1395 | |
1396 | /************************************************************************** |
1397 | * |
1398 | * Hardware interrupts |
1399 | * The hardware interrupt handler does very little work; all the event |
1400 | * queue processing is carried out by per-channel tasklets. |
1401 | * |
1402 | **************************************************************************/ |
1403 | |
1404 | /* Enable/disable/generate interrupts */ |
1405 | static inline void ef4_farch_interrupts(struct ef4_nic *efx, |
1406 | bool enabled, bool force) |
1407 | { |
1408 | ef4_oword_t int_en_reg_ker; |
1409 | |
1410 | EF4_POPULATE_OWORD_3(int_en_reg_ker, |
1411 | FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, |
1412 | FRF_AZ_KER_INT_KER, force, |
1413 | FRF_AZ_DRV_INT_EN_KER, enabled); |
	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1415 | } |
1416 | |
1417 | void ef4_farch_irq_enable_master(struct ef4_nic *efx) |
1418 | { |
1419 | EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr)); |
1420 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ |
1421 | |
	ef4_farch_interrupts(efx, true, false);
1423 | } |
1424 | |
1425 | void ef4_farch_irq_disable_master(struct ef4_nic *efx) |
1426 | { |
1427 | /* Disable interrupts */ |
	ef4_farch_interrupts(efx, false, false);
1429 | } |
1430 | |
1431 | /* Generate a test interrupt |
1432 | * Interrupt must already have been enabled, otherwise nasty things |
1433 | * may happen. |
1434 | */ |
1435 | int ef4_farch_irq_test_generate(struct ef4_nic *efx) |
1436 | { |
	ef4_farch_interrupts(efx, true, true);
1438 | return 0; |
1439 | } |
1440 | |
1441 | /* Process a fatal interrupt |
1442 | * Disable bus mastering ASAP and schedule a reset |
1443 | */ |
1444 | irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) |
1445 | { |
1446 | struct falcon_nic_data *nic_data = efx->nic_data; |
1447 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1448 | ef4_oword_t fatal_intr; |
1449 | int error, mem_perr; |
1450 | |
	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1452 | error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); |
1453 | |
1454 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR " EF4_OWORD_FMT" status " |
1455 | EF4_OWORD_FMT ": %s\n" , EF4_OWORD_VAL(*int_ker), |
1456 | EF4_OWORD_VAL(fatal_intr), |
1457 | error ? "disabling bus mastering" : "no recognised error" ); |
1458 | |
1459 | /* If this is a memory parity error dump which blocks are offending */ |
1460 | mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || |
1461 | EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); |
1462 | if (mem_perr) { |
1463 | ef4_oword_t reg; |
1464 | ef4_reado(efx, value: ®, FR_AZ_MEM_STAT); |
1465 | netif_err(efx, hw, efx->net_dev, |
1466 | "SYSTEM ERROR: memory parity error " EF4_OWORD_FMT"\n" , |
1467 | EF4_OWORD_VAL(reg)); |
1468 | } |
1469 | |
1470 | /* Disable both devices */ |
	pci_clear_master(efx->pci_dev);
	if (ef4_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
1474 | ef4_farch_irq_disable_master(efx); |
1475 | |
1476 | /* Count errors and reset or disable the NIC accordingly */ |
1477 | if (efx->int_error_count == 0 || |
1478 | time_after(jiffies, efx->int_error_expire)) { |
1479 | efx->int_error_count = 0; |
1480 | efx->int_error_expire = |
1481 | jiffies + EF4_INT_ERROR_EXPIRE * HZ; |
1482 | } |
1483 | if (++efx->int_error_count < EF4_MAX_INT_ERRORS) { |
1484 | netif_err(efx, hw, efx->net_dev, |
1485 | "SYSTEM ERROR - reset scheduled\n" ); |
1486 | ef4_schedule_reset(efx, type: RESET_TYPE_INT_ERROR); |
1487 | } else { |
1488 | netif_err(efx, hw, efx->net_dev, |
1489 | "SYSTEM ERROR - max number of errors seen." |
1490 | "NIC will be disabled\n" ); |
1491 | ef4_schedule_reset(efx, type: RESET_TYPE_DISABLE); |
1492 | } |
1493 | |
1494 | return IRQ_HANDLED; |
1495 | } |
1496 | |
1497 | /* Handle a legacy interrupt |
 * Acknowledges the interrupt and schedules event queue processing.
1499 | */ |
1500 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) |
1501 | { |
1502 | struct ef4_nic *efx = dev_id; |
1503 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1504 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1505 | irqreturn_t result = IRQ_NONE; |
1506 | struct ef4_channel *channel; |
1507 | ef4_dword_t reg; |
1508 | u32 queues; |
1509 | int syserr; |
1510 | |
1511 | /* Read the ISR which also ACKs the interrupts */ |
1512 | ef4_readd(efx, value: ®, FR_BZ_INT_ISR0); |
1513 | queues = EF4_EXTRACT_DWORD(reg, 0, 31); |
1514 | |
1515 | /* Legacy interrupts are disabled too late by the EEH kernel |
1516 | * code. Disable them earlier. |
1517 | * If an EEH error occurred, the read will have returned all ones. |
1518 | */ |
1519 | if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) && |
1520 | !efx->eeh_disabled_legacy_irq) { |
		disable_irq_nosync(efx->legacy_irq);
1522 | efx->eeh_disabled_legacy_irq = true; |
1523 | } |
1524 | |
1525 | /* Handle non-event-queue sources */ |
1526 | if (queues & (1U << efx->irq_level) && soft_enabled) { |
1527 | syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1528 | if (unlikely(syserr)) |
1529 | return ef4_farch_fatal_interrupt(efx); |
1530 | efx->last_irq_cpu = raw_smp_processor_id(); |
1531 | } |
1532 | |
1533 | if (queues != 0) { |
1534 | efx->irq_zero_count = 0; |
1535 | |
1536 | /* Schedule processing of any interrupting queues */ |
1537 | if (likely(soft_enabled)) { |
1538 | ef4_for_each_channel(channel, efx) { |
1539 | if (queues & 1) |
1540 | ef4_schedule_channel_irq(channel); |
1541 | queues >>= 1; |
1542 | } |
1543 | } |
1544 | result = IRQ_HANDLED; |
1545 | |
1546 | } else { |
1547 | ef4_qword_t *event; |
1548 | |
1549 | /* Legacy ISR read can return zero once (SF bug 15783) */ |
1550 | |
1551 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 |
1552 | * because this might be a shared interrupt. */ |
1553 | if (efx->irq_zero_count++ == 0) |
1554 | result = IRQ_HANDLED; |
1555 | |
1556 | /* Ensure we schedule or rearm all event queues */ |
1557 | if (likely(soft_enabled)) { |
1558 | ef4_for_each_channel(channel, efx) { |
				event = ef4_event(channel,
						  channel->eventq_read_ptr);
1561 | if (ef4_event_present(event)) |
1562 | ef4_schedule_channel_irq(channel); |
1563 | else |
1564 | ef4_farch_ev_read_ack(channel); |
1565 | } |
1566 | } |
1567 | } |
1568 | |
1569 | if (result == IRQ_HANDLED) |
1570 | netif_vdbg(efx, intr, efx->net_dev, |
1571 | "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n" , |
1572 | irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg)); |
1573 | |
1574 | return result; |
1575 | } |
1576 | |
1577 | /* Handle an MSI interrupt |
1578 | * |
1579 | * Handle an MSI hardware interrupt. This routine schedules event |
1580 | * queue processing. No interrupt acknowledgement cycle is necessary. |
1581 | * Also, we never need to check that the interrupt is for us, since |
1582 | * MSI interrupts cannot be shared. |
1583 | */ |
1584 | irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id) |
1585 | { |
1586 | struct ef4_msi_context *context = dev_id; |
1587 | struct ef4_nic *efx = context->efx; |
1588 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1589 | int syserr; |
1590 | |
1591 | netif_vdbg(efx, intr, efx->net_dev, |
1592 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n" , |
1593 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
1594 | |
1595 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1596 | return IRQ_HANDLED; |
1597 | |
1598 | /* Handle non-event-queue sources */ |
1599 | if (context->index == efx->irq_level) { |
1600 | syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1601 | if (unlikely(syserr)) |
1602 | return ef4_farch_fatal_interrupt(efx); |
1603 | efx->last_irq_cpu = raw_smp_processor_id(); |
1604 | } |
1605 | |
1606 | /* Schedule processing of the channel */ |
	ef4_schedule_channel_irq(efx->channel[context->index]);
1608 | |
1609 | return IRQ_HANDLED; |
1610 | } |
1611 | |
1612 | /* Setup RSS indirection table. |
1613 | * This maps from the hash value of the packet to RXQ |
1614 | */ |
1615 | void ef4_farch_rx_push_indir_table(struct ef4_nic *efx) |
1616 | { |
1617 | size_t i = 0; |
1618 | ef4_dword_t dword; |
1619 | |
1620 | BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0); |
1621 | |
1622 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
1623 | FR_BZ_RX_INDIRECTION_TBL_ROWS); |
1624 | |
1625 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { |
1626 | EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, |
1627 | efx->rx_indir_table[i]); |
		ef4_writed(efx, &dword,
1629 | FR_BZ_RX_INDIRECTION_TBL + |
1630 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); |
1631 | } |
1632 | } |
1633 | |
1634 | /* Looks at available SRAM resources and works out how many queues we |
1635 | * can support, and where things like descriptor caches should live. |
1636 | * |
1637 | * SRAM is split up as follows: |
1638 | * 0 buftbl entries for channels |
1639 | * efx->vf_buftbl_base buftbl entries for SR-IOV |
1640 | * efx->rx_dc_base RX descriptor caches |
1641 | * efx->tx_dc_base TX descriptor caches |
1642 | */ |
1643 | void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw) |
1644 | { |
1645 | unsigned vi_count; |
1646 | |
1647 | /* Account for the buffer table entries backing the datapath channels |
1648 | * and the descriptor caches for those channels. |
1649 | */ |
1650 | vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES); |
1651 | |
1652 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; |
1653 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; |
1654 | } |
1655 | |
1656 | u32 ef4_farch_fpga_ver(struct ef4_nic *efx) |
1657 | { |
1658 | ef4_oword_t altera_build; |
1659 | ef4_reado(efx, value: &altera_build, FR_AZ_ALTERA_BUILD); |
1660 | return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); |
1661 | } |
1662 | |
1663 | void ef4_farch_init_common(struct ef4_nic *efx) |
1664 | { |
1665 | ef4_oword_t temp; |
1666 | |
1667 | /* Set positions of descriptor caches in SRAM. */ |
1668 | EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); |
1669 | ef4_writeo(efx, value: &temp, FR_AZ_SRM_TX_DC_CFG); |
1670 | EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); |
1671 | ef4_writeo(efx, value: &temp, FR_AZ_SRM_RX_DC_CFG); |
1672 | |
1673 | /* Set TX descriptor cache size. */ |
1674 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); |
1675 | EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); |
1676 | ef4_writeo(efx, value: &temp, FR_AZ_TX_DC_CFG); |
1677 | |
1678 | /* Set RX descriptor cache size. Set low watermark to size-8, as |
1679 | * this allows most efficient prefetching. |
1680 | */ |
1681 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); |
1682 | EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); |
1683 | ef4_writeo(efx, value: &temp, FR_AZ_RX_DC_CFG); |
1684 | EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); |
1685 | ef4_writeo(efx, value: &temp, FR_AZ_RX_DC_PF_WM); |
1686 | |
1687 | /* Program INT_KER address */ |
1688 | EF4_POPULATE_OWORD_2(temp, |
1689 | FRF_AZ_NORM_INT_VEC_DIS_KER, |
1690 | EF4_INT_MODE_USE_MSI(efx), |
1691 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); |
1692 | ef4_writeo(efx, value: &temp, FR_AZ_INT_ADR_KER); |
1693 | |
1694 | /* Use a valid MSI-X vector */ |
1695 | efx->irq_level = 0; |
1696 | |
1697 | /* Enable all the genuinely fatal interrupts. (They are still |
1698 | * masked by the overall interrupt mask, controlled by |
1699 | * falcon_interrupts()). |
1700 | * |
1701 | * Note: All other fatal interrupts are enabled |
1702 | */ |
1703 | EF4_POPULATE_OWORD_3(temp, |
1704 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, |
1705 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, |
1706 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); |
1707 | EF4_INVERT_OWORD(temp); |
1708 | ef4_writeo(efx, value: &temp, FR_AZ_FATAL_INTR_KER); |
1709 | |
1710 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be |
1711 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. |
1712 | */ |
1713 | ef4_reado(efx, value: &temp, FR_AZ_TX_RESERVED); |
1714 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
1715 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
1716 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
1717 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); |
1718 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
1719 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
1720 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
1721 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
1722 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
1723 | /* Disable hardware watchdog which can misfire */ |
1724 | EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); |
1725 | /* Squash TX of packets of 16 bytes or less */ |
1726 | if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) |
1727 | EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
1728 | ef4_writeo(efx, value: &temp, FR_AZ_TX_RESERVED); |
1729 | |
1730 | if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { |
1731 | EF4_POPULATE_OWORD_4(temp, |
1732 | /* Default values */ |
1733 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, |
1734 | FRF_BZ_TX_PACE_SB_AF, 0xb, |
1735 | FRF_BZ_TX_PACE_FB_BASE, 0, |
1736 | /* Allow large pace values in the |
1737 | * fast bin. */ |
1738 | FRF_BZ_TX_PACE_BIN_TH, |
1739 | FFE_BZ_TX_PACE_RESERVED); |
1740 | ef4_writeo(efx, value: &temp, FR_BZ_TX_PACE); |
1741 | } |
1742 | } |
1743 | |
1744 | /************************************************************************** |
1745 | * |
1746 | * Filter tables |
1747 | * |
1748 | ************************************************************************** |
1749 | */ |
1750 | |
1751 | /* "Fudge factors" - difference between programmed value and actual depth. |
1752 | * Due to pipelined implementation we need to program H/W with a value that |
1753 | * is larger than the hop limit we want. |
1754 | */ |
1755 | #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 |
1756 | #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 |
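
/* For example, a wanted wildcard search depth of 5 is programmed into
 * the hardware as 5 + EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD == 8; see
 * ef4_farch_filter_push_rx_config() below.
 */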
1757 | |
1758 | /* Hard maximum search limit. Hardware will time-out beyond 200-something. |
1759 | * We also need to avoid infinite loops in ef4_farch_filter_search() when the |
1760 | * table is full. |
1761 | */ |
1762 | #define EF4_FARCH_FILTER_CTL_SRCH_MAX 200 |
1763 | |
1764 | /* Don't try very hard to find space for performance hints, as this is |
1765 | * counter-productive. */ |
1766 | #define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 |
1767 | |
1768 | enum ef4_farch_filter_type { |
1769 | EF4_FARCH_FILTER_TCP_FULL = 0, |
1770 | EF4_FARCH_FILTER_TCP_WILD, |
1771 | EF4_FARCH_FILTER_UDP_FULL, |
1772 | EF4_FARCH_FILTER_UDP_WILD, |
1773 | EF4_FARCH_FILTER_MAC_FULL = 4, |
1774 | EF4_FARCH_FILTER_MAC_WILD, |
1775 | EF4_FARCH_FILTER_UC_DEF = 8, |
1776 | EF4_FARCH_FILTER_MC_DEF, |
1777 | EF4_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ |
1778 | }; |
1779 | |
1780 | enum ef4_farch_filter_table_id { |
1781 | EF4_FARCH_FILTER_TABLE_RX_IP = 0, |
1782 | EF4_FARCH_FILTER_TABLE_RX_MAC, |
1783 | EF4_FARCH_FILTER_TABLE_RX_DEF, |
1784 | EF4_FARCH_FILTER_TABLE_TX_MAC, |
1785 | EF4_FARCH_FILTER_TABLE_COUNT, |
1786 | }; |
1787 | |
1788 | enum ef4_farch_filter_index { |
1789 | EF4_FARCH_FILTER_INDEX_UC_DEF, |
1790 | EF4_FARCH_FILTER_INDEX_MC_DEF, |
1791 | EF4_FARCH_FILTER_SIZE_RX_DEF, |
1792 | }; |
1793 | |
1794 | struct ef4_farch_filter_spec { |
1795 | u8 type:4; |
1796 | u8 priority:4; |
1797 | u8 flags; |
1798 | u16 dmaq_id; |
1799 | u32 data[3]; |
1800 | }; |
1801 | |
1802 | struct ef4_farch_filter_table { |
1803 | enum ef4_farch_filter_table_id id; |
1804 | u32 offset; /* address of table relative to BAR */ |
1805 | unsigned size; /* number of entries */ |
1806 | unsigned step; /* step between entries */ |
1807 | unsigned used; /* number currently used */ |
1808 | unsigned long *used_bitmap; |
1809 | struct ef4_farch_filter_spec *spec; |
1810 | unsigned search_limit[EF4_FARCH_FILTER_TYPE_COUNT]; |
1811 | }; |
1812 | |
1813 | struct ef4_farch_filter_state { |
1814 | struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT]; |
1815 | }; |
1816 | |
1817 | static void |
1818 | ef4_farch_filter_table_clear_entry(struct ef4_nic *efx, |
1819 | struct ef4_farch_filter_table *table, |
1820 | unsigned int filter_idx); |
1821 | |
1822 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit |
1823 | * key derived from the n-tuple. The initial LFSR state is 0xffff. */ |
1824 | static u16 ef4_farch_filter_hash(u32 key) |
1825 | { |
1826 | u16 tmp; |
1827 | |
1828 | /* First 16 rounds */ |
1829 | tmp = 0x1fff ^ key >> 16; |
1830 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; |
1831 | tmp = tmp ^ tmp >> 9; |
1832 | /* Last 16 rounds */ |
1833 | tmp = tmp ^ tmp << 13 ^ key; |
1834 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; |
1835 | return tmp ^ tmp >> 9; |
1836 | } |
1837 | |
1838 | /* To allow for hash collisions, filter search continues at these |
1839 | * increments from the first possible entry selected by the hash. */ |
1840 | static u16 ef4_farch_filter_increment(u32 key) |
1841 | { |
1842 | return key * 2 - 1; |
1843 | } |
1844 | |
1845 | static enum ef4_farch_filter_table_id |
1846 | ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec) |
1847 | { |
1848 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != |
1849 | (EF4_FARCH_FILTER_TCP_FULL >> 2)); |
1850 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != |
1851 | (EF4_FARCH_FILTER_TCP_WILD >> 2)); |
1852 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != |
1853 | (EF4_FARCH_FILTER_UDP_FULL >> 2)); |
1854 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP != |
1855 | (EF4_FARCH_FILTER_UDP_WILD >> 2)); |
1856 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC != |
1857 | (EF4_FARCH_FILTER_MAC_FULL >> 2)); |
1858 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC != |
1859 | (EF4_FARCH_FILTER_MAC_WILD >> 2)); |
1860 | BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC != |
1861 | EF4_FARCH_FILTER_TABLE_RX_MAC + 2); |
1862 | return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0); |
1863 | } |
1864 | |
1865 | static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx) |
1866 | { |
1867 | struct ef4_farch_filter_state *state = efx->filter_state; |
1868 | struct ef4_farch_filter_table *table; |
1869 | ef4_oword_t filter_ctl; |
1870 | |
1871 | ef4_reado(efx, value: &filter_ctl, FR_BZ_RX_FILTER_CTL); |
1872 | |
1873 | table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; |
1874 | EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, |
1875 | table->search_limit[EF4_FARCH_FILTER_TCP_FULL] + |
1876 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1877 | EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, |
1878 | table->search_limit[EF4_FARCH_FILTER_TCP_WILD] + |
1879 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1880 | EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, |
1881 | table->search_limit[EF4_FARCH_FILTER_UDP_FULL] + |
1882 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1883 | EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, |
1884 | table->search_limit[EF4_FARCH_FILTER_UDP_WILD] + |
1885 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1886 | |
1887 | table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC]; |
1888 | if (table->size) { |
1889 | EF4_SET_OWORD_FIELD( |
1890 | filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, |
1891 | table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + |
1892 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1893 | EF4_SET_OWORD_FIELD( |
1894 | filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, |
1895 | table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + |
1896 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1897 | } |
1898 | |
1899 | table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; |
1900 | if (table->size) { |
1901 | EF4_SET_OWORD_FIELD( |
1902 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, |
1903 | table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); |
1904 | EF4_SET_OWORD_FIELD( |
1905 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, |
1906 | !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & |
1907 | EF4_FILTER_FLAG_RX_RSS)); |
1908 | EF4_SET_OWORD_FIELD( |
1909 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, |
1910 | table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); |
1911 | EF4_SET_OWORD_FIELD( |
1912 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, |
1913 | !!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & |
1914 | EF4_FILTER_FLAG_RX_RSS)); |
1915 | |
1916 | /* There is a single bit to enable RX scatter for all |
1917 | * unmatched packets. Only set it if scatter is |
1918 | * enabled in both filter specs. |
1919 | */ |
1920 | EF4_SET_OWORD_FIELD( |
1921 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, |
1922 | !!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags & |
1923 | table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags & |
1924 | EF4_FILTER_FLAG_RX_SCATTER)); |
1925 | } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { |
1926 | /* We don't expose 'default' filters because unmatched |
1927 | * packets always go to the queue number found in the |
1928 | * RSS table. But we still need to set the RX scatter |
1929 | * bit here. |
1930 | */ |
1931 | EF4_SET_OWORD_FIELD( |
1932 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, |
1933 | efx->rx_scatter); |
1934 | } |
1935 | |
1936 | ef4_writeo(efx, value: &filter_ctl, FR_BZ_RX_FILTER_CTL); |
1937 | } |
1938 | |
1939 | static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx) |
1940 | { |
1941 | struct ef4_farch_filter_state *state = efx->filter_state; |
1942 | struct ef4_farch_filter_table *table; |
1943 | ef4_oword_t tx_cfg; |
1944 | |
1945 | ef4_reado(efx, value: &tx_cfg, FR_AZ_TX_CFG); |
1946 | |
1947 | table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC]; |
1948 | if (table->size) { |
1949 | EF4_SET_OWORD_FIELD( |
1950 | tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, |
1951 | table->search_limit[EF4_FARCH_FILTER_MAC_FULL] + |
1952 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); |
1953 | EF4_SET_OWORD_FIELD( |
1954 | tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, |
1955 | table->search_limit[EF4_FARCH_FILTER_MAC_WILD] + |
1956 | EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); |
1957 | } |
1958 | |
1959 | ef4_writeo(efx, value: &tx_cfg, FR_AZ_TX_CFG); |
1960 | } |
1961 | |
1962 | static int |
1963 | ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, |
1964 | const struct ef4_filter_spec *gen_spec) |
1965 | { |
1966 | bool is_full = false; |
1967 | |
1968 | if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) && |
1969 | gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT) |
1970 | return -EINVAL; |
1971 | |
1972 | spec->priority = gen_spec->priority; |
1973 | spec->flags = gen_spec->flags; |
1974 | spec->dmaq_id = gen_spec->dmaq_id; |
1975 | |
1976 | switch (gen_spec->match_flags) { |
1977 | case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | |
1978 | EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT | |
1979 | EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT): |
1980 | is_full = true; |
1981 | fallthrough; |
1982 | case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | |
1983 | EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): { |
1984 | __be32 rhost, host1, host2; |
1985 | __be16 rport, port1, port2; |
1986 | |
1987 | EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX)); |
1988 | |
1989 | if (gen_spec->ether_type != htons(ETH_P_IP)) |
1990 | return -EPROTONOSUPPORT; |
1991 | if (gen_spec->loc_port == 0 || |
1992 | (is_full && gen_spec->rem_port == 0)) |
1993 | return -EADDRNOTAVAIL; |
1994 | switch (gen_spec->ip_proto) { |
1995 | case IPPROTO_TCP: |
1996 | spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL : |
1997 | EF4_FARCH_FILTER_TCP_WILD); |
1998 | break; |
1999 | case IPPROTO_UDP: |
2000 | spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL : |
2001 | EF4_FARCH_FILTER_UDP_WILD); |
2002 | break; |
2003 | default: |
2004 | return -EPROTONOSUPPORT; |
2005 | } |
2006 | |
2007 | /* Filter is constructed in terms of source and destination, |
2008 | * with the odd wrinkle that the ports are swapped in a UDP |
2009 | * wildcard filter. We need to convert from local and remote |
2010 | * (= zero for wildcard) addresses. |
2011 | */ |
2012 | rhost = is_full ? gen_spec->rem_host[0] : 0; |
2013 | rport = is_full ? gen_spec->rem_port : 0; |
2014 | host1 = rhost; |
2015 | host2 = gen_spec->loc_host[0]; |
2016 | if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { |
2017 | port1 = gen_spec->loc_port; |
2018 | port2 = rport; |
2019 | } else { |
2020 | port1 = rport; |
2021 | port2 = gen_spec->loc_port; |
2022 | } |
2023 | spec->data[0] = ntohl(host1) << 16 | ntohs(port1); |
2024 | spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; |
2025 | spec->data[2] = ntohl(host2); |
2026 | |
2027 | break; |
2028 | } |
2029 | |
2030 | case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID: |
2031 | is_full = true; |
2032 | fallthrough; |
2033 | case EF4_FILTER_MATCH_LOC_MAC: |
2034 | spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL : |
2035 | EF4_FARCH_FILTER_MAC_WILD); |
2036 | spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; |
2037 | spec->data[1] = (gen_spec->loc_mac[2] << 24 | |
2038 | gen_spec->loc_mac[3] << 16 | |
2039 | gen_spec->loc_mac[4] << 8 | |
2040 | gen_spec->loc_mac[5]); |
2041 | spec->data[2] = (gen_spec->loc_mac[0] << 8 | |
2042 | gen_spec->loc_mac[1]); |
2043 | break; |
2044 | |
2045 | case EF4_FILTER_MATCH_LOC_MAC_IG: |
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2047 | EF4_FARCH_FILTER_MC_DEF : |
2048 | EF4_FARCH_FILTER_UC_DEF); |
2049 | memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ |
2050 | break; |
2051 | |
2052 | default: |
2053 | return -EPROTONOSUPPORT; |
2054 | } |
2055 | |
2056 | return 0; |
2057 | } |
2058 | |
2059 | static void |
2060 | ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, |
2061 | const struct ef4_farch_filter_spec *spec) |
2062 | { |
2063 | bool is_full = false; |
2064 | |
2065 | /* *gen_spec should be completely initialised, to be consistent |
2066 | * with ef4_filter_init_{rx,tx}() and in case we want to copy |
2067 | * it back to userland. |
2068 | */ |
2069 | memset(gen_spec, 0, sizeof(*gen_spec)); |
2070 | |
2071 | gen_spec->priority = spec->priority; |
2072 | gen_spec->flags = spec->flags; |
2073 | gen_spec->dmaq_id = spec->dmaq_id; |
2074 | |
2075 | switch (spec->type) { |
2076 | case EF4_FARCH_FILTER_TCP_FULL: |
2077 | case EF4_FARCH_FILTER_UDP_FULL: |
2078 | is_full = true; |
2079 | fallthrough; |
2080 | case EF4_FARCH_FILTER_TCP_WILD: |
2081 | case EF4_FARCH_FILTER_UDP_WILD: { |
2082 | __be32 host1, host2; |
2083 | __be16 port1, port2; |
2084 | |
2085 | gen_spec->match_flags = |
2086 | EF4_FILTER_MATCH_ETHER_TYPE | |
2087 | EF4_FILTER_MATCH_IP_PROTO | |
2088 | EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT; |
2089 | if (is_full) |
2090 | gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST | |
2091 | EF4_FILTER_MATCH_REM_PORT); |
2092 | gen_spec->ether_type = htons(ETH_P_IP); |
2093 | gen_spec->ip_proto = |
2094 | (spec->type == EF4_FARCH_FILTER_TCP_FULL || |
2095 | spec->type == EF4_FARCH_FILTER_TCP_WILD) ? |
2096 | IPPROTO_TCP : IPPROTO_UDP; |
2097 | |
2098 | host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); |
2099 | port1 = htons(spec->data[0]); |
2100 | host2 = htonl(spec->data[2]); |
2101 | port2 = htons(spec->data[1] >> 16); |
2102 | if (spec->flags & EF4_FILTER_FLAG_TX) { |
2103 | gen_spec->loc_host[0] = host1; |
2104 | gen_spec->rem_host[0] = host2; |
2105 | } else { |
2106 | gen_spec->loc_host[0] = host2; |
2107 | gen_spec->rem_host[0] = host1; |
2108 | } |
2109 | if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^ |
2110 | (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { |
2111 | gen_spec->loc_port = port1; |
2112 | gen_spec->rem_port = port2; |
2113 | } else { |
2114 | gen_spec->loc_port = port2; |
2115 | gen_spec->rem_port = port1; |
2116 | } |
2117 | |
2118 | break; |
2119 | } |
2120 | |
2121 | case EF4_FARCH_FILTER_MAC_FULL: |
2122 | is_full = true; |
2123 | fallthrough; |
2124 | case EF4_FARCH_FILTER_MAC_WILD: |
2125 | gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC; |
2126 | if (is_full) |
2127 | gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID; |
2128 | gen_spec->loc_mac[0] = spec->data[2] >> 8; |
2129 | gen_spec->loc_mac[1] = spec->data[2]; |
2130 | gen_spec->loc_mac[2] = spec->data[1] >> 24; |
2131 | gen_spec->loc_mac[3] = spec->data[1] >> 16; |
2132 | gen_spec->loc_mac[4] = spec->data[1] >> 8; |
2133 | gen_spec->loc_mac[5] = spec->data[1]; |
2134 | gen_spec->outer_vid = htons(spec->data[0]); |
2135 | break; |
2136 | |
2137 | case EF4_FARCH_FILTER_UC_DEF: |
2138 | case EF4_FARCH_FILTER_MC_DEF: |
2139 | gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG; |
2140 | gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF; |
2141 | break; |
2142 | |
2143 | default: |
2144 | WARN_ON(1); |
2145 | break; |
2146 | } |
2147 | } |
2148 | |
2149 | static void |
2150 | ef4_farch_filter_init_rx_auto(struct ef4_nic *efx, |
2151 | struct ef4_farch_filter_spec *spec) |
2152 | { |
2153 | /* If there's only one channel then disable RSS for non VF |
2154 | * traffic, thereby allowing VFs to use RSS when the PF can't. |
2155 | */ |
2156 | spec->priority = EF4_FILTER_PRI_AUTO; |
2157 | spec->flags = (EF4_FILTER_FLAG_RX | |
2158 | (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) | |
2159 | (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0)); |
2160 | spec->dmaq_id = 0; |
2161 | } |
2162 | |
2163 | /* Build a filter entry and return its n-tuple key. */ |
2164 | static u32 ef4_farch_filter_build(ef4_oword_t *filter, |
2165 | struct ef4_farch_filter_spec *spec) |
2166 | { |
2167 | u32 data3; |
2168 | |
2169 | switch (ef4_farch_filter_spec_table_id(spec)) { |
2170 | case EF4_FARCH_FILTER_TABLE_RX_IP: { |
2171 | bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL || |
2172 | spec->type == EF4_FARCH_FILTER_UDP_WILD); |
2173 | EF4_POPULATE_OWORD_7( |
2174 | *filter, |
2175 | FRF_BZ_RSS_EN, |
2176 | !!(spec->flags & EF4_FILTER_FLAG_RX_RSS), |
2177 | FRF_BZ_SCATTER_EN, |
2178 | !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER), |
2179 | FRF_BZ_TCP_UDP, is_udp, |
2180 | FRF_BZ_RXQ_ID, spec->dmaq_id, |
2181 | EF4_DWORD_2, spec->data[2], |
2182 | EF4_DWORD_1, spec->data[1], |
2183 | EF4_DWORD_0, spec->data[0]); |
2184 | data3 = is_udp; |
2185 | break; |
2186 | } |
2187 | |
2188 | case EF4_FARCH_FILTER_TABLE_RX_MAC: { |
2189 | bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD; |
2190 | EF4_POPULATE_OWORD_7( |
2191 | *filter, |
2192 | FRF_CZ_RMFT_RSS_EN, |
2193 | !!(spec->flags & EF4_FILTER_FLAG_RX_RSS), |
2194 | FRF_CZ_RMFT_SCATTER_EN, |
2195 | !!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER), |
2196 | FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, |
2197 | FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, |
2198 | FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], |
2199 | FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], |
2200 | FRF_CZ_RMFT_VLAN_ID, spec->data[0]); |
2201 | data3 = is_wild; |
2202 | break; |
2203 | } |
2204 | |
2205 | case EF4_FARCH_FILTER_TABLE_TX_MAC: { |
2206 | bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD; |
2207 | EF4_POPULATE_OWORD_5(*filter, |
2208 | FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, |
2209 | FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, |
2210 | FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], |
2211 | FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], |
2212 | FRF_CZ_TMFT_VLAN_ID, spec->data[0]); |
2213 | data3 = is_wild | spec->dmaq_id << 1; |
2214 | break; |
2215 | } |
2216 | |
2217 | default: |
2218 | BUG(); |
2219 | } |
2220 | |
2221 | return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; |
2222 | } |
2223 | |
2224 | static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left, |
2225 | const struct ef4_farch_filter_spec *right) |
2226 | { |
2227 | if (left->type != right->type || |
	    memcmp(left->data, right->data, sizeof(left->data)))
2229 | return false; |
2230 | |
2231 | if (left->flags & EF4_FILTER_FLAG_TX && |
2232 | left->dmaq_id != right->dmaq_id) |
2233 | return false; |
2234 | |
2235 | return true; |
2236 | } |
2237 | |
2238 | /* |
2239 | * Construct/deconstruct external filter IDs. At least the RX filter |
2240 | * IDs must be ordered by matching priority, for RX NFC semantics. |
2241 | * |
2242 | * Deconstruction needs to be robust against invalid IDs so that |
2243 | * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can |
2244 | * accept user-provided IDs. |
2245 | */ |
2246 | |
2247 | #define EF4_FARCH_FILTER_MATCH_PRI_COUNT 5 |
2248 | |
2249 | static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = { |
2250 | [EF4_FARCH_FILTER_TCP_FULL] = 0, |
2251 | [EF4_FARCH_FILTER_UDP_FULL] = 0, |
2252 | [EF4_FARCH_FILTER_TCP_WILD] = 1, |
2253 | [EF4_FARCH_FILTER_UDP_WILD] = 1, |
2254 | [EF4_FARCH_FILTER_MAC_FULL] = 2, |
2255 | [EF4_FARCH_FILTER_MAC_WILD] = 3, |
2256 | [EF4_FARCH_FILTER_UC_DEF] = 4, |
2257 | [EF4_FARCH_FILTER_MC_DEF] = 4, |
2258 | }; |
2259 | |
2260 | static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = { |
2261 | EF4_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ |
2262 | EF4_FARCH_FILTER_TABLE_RX_IP, |
2263 | EF4_FARCH_FILTER_TABLE_RX_MAC, |
2264 | EF4_FARCH_FILTER_TABLE_RX_MAC, |
2265 | EF4_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ |
2266 | EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ |
2267 | EF4_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ |
2268 | }; |
2269 | |
2270 | #define EF4_FARCH_FILTER_INDEX_WIDTH 13 |
2271 | #define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1) |
2272 | |
2273 | static inline u32 |
2274 | ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec, |
2275 | unsigned int index) |
2276 | { |
2277 | unsigned int range; |
2278 | |
2279 | range = ef4_farch_filter_type_match_pri[spec->type]; |
2280 | if (!(spec->flags & EF4_FILTER_FLAG_RX)) |
2281 | range += EF4_FARCH_FILTER_MATCH_PRI_COUNT; |
2282 | |
2283 | return range << EF4_FARCH_FILTER_INDEX_WIDTH | index; |
2284 | } |
2285 | |
2286 | static inline enum ef4_farch_filter_table_id |
2287 | ef4_farch_filter_id_table_id(u32 id) |
2288 | { |
2289 | unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH; |
2290 | |
2291 | if (range < ARRAY_SIZE(ef4_farch_filter_range_table)) |
2292 | return ef4_farch_filter_range_table[range]; |
2293 | else |
2294 | return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */ |
2295 | } |
2296 | |
2297 | static inline unsigned int ef4_farch_filter_id_index(u32 id) |
2298 | { |
2299 | return id & EF4_FARCH_FILTER_INDEX_MASK; |
2300 | } |
2301 | |
2302 | u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx) |
2303 | { |
2304 | struct ef4_farch_filter_state *state = efx->filter_state; |
2305 | unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1; |
2306 | enum ef4_farch_filter_table_id table_id; |
2307 | |
2308 | do { |
2309 | table_id = ef4_farch_filter_range_table[range]; |
2310 | if (state->table[table_id].size != 0) |
2311 | return range << EF4_FARCH_FILTER_INDEX_WIDTH | |
2312 | state->table[table_id].size; |
2313 | } while (range--); |
2314 | |
2315 | return 0; |
2316 | } |
2317 | |
2318 | s32 ef4_farch_filter_insert(struct ef4_nic *efx, |
2319 | struct ef4_filter_spec *gen_spec, |
2320 | bool replace_equal) |
2321 | { |
2322 | struct ef4_farch_filter_state *state = efx->filter_state; |
2323 | struct ef4_farch_filter_table *table; |
2324 | struct ef4_farch_filter_spec spec; |
2325 | ef4_oword_t filter; |
2326 | int rep_index, ins_index; |
2327 | unsigned int depth = 0; |
2328 | int rc; |
2329 | |
	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
2331 | if (rc) |
2332 | return rc; |
2333 | |
2334 | table = &state->table[ef4_farch_filter_spec_table_id(spec: &spec)]; |
2335 | if (table->size == 0) |
2336 | return -EINVAL; |
2337 | |
2338 | netif_vdbg(efx, hw, efx->net_dev, |
2339 | "%s: type %d search_limit=%d" , __func__, spec.type, |
2340 | table->search_limit[spec.type]); |
2341 | |
2342 | if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { |
2343 | /* One filter spec per type */ |
2344 | BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0); |
2345 | BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF != |
2346 | EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF); |
2347 | rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF; |
2348 | ins_index = rep_index; |
2349 | |
		spin_lock_bh(&efx->filter_lock);
2351 | } else { |
2352 | /* Search concurrently for |
2353 | * (1) a filter to be replaced (rep_index): any filter |
2354 | * with the same match values, up to the current |
2355 | * search depth for this type, and |
2356 | * (2) the insertion point (ins_index): (1) or any |
2357 | * free slot before it or up to the maximum search |
2358 | * depth for this priority |
2359 | * We fail if we cannot find (2). |
2360 | * |
2361 | * We can stop once either |
2362 | * (a) we find (1), in which case we have definitely |
2363 | * found (2) as well; or |
2364 | * (b) we have searched exhaustively for (1), and have |
2365 | * either found (2) or searched exhaustively for it |
2366 | */ |
		u32 key = ef4_farch_filter_build(&filter, &spec);
2368 | unsigned int hash = ef4_farch_filter_hash(key); |
2369 | unsigned int incr = ef4_farch_filter_increment(key); |
2370 | unsigned int max_rep_depth = table->search_limit[spec.type]; |
2371 | unsigned int max_ins_depth = |
2372 | spec.priority <= EF4_FILTER_PRI_HINT ? |
2373 | EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX : |
2374 | EF4_FARCH_FILTER_CTL_SRCH_MAX; |
2375 | unsigned int i = hash & (table->size - 1); |
2376 | |
2377 | ins_index = -1; |
2378 | depth = 1; |
2379 | |
		spin_lock_bh(&efx->filter_lock);
2381 | |
2382 | for (;;) { |
2383 | if (!test_bit(i, table->used_bitmap)) { |
2384 | if (ins_index < 0) |
2385 | ins_index = i; |
			} else if (ef4_farch_filter_equal(&spec,
							  &table->spec[i])) {
2388 | /* Case (a) */ |
2389 | if (ins_index < 0) |
2390 | ins_index = i; |
2391 | rep_index = i; |
2392 | break; |
2393 | } |
2394 | |
2395 | if (depth >= max_rep_depth && |
2396 | (ins_index >= 0 || depth >= max_ins_depth)) { |
2397 | /* Case (b) */ |
2398 | if (ins_index < 0) { |
2399 | rc = -EBUSY; |
2400 | goto out; |
2401 | } |
2402 | rep_index = -1; |
2403 | break; |
2404 | } |
2405 | |
2406 | i = (i + incr) & (table->size - 1); |
2407 | ++depth; |
2408 | } |
2409 | } |
2410 | |
2411 | /* If we found a filter to be replaced, check whether we |
2412 | * should do so |
2413 | */ |
2414 | if (rep_index >= 0) { |
2415 | struct ef4_farch_filter_spec *saved_spec = |
2416 | &table->spec[rep_index]; |
2417 | |
2418 | if (spec.priority == saved_spec->priority && !replace_equal) { |
2419 | rc = -EEXIST; |
2420 | goto out; |
2421 | } |
2422 | if (spec.priority < saved_spec->priority) { |
2423 | rc = -EPERM; |
2424 | goto out; |
2425 | } |
2426 | if (saved_spec->priority == EF4_FILTER_PRI_AUTO || |
2427 | saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) |
2428 | spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO; |
2429 | } |
2430 | |
2431 | /* Insert the filter */ |
2432 | if (ins_index != rep_index) { |
2433 | __set_bit(ins_index, table->used_bitmap); |
2434 | ++table->used; |
2435 | } |
2436 | table->spec[ins_index] = spec; |
2437 | |
2438 | if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) { |
2439 | ef4_farch_filter_push_rx_config(efx); |
2440 | } else { |
2441 | if (table->search_limit[spec.type] < depth) { |
2442 | table->search_limit[spec.type] = depth; |
2443 | if (spec.flags & EF4_FILTER_FLAG_TX) |
2444 | ef4_farch_filter_push_tx_limits(efx); |
2445 | else |
2446 | ef4_farch_filter_push_rx_config(efx); |
2447 | } |
2448 | |
2449 | ef4_writeo(efx, value: &filter, |
2450 | reg: table->offset + table->step * ins_index); |
2451 | |
2452 | /* If we were able to replace a filter by inserting |
2453 | * at a lower depth, clear the replaced filter |
2454 | */ |
2455 | if (ins_index != rep_index && rep_index >= 0) |
			ef4_farch_filter_table_clear_entry(efx, table,
							   rep_index);
2458 | } |
2459 | |
2460 | netif_vdbg(efx, hw, efx->net_dev, |
2461 | "%s: filter type %d index %d rxq %u set" , |
2462 | __func__, spec.type, ins_index, spec.dmaq_id); |
	rc = ef4_farch_filter_make_id(&spec, ins_index);
2464 | |
2465 | out: |
	spin_unlock_bh(&efx->filter_lock);
2467 | return rc; |
2468 | } |
2469 | |
2470 | static void |
2471 | ef4_farch_filter_table_clear_entry(struct ef4_nic *efx, |
2472 | struct ef4_farch_filter_table *table, |
2473 | unsigned int filter_idx) |
2474 | { |
2475 | static ef4_oword_t filter; |
2476 | |
2477 | EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); |
2478 | BUG_ON(table->offset == 0); /* can't clear MAC default filters */ |
2479 | |
2480 | __clear_bit(filter_idx, table->used_bitmap); |
2481 | --table->used; |
2482 | memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); |
2483 | |
2484 | ef4_writeo(efx, value: &filter, reg: table->offset + table->step * filter_idx); |
2485 | |
2486 | /* If this filter required a greater search depth than |
2487 | * any other, the search limit for its type can now be |
2488 | * decreased. However, it is hard to determine that |
2489 | * unless the table has become completely empty - in |
2490 | * which case, all its search limits can be set to 0. |
2491 | */ |
2492 | if (unlikely(table->used == 0)) { |
2493 | memset(table->search_limit, 0, sizeof(table->search_limit)); |
2494 | if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC) |
2495 | ef4_farch_filter_push_tx_limits(efx); |
2496 | else |
2497 | ef4_farch_filter_push_rx_config(efx); |
2498 | } |
2499 | } |
2500 | |
2501 | static int ef4_farch_filter_remove(struct ef4_nic *efx, |
2502 | struct ef4_farch_filter_table *table, |
2503 | unsigned int filter_idx, |
2504 | enum ef4_filter_priority priority) |
2505 | { |
2506 | struct ef4_farch_filter_spec *spec = &table->spec[filter_idx]; |
2507 | |
2508 | if (!test_bit(filter_idx, table->used_bitmap) || |
2509 | spec->priority != priority) |
2510 | return -ENOENT; |
2511 | |
2512 | if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) { |
2513 | ef4_farch_filter_init_rx_auto(efx, spec); |
2514 | ef4_farch_filter_push_rx_config(efx); |
2515 | } else { |
2516 | ef4_farch_filter_table_clear_entry(efx, table, filter_idx); |
2517 | } |
2518 | |
2519 | return 0; |
2520 | } |
2521 | |
2522 | int ef4_farch_filter_remove_safe(struct ef4_nic *efx, |
2523 | enum ef4_filter_priority priority, |
2524 | u32 filter_id) |
2525 | { |
2526 | struct ef4_farch_filter_state *state = efx->filter_state; |
2527 | enum ef4_farch_filter_table_id table_id; |
2528 | struct ef4_farch_filter_table *table; |
2529 | unsigned int filter_idx; |
2530 | int rc; |
2531 | |
	table_id = ef4_farch_filter_id_table_id(filter_id);
2533 | if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT) |
2534 | return -ENOENT; |
2535 | table = &state->table[table_id]; |
2536 | |
	filter_idx = ef4_farch_filter_id_index(filter_id);
2538 | if (filter_idx >= table->size) |
2539 | return -ENOENT; |
2540 | |
	spin_lock_bh(&efx->filter_lock);
	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);
2544 | |
2545 | return rc; |
2546 | } |
2547 | |
2548 | int ef4_farch_filter_get_safe(struct ef4_nic *efx, |
2549 | enum ef4_filter_priority priority, |
2550 | u32 filter_id, struct ef4_filter_spec *spec_buf) |
2551 | { |
2552 | struct ef4_farch_filter_state *state = efx->filter_state; |
2553 | enum ef4_farch_filter_table_id table_id; |
2554 | struct ef4_farch_filter_table *table; |
2555 | struct ef4_farch_filter_spec *spec; |
2556 | unsigned int filter_idx; |
2557 | int rc; |
2558 | |
	table_id = ef4_farch_filter_id_table_id(filter_id);
2560 | if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT) |
2561 | return -ENOENT; |
2562 | table = &state->table[table_id]; |
2563 | |
	filter_idx = ef4_farch_filter_id_index(filter_id);
2565 | if (filter_idx >= table->size) |
2566 | return -ENOENT; |
2567 | spec = &table->spec[filter_idx]; |
2568 | |
	spin_lock_bh(&efx->filter_lock);
2570 | |
2571 | if (test_bit(filter_idx, table->used_bitmap) && |
2572 | spec->priority == priority) { |
		ef4_farch_filter_to_gen_spec(spec_buf, spec);
2574 | rc = 0; |
2575 | } else { |
2576 | rc = -ENOENT; |
2577 | } |
2578 | |
	spin_unlock_bh(&efx->filter_lock);
2580 | |
2581 | return rc; |
2582 | } |
2583 | |
2584 | static void |
2585 | ef4_farch_filter_table_clear(struct ef4_nic *efx, |
2586 | enum ef4_farch_filter_table_id table_id, |
2587 | enum ef4_filter_priority priority) |
2588 | { |
2589 | struct ef4_farch_filter_state *state = efx->filter_state; |
2590 | struct ef4_farch_filter_table *table = &state->table[table_id]; |
2591 | unsigned int filter_idx; |
2592 | |
	spin_lock_bh(&efx->filter_lock);
2594 | for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { |
2595 | if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO) |
2596 | ef4_farch_filter_remove(efx, table, |
2597 | filter_idx, priority); |
2598 | } |
	spin_unlock_bh(&efx->filter_lock);
2600 | } |
2601 | |
2602 | int ef4_farch_filter_clear_rx(struct ef4_nic *efx, |
2603 | enum ef4_filter_priority priority) |
2604 | { |
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
2611 | return 0; |
2612 | } |
2613 | |
2614 | u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx, |
2615 | enum ef4_filter_priority priority) |
2616 | { |
2617 | struct ef4_farch_filter_state *state = efx->filter_state; |
2618 | enum ef4_farch_filter_table_id table_id; |
2619 | struct ef4_farch_filter_table *table; |
2620 | unsigned int filter_idx; |
2621 | u32 count = 0; |
2622 | |
	spin_lock_bh(&efx->filter_lock);
2624 | |
2625 | for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; |
2626 | table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; |
2627 | table_id++) { |
2628 | table = &state->table[table_id]; |
2629 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2630 | if (test_bit(filter_idx, table->used_bitmap) && |
2631 | table->spec[filter_idx].priority == priority) |
2632 | ++count; |
2633 | } |
2634 | } |
2635 | |
	spin_unlock_bh(&efx->filter_lock);
2637 | |
2638 | return count; |
2639 | } |
2640 | |
2641 | s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx, |
2642 | enum ef4_filter_priority priority, |
2643 | u32 *buf, u32 size) |
2644 | { |
2645 | struct ef4_farch_filter_state *state = efx->filter_state; |
2646 | enum ef4_farch_filter_table_id table_id; |
2647 | struct ef4_farch_filter_table *table; |
2648 | unsigned int filter_idx; |
2649 | s32 count = 0; |
2650 | |
	spin_lock_bh(&efx->filter_lock);
2652 | |
2653 | for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; |
2654 | table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; |
2655 | table_id++) { |
2656 | table = &state->table[table_id]; |
2657 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2658 | if (test_bit(filter_idx, table->used_bitmap) && |
2659 | table->spec[filter_idx].priority == priority) { |
2660 | if (count == size) { |
2661 | count = -EMSGSIZE; |
2662 | goto out; |
2663 | } |
				buf[count++] = ef4_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
2666 | } |
2667 | } |
2668 | } |
2669 | out: |
	spin_unlock_bh(&efx->filter_lock);
2671 | |
2672 | return count; |
2673 | } |
2674 | |
/* Restore filter state after reset */
2676 | void ef4_farch_filter_table_restore(struct ef4_nic *efx) |
2677 | { |
2678 | struct ef4_farch_filter_state *state = efx->filter_state; |
2679 | enum ef4_farch_filter_table_id table_id; |
2680 | struct ef4_farch_filter_table *table; |
2681 | ef4_oword_t filter; |
2682 | unsigned int filter_idx; |
2683 | |
	spin_lock_bh(&efx->filter_lock);
2685 | |
2686 | for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { |
2687 | table = &state->table[table_id]; |
2688 | |
2689 | /* Check whether this is a regular register table */ |
2690 | if (table->step == 0) |
2691 | continue; |
2692 | |
2693 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2694 | if (!test_bit(filter_idx, table->used_bitmap)) |
2695 | continue; |
			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
			ef4_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
2699 | } |
2700 | } |
2701 | |
2702 | ef4_farch_filter_push_rx_config(efx); |
2703 | ef4_farch_filter_push_tx_limits(efx); |
2704 | |
	spin_unlock_bh(&efx->filter_lock);
2706 | } |
2707 | |
2708 | void ef4_farch_filter_table_remove(struct ef4_nic *efx) |
2709 | { |
2710 | struct ef4_farch_filter_state *state = efx->filter_state; |
2711 | enum ef4_farch_filter_table_id table_id; |
2712 | |
2713 | for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { |
		bitmap_free(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
2718 | } |
2719 | |
2720 | int ef4_farch_filter_table_probe(struct ef4_nic *efx) |
2721 | { |
2722 | struct ef4_farch_filter_state *state; |
2723 | struct ef4_farch_filter_table *table; |
2724 | unsigned table_id; |
2725 | |
	state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
2727 | if (!state) |
2728 | return -ENOMEM; |
2729 | efx->filter_state = state; |
2730 | |
2731 | if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { |
2732 | table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; |
2733 | table->id = EF4_FARCH_FILTER_TABLE_RX_IP; |
2734 | table->offset = FR_BZ_RX_FILTER_TBL0; |
2735 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; |
2736 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; |
2737 | } |
2738 | |
2739 | for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) { |
2740 | table = &state->table[table_id]; |
2741 | if (table->size == 0) |
2742 | continue; |
		table->used_bitmap = bitmap_zalloc(table->size, GFP_KERNEL);
2744 | if (!table->used_bitmap) |
2745 | goto fail; |
2746 | table->spec = vzalloc(array_size(sizeof(*table->spec), |
2747 | table->size)); |
2748 | if (!table->spec) |
2749 | goto fail; |
2750 | } |
2751 | |
2752 | table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF]; |
2753 | if (table->size) { |
2754 | /* RX default filters must always exist */ |
2755 | struct ef4_farch_filter_spec *spec; |
2756 | unsigned i; |
2757 | |
2758 | for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) { |
2759 | spec = &table->spec[i]; |
2760 | spec->type = EF4_FARCH_FILTER_UC_DEF + i; |
2761 | ef4_farch_filter_init_rx_auto(efx, spec); |
2762 | __set_bit(i, table->used_bitmap); |
2763 | } |
2764 | } |
2765 | |
2766 | ef4_farch_filter_push_rx_config(efx); |
2767 | |
2768 | return 0; |
2769 | |
2770 | fail: |
2771 | ef4_farch_filter_table_remove(efx); |
2772 | return -ENOMEM; |
2773 | } |
2774 | |
2775 | /* Update scatter enable flags for filters pointing to our own RX queues */ |
2776 | void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx) |
2777 | { |
2778 | struct ef4_farch_filter_state *state = efx->filter_state; |
2779 | enum ef4_farch_filter_table_id table_id; |
2780 | struct ef4_farch_filter_table *table; |
2781 | ef4_oword_t filter; |
2782 | unsigned int filter_idx; |
2783 | |
	spin_lock_bh(&efx->filter_lock);
2785 | |
2786 | for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP; |
2787 | table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF; |
2788 | table_id++) { |
2789 | table = &state->table[table_id]; |
2790 | |
2791 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { |
2792 | if (!test_bit(filter_idx, table->used_bitmap) || |
2793 | table->spec[filter_idx].dmaq_id >= |
2794 | efx->n_rx_channels) |
2795 | continue; |
2796 | |
2797 | if (efx->rx_scatter) |
2798 | table->spec[filter_idx].flags |= |
2799 | EF4_FILTER_FLAG_RX_SCATTER; |
2800 | else |
2801 | table->spec[filter_idx].flags &= |
2802 | ~EF4_FILTER_FLAG_RX_SCATTER; |
2803 | |
2804 | if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF) |
2805 | /* Pushed by ef4_farch_filter_push_rx_config() */ |
2806 | continue; |
2807 | |
			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
			ef4_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
2811 | } |
2812 | } |
2813 | |
2814 | ef4_farch_filter_push_rx_config(efx); |
2815 | |
	spin_unlock_bh(&efx->filter_lock);
2817 | } |
2818 | |
2819 | #ifdef CONFIG_RFS_ACCEL |
2820 | |
2821 | s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx, |
2822 | struct ef4_filter_spec *gen_spec) |
2823 | { |
	return ef4_farch_filter_insert(efx, gen_spec, true);
2825 | } |
2826 | |
2827 | bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id, |
2828 | unsigned int index) |
2829 | { |
2830 | struct ef4_farch_filter_state *state = efx->filter_state; |
2831 | struct ef4_farch_filter_table *table = |
2832 | &state->table[EF4_FARCH_FILTER_TABLE_RX_IP]; |
2833 | |
2834 | if (test_bit(index, table->used_bitmap) && |
2835 | table->spec[index].priority == EF4_FILTER_PRI_HINT && |
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		ef4_farch_filter_table_clear_entry(efx, table, index);
2839 | return true; |
2840 | } |
2841 | |
2842 | return false; |
2843 | } |
2844 | |
2845 | #endif /* CONFIG_RFS_ACCEL */ |
2846 | |
2847 | void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx) |
2848 | { |
2849 | struct net_device *net_dev = efx->net_dev; |
2850 | struct netdev_hw_addr *ha; |
2851 | union ef4_multicast_hash *mc_hash = &efx->multicast_hash; |
2852 | u32 crc; |
2853 | int bit; |
2854 | |
2855 | if (!ef4_dev_registered(efx)) |
2856 | return; |
2857 | |
	netif_addr_lock_bh(net_dev);
2859 | |
2860 | efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); |
2861 | |
2862 | /* Build multicast hash table */ |
2863 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { |
2864 | memset(mc_hash, 0xff, sizeof(*mc_hash)); |
2865 | } else { |
2866 | memset(mc_hash, 0x00, sizeof(*mc_hash)); |
2867 | netdev_for_each_mc_addr(ha, net_dev) { |
2868 | crc = ether_crc_le(ETH_ALEN, ha->addr); |
2869 | bit = crc & (EF4_MCAST_HASH_ENTRIES - 1); |
			__set_bit_le(bit, mc_hash);
2871 | } |
2872 | |
2873 | /* Broadcast packets go through the multicast hash filter. |
2874 | * ether_crc_le() of the broadcast address is 0xbe2612ff |
2875 | * so we always add bit 0xff to the mask. |
2876 | */ |
		__set_bit_le(0xff, mc_hash);
2878 | } |
2879 | |
	netif_addr_unlock_bh(net_dev);
2881 | } |
2882 | |