1 | /* |
2 | * This file is part of the Chelsio T4 Ethernet driver for Linux. |
3 | * |
4 | * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. |
5 | * |
6 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU |
8 | * General Public License (GPL) Version 2, available from the file |
9 | * COPYING in the main directory of this source tree, or the |
10 | * OpenIB.org BSD license below: |
11 | * |
12 | * Redistribution and use in source and binary forms, with or |
13 | * without modification, are permitted provided that the following |
14 | * conditions are met: |
15 | * |
16 | * - Redistributions of source code must retain the above |
17 | * copyright notice, this list of conditions and the following |
18 | * disclaimer. |
19 | * |
20 | * - Redistributions in binary form must reproduce the above |
21 | * copyright notice, this list of conditions and the following |
22 | * disclaimer in the documentation and/or other materials |
23 | * provided with the distribution. |
24 | * |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
32 | * SOFTWARE. |
33 | */ |
34 | |
35 | #include <linux/skbuff.h> |
36 | #include <linux/netdevice.h> |
37 | #include <linux/etherdevice.h> |
38 | #include <linux/if_vlan.h> |
39 | #include <linux/ip.h> |
40 | #include <linux/dma-mapping.h> |
41 | #include <linux/jiffies.h> |
42 | #include <linux/prefetch.h> |
43 | #include <linux/export.h> |
44 | #include <net/xfrm.h> |
45 | #include <net/ipv6.h> |
46 | #include <net/tcp.h> |
47 | #include <net/busy_poll.h> |
48 | #ifdef CONFIG_CHELSIO_T4_FCOE |
49 | #include <scsi/fc/fc_fcoe.h> |
50 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
51 | #include "cxgb4.h" |
52 | #include "t4_regs.h" |
53 | #include "t4_values.h" |
54 | #include "t4_msg.h" |
55 | #include "t4fw_api.h" |
56 | #include "cxgb4_ptp.h" |
57 | #include "cxgb4_uld.h" |
58 | #include "cxgb4_tc_mqprio.h" |
59 | #include "sched.h" |
60 | |
61 | /* |
62 | * Rx buffer size. We use largish buffers if possible but settle for single |
63 | * pages under memory shortage. |
64 | */ |
65 | #if PAGE_SHIFT >= 16 |
66 | # define FL_PG_ORDER 0 |
67 | #else |
68 | # define FL_PG_ORDER (16 - PAGE_SHIFT) |
69 | #endif |
70 | |
71 | /* RX_PULL_LEN should be <= RX_COPY_THRES */ |
72 | #define RX_COPY_THRES 256 |
73 | #define RX_PULL_LEN 128 |
74 | |
75 | /* |
76 | * Main body length for sk_buffs used for Rx Ethernet packets with fragments. |
77 | * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. |
78 | */ |
79 | #define RX_PKT_SKB_LEN 512 |
80 | |
81 | /* |
82 | * Max number of Tx descriptors we clean up at a time. Should be modest as |
83 | * freeing skbs isn't cheap and it happens while holding locks. We just need |
 * to free packets faster than they arrive; we eventually catch up and keep
85 | * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should |
86 | * also match the CIDX Flush Threshold. |
87 | */ |
88 | #define MAX_TX_RECLAIM 32 |
89 | |
90 | /* |
91 | * Max number of Rx buffers we replenish at a time. Again keep this modest, |
92 | * allocating buffers isn't cheap either. |
93 | */ |
94 | #define MAX_RX_REFILL 16U |
95 | |
96 | /* |
97 | * Period of the Rx queue check timer. This timer is infrequent as it has |
98 | * something to do only when the system experiences severe memory shortage. |
99 | */ |
100 | #define RX_QCHECK_PERIOD (HZ / 2) |
101 | |
102 | /* |
103 | * Period of the Tx queue check timer. |
104 | */ |
105 | #define TX_QCHECK_PERIOD (HZ / 2) |
106 | |
107 | /* |
108 | * Max number of Tx descriptors to be reclaimed by the Tx timer. |
109 | */ |
110 | #define MAX_TIMER_TX_RECLAIM 100 |
111 | |
112 | /* |
113 | * Timer index used when backing off due to memory shortage. |
114 | */ |
115 | #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) |
116 | |
117 | /* |
118 | * Suspension threshold for non-Ethernet Tx queues. We require enough room |
119 | * for a full sized WR. |
120 | */ |
121 | #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) |
122 | |
123 | /* |
124 | * Max Tx descriptor space we allow for an Ethernet packet to be inlined |
125 | * into a WR. |
126 | */ |
127 | #define MAX_IMM_TX_PKT_LEN 256 |
128 | |
129 | /* |
130 | * Max size of a WR sent through a control Tx queue. |
131 | */ |
132 | #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN |
133 | |
134 | struct rx_sw_desc { /* SW state per Rx descriptor */ |
135 | struct page *page; |
136 | dma_addr_t dma_addr; |
137 | }; |
138 | |
139 | /* |
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
141 | * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs. |
142 | * We could easily support more but there doesn't seem to be much need for |
143 | * that ... |
144 | */ |
145 | #define FL_MTU_SMALL 1500 |
146 | #define FL_MTU_LARGE 9000 |
147 | |
148 | static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, |
149 | unsigned int mtu) |
150 | { |
151 | struct sge *s = &adapter->sge; |
152 | |
153 | return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); |
154 | } |
155 | |
156 | #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) |
157 | #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) |
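
/*
 * Illustrative example (the parameter values are assumptions, not real
 * hardware settings): with a packet shift of 2 bytes and a Free List
 * alignment of 64 bytes, FL_MTU_SMALL_BUFSIZE() would be
 * ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes.  The real pktshift and
 * fl_align values come from the adapter's SGE configuration.
 */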
158 | |
159 | /* |
160 | * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses |
161 | * these to specify the buffer size as an index into the SGE Free List Buffer |
162 | * Size register array. We also use bit 4, when the buffer has been unmapped |
163 | * for DMA, but this is of course never sent to the hardware and is only used |
164 | * to prevent double unmappings. All of the above requires that the Free List |
165 | * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are |
 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
167 | * Free List Buffer alignment is 32 bytes, this works out for us ... |
168 | */ |
169 | enum { |
170 | RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ |
	RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
172 | RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ |
173 | |
174 | /* |
175 | * XXX We shouldn't depend on being able to use these indices. |
176 | * XXX Especially when some other Master PF has initialized the |
177 | * XXX adapter or we use the Firmware Configuration File. We |
178 | * XXX should really search through the Host Buffer Size register |
179 | * XXX array for the appropriately sized buffer indices. |
180 | */ |
181 | RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ |
	RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
183 | |
184 | RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ |
185 | RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ |
186 | }; |
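
/*
 * Illustrative example of the encoding above (the address is made up): a
 * large page mapped at bus address 0x7f2e40000 is stored in the software
 * descriptor as 0x7f2e40000 | RX_LARGE_PG_BUF, and the same low bits are
 * written into the hardware Free List entry so the SGE knows which Buffer
 * Size register to use.  get_buf_addr() below masks RX_BUF_FLAGS back off
 * before the buffer is unmapped or its page is freed.
 */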
187 | |
188 | static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; |
189 | #define MIN_NAPI_WORK 1 |
190 | |
191 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) |
192 | { |
193 | return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; |
194 | } |
195 | |
196 | static inline bool is_buf_mapped(const struct rx_sw_desc *d) |
197 | { |
198 | return !(d->dma_addr & RX_UNMAPPED_BUF); |
199 | } |
200 | |
201 | /** |
202 | * txq_avail - return the number of available slots in a Tx queue |
203 | * @q: the Tx queue |
204 | * |
205 | * Returns the number of descriptors in a Tx queue available to write new |
206 | * packets. |
207 | */ |
208 | static inline unsigned int txq_avail(const struct sge_txq *q) |
209 | { |
210 | return q->size - 1 - q->in_use; |
211 | } |
212 | |
213 | /** |
214 | * fl_cap - return the capacity of a free-buffer list |
215 | * @fl: the FL |
216 | * |
217 | * Returns the capacity of a free-buffer list. The capacity is less than |
218 | * the size because one descriptor needs to be left unpopulated, otherwise |
219 | * HW will think the FL is empty. |
220 | */ |
221 | static inline unsigned int fl_cap(const struct sge_fl *fl) |
222 | { |
223 | return fl->size - 8; /* 1 descriptor = 8 buffers */ |
224 | } |
225 | |
226 | /** |
227 | * fl_starving - return whether a Free List is starving. |
228 | * @adapter: pointer to the adapter |
229 | * @fl: the Free List |
230 | * |
231 | * Tests specified Free List to see whether the number of buffers |
 * available to the hardware has fallen below our "starvation"
233 | * threshold. |
234 | */ |
235 | static inline bool fl_starving(const struct adapter *adapter, |
236 | const struct sge_fl *fl) |
237 | { |
238 | const struct sge *s = &adapter->sge; |
239 | |
240 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; |
241 | } |
242 | |
243 | int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, |
244 | dma_addr_t *addr) |
245 | { |
246 | const skb_frag_t *fp, *end; |
247 | const struct skb_shared_info *si; |
248 | |
249 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); |
	if (dma_mapping_error(dev, *addr))
251 | goto out_err; |
252 | |
253 | si = skb_shinfo(skb); |
254 | end = &si->frags[si->nr_frags]; |
255 | |
256 | for (fp = si->frags; fp < end; fp++) { |
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
260 | goto unwind; |
261 | } |
262 | return 0; |
263 | |
264 | unwind: |
265 | while (fp-- > si->frags) |
266 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
267 | |
268 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); |
269 | out_err: |
270 | return -ENOMEM; |
271 | } |
272 | EXPORT_SYMBOL(cxgb4_map_skb); |
273 | |
274 | static void unmap_skb(struct device *dev, const struct sk_buff *skb, |
275 | const dma_addr_t *addr) |
276 | { |
277 | const skb_frag_t *fp, *end; |
278 | const struct skb_shared_info *si; |
279 | |
280 | dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); |
281 | |
282 | si = skb_shinfo(skb); |
283 | end = &si->frags[si->nr_frags]; |
284 | for (fp = si->frags; fp < end; fp++) |
285 | dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); |
286 | } |
287 | |
288 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
289 | /** |
290 | * deferred_unmap_destructor - unmap a packet when it is freed |
291 | * @skb: the packet |
292 | * |
293 | * This is the packet destructor used for Tx packets that need to remain |
294 | * mapped until they are freed rather than until their Tx descriptors are |
295 | * freed. |
296 | */ |
297 | static void deferred_unmap_destructor(struct sk_buff *skb) |
298 | { |
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
300 | } |
301 | #endif |
302 | |
303 | /** |
304 | * free_tx_desc - reclaims Tx descriptors and their buffers |
305 | * @adap: the adapter |
306 | * @q: the Tx queue to reclaim descriptors from |
307 | * @n: the number of descriptors to reclaim |
308 | * @unmap: whether the buffers should be unmapped for DMA |
309 | * |
310 | * Reclaims Tx descriptors from an SGE Tx queue and frees the associated |
311 | * Tx buffers. Called with the Tx queue lock held. |
312 | */ |
313 | void free_tx_desc(struct adapter *adap, struct sge_txq *q, |
314 | unsigned int n, bool unmap) |
315 | { |
316 | unsigned int cidx = q->cidx; |
317 | struct tx_sw_desc *d; |
318 | |
319 | d = &q->sdesc[cidx]; |
320 | while (n--) { |
321 | if (d->skb) { /* an SGL is present */ |
322 | if (unmap && d->addr[0]) { |
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
324 | memset(d->addr, 0, sizeof(d->addr)); |
325 | } |
			dev_consume_skb_any(d->skb);
327 | d->skb = NULL; |
328 | } |
329 | ++d; |
330 | if (++cidx == q->size) { |
331 | cidx = 0; |
332 | d = q->sdesc; |
333 | } |
334 | } |
335 | q->cidx = cidx; |
336 | } |
337 | |
338 | /* |
339 | * Return the number of reclaimable descriptors in a Tx queue. |
340 | */ |
341 | static inline int reclaimable(const struct sge_txq *q) |
342 | { |
343 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
344 | hw_cidx -= q->cidx; |
345 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; |
346 | } |
347 | |
348 | /** |
349 | * reclaim_completed_tx - reclaims completed TX Descriptors |
350 | * @adap: the adapter |
351 | * @q: the Tx queue to reclaim completed descriptors from |
352 | * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 |
353 | * @unmap: whether the buffers should be unmapped for DMA |
354 | * |
355 | * Reclaims Tx Descriptors that the SGE has indicated it has processed, |
 * and frees the associated buffers if possible. If @maxreclaim == -1, then
 * we'll use a default maximum. Called with the TX Queue locked.
358 | */ |
359 | static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, |
360 | int maxreclaim, bool unmap) |
361 | { |
362 | int reclaim = reclaimable(q); |
363 | |
364 | if (reclaim) { |
365 | /* |
366 | * Limit the amount of clean up work we do at a time to keep |
367 | * the Tx lock hold time O(1). |
368 | */ |
369 | if (maxreclaim < 0) |
370 | maxreclaim = MAX_TX_RECLAIM; |
371 | if (reclaim > maxreclaim) |
372 | reclaim = maxreclaim; |
373 | |
		free_tx_desc(adap, q, reclaim, unmap);
375 | q->in_use -= reclaim; |
376 | } |
377 | |
378 | return reclaim; |
379 | } |
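
/*
 * Usage note: the hot Tx path below (e.g. cxgb4_eth_xmit()) calls this with
 * maxreclaim == -1, capping the work at MAX_TX_RECLAIM descriptors per call,
 * while t4_sge_eth_txq_egress_update() simply passes through whatever budget
 * its caller supplies.
 */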
380 | |
381 | /** |
382 | * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors |
383 | * @adap: the adapter |
384 | * @q: the Tx queue to reclaim completed descriptors from |
385 | * @unmap: whether the buffers should be unmapped for DMA |
386 | * |
387 | * Reclaims Tx descriptors that the SGE has indicated it has processed, |
388 | * and frees the associated buffers if possible. Called with the Tx |
389 | * queue locked. |
390 | */ |
391 | void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, |
392 | bool unmap) |
393 | { |
	(void)reclaim_completed_tx(adap, q, -1, unmap);
395 | } |
396 | EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); |
397 | |
398 | static inline int get_buf_size(struct adapter *adapter, |
399 | const struct rx_sw_desc *d) |
400 | { |
401 | struct sge *s = &adapter->sge; |
402 | unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; |
403 | int buf_size; |
404 | |
405 | switch (rx_buf_size_idx) { |
406 | case RX_SMALL_PG_BUF: |
407 | buf_size = PAGE_SIZE; |
408 | break; |
409 | |
410 | case RX_LARGE_PG_BUF: |
411 | buf_size = PAGE_SIZE << s->fl_pg_order; |
412 | break; |
413 | |
414 | case RX_SMALL_MTU_BUF: |
415 | buf_size = FL_MTU_SMALL_BUFSIZE(adapter); |
416 | break; |
417 | |
418 | case RX_LARGE_MTU_BUF: |
419 | buf_size = FL_MTU_LARGE_BUFSIZE(adapter); |
420 | break; |
421 | |
422 | default: |
423 | BUG(); |
424 | } |
425 | |
426 | return buf_size; |
427 | } |
428 | |
429 | /** |
430 | * free_rx_bufs - free the Rx buffers on an SGE free list |
431 | * @adap: the adapter |
432 | * @q: the SGE free list to free buffers from |
433 | * @n: how many buffers to free |
434 | * |
435 | * Release the next @n buffers on an SGE free-buffer Rx queue. The |
436 | * buffers must be made inaccessible to HW before calling this function. |
437 | */ |
438 | static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) |
439 | { |
440 | while (n--) { |
441 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; |
442 | |
443 | if (is_buf_mapped(d)) |
444 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), |
445 | get_buf_size(adap, d), |
446 | DMA_FROM_DEVICE); |
		put_page(d->page);
448 | d->page = NULL; |
449 | if (++q->cidx == q->size) |
450 | q->cidx = 0; |
451 | q->avail--; |
452 | } |
453 | } |
454 | |
455 | /** |
456 | * unmap_rx_buf - unmap the current Rx buffer on an SGE free list |
457 | * @adap: the adapter |
458 | * @q: the SGE free list |
459 | * |
460 | * Unmap the current buffer on an SGE free-buffer Rx queue. The |
461 | * buffer must be made inaccessible to HW before calling this function. |
462 | * |
463 | * This is similar to @free_rx_bufs above but does not free the buffer. |
464 | * Do note that the FL still loses any further access to the buffer. |
465 | */ |
466 | static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) |
467 | { |
468 | struct rx_sw_desc *d = &q->sdesc[q->cidx]; |
469 | |
470 | if (is_buf_mapped(d)) |
471 | dma_unmap_page(adap->pdev_dev, get_buf_addr(d), |
472 | get_buf_size(adap, d), DMA_FROM_DEVICE); |
473 | d->page = NULL; |
474 | if (++q->cidx == q->size) |
475 | q->cidx = 0; |
476 | q->avail--; |
477 | } |
478 | |
479 | static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) |
480 | { |
481 | if (q->pend_cred >= 8) { |
482 | u32 val = adap->params.arch.sge_fl_db; |
483 | |
		if (is_t4(adap->params.chip))
485 | val |= PIDX_V(q->pend_cred / 8); |
486 | else |
487 | val |= PIDX_T5_V(q->pend_cred / 8); |
488 | |
489 | /* Make sure all memory writes to the Free List queue are |
490 | * committed before we tell the hardware about them. |
491 | */ |
492 | wmb(); |
493 | |
494 | /* If we don't have access to the new User Doorbell (T5+), use |
495 | * the old doorbell mechanism; otherwise use the new BAR2 |
496 | * mechanism. |
497 | */ |
498 | if (unlikely(q->bar2_addr == NULL)) { |
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
504 | |
505 | /* This Write memory Barrier will force the write to |
506 | * the User Doorbell area to be flushed. |
507 | */ |
508 | wmb(); |
509 | } |
510 | q->pend_cred &= 7; |
511 | } |
512 | } |
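
/*
 * Example of the credit accounting above: with 27 pending credits, the
 * doorbell write advances the Free List PIDX by 27 / 8 = 3 (24 buffers are
 * handed to the hardware) and the remaining 27 & 7 = 3 credits stay in
 * pend_cred until at least 8 have accumulated again.
 */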
513 | |
514 | static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, |
515 | dma_addr_t mapping) |
516 | { |
517 | sd->page = pg; |
518 | sd->dma_addr = mapping; /* includes size low bits */ |
519 | } |
520 | |
521 | /** |
522 | * refill_fl - refill an SGE Rx buffer ring |
523 | * @adap: the adapter |
524 | * @q: the ring to refill |
525 | * @n: the number of new buffers to allocate |
526 | * @gfp: the gfp flags for the allocations |
527 | * |
528 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, |
529 | * allocated with the supplied gfp flags. The caller must assure that |
530 | * @n does not exceed the queue's capacity. If afterwards the queue is |
 * found critically low, mark it as starving in the bitmap of starving FLs.
532 | * |
533 | * Returns the number of buffers allocated. |
534 | */ |
535 | static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, |
536 | gfp_t gfp) |
537 | { |
538 | struct sge *s = &adap->sge; |
539 | struct page *pg; |
540 | dma_addr_t mapping; |
541 | unsigned int cred = q->avail; |
542 | __be64 *d = &q->desc[q->pidx]; |
543 | struct rx_sw_desc *sd = &q->sdesc[q->pidx]; |
544 | int node; |
545 | |
546 | #ifdef CONFIG_DEBUG_FS |
547 | if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) |
548 | goto out; |
549 | #endif |
550 | |
551 | gfp |= __GFP_NOWARN; |
	node = dev_to_node(adap->pdev_dev);
553 | |
554 | if (s->fl_pg_order == 0) |
555 | goto alloc_small_pages; |
556 | |
557 | /* |
558 | * Prefer large buffers |
559 | */ |
560 | while (n) { |
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
562 | if (unlikely(!pg)) { |
563 | q->large_alloc_failed++; |
564 | break; /* fall back to single pages */ |
565 | } |
566 | |
567 | mapping = dma_map_page(adap->pdev_dev, pg, 0, |
568 | PAGE_SIZE << s->fl_pg_order, |
569 | DMA_FROM_DEVICE); |
570 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { |
			__free_pages(pg, s->fl_pg_order);
572 | q->mapping_err++; |
573 | goto out; /* do not try small pages for this error */ |
574 | } |
575 | mapping |= RX_LARGE_PG_BUF; |
576 | *d++ = cpu_to_be64(mapping); |
577 | |
578 | set_rx_sw_desc(sd, pg, mapping); |
579 | sd++; |
580 | |
581 | q->avail++; |
582 | if (++q->pidx == q->size) { |
583 | q->pidx = 0; |
584 | sd = q->sdesc; |
585 | d = q->desc; |
586 | } |
587 | n--; |
588 | } |
589 | |
590 | alloc_small_pages: |
591 | while (n--) { |
		pg = alloc_pages_node(node, gfp, 0);
593 | if (unlikely(!pg)) { |
594 | q->alloc_failed++; |
595 | break; |
596 | } |
597 | |
598 | mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, |
599 | DMA_FROM_DEVICE); |
600 | if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { |
			put_page(pg);
602 | q->mapping_err++; |
603 | goto out; |
604 | } |
605 | *d++ = cpu_to_be64(mapping); |
606 | |
607 | set_rx_sw_desc(sd, pg, mapping); |
608 | sd++; |
609 | |
610 | q->avail++; |
611 | if (++q->pidx == q->size) { |
612 | q->pidx = 0; |
613 | sd = q->sdesc; |
614 | d = q->desc; |
615 | } |
616 | } |
617 | |
618 | out: cred = q->avail - cred; |
619 | q->pend_cred += cred; |
620 | ring_fl_db(adap, q); |
621 | |
622 | if (unlikely(fl_starving(adap, q))) { |
623 | smp_wmb(); |
624 | q->low++; |
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
627 | } |
628 | |
629 | return cred; |
630 | } |
631 | |
632 | static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) |
633 | { |
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
635 | GFP_ATOMIC); |
636 | } |
637 | |
638 | /** |
639 | * alloc_ring - allocate resources for an SGE descriptor ring |
640 | * @dev: the PCI device's core device |
641 | * @nelem: the number of descriptors |
642 | * @elem_size: the size of each descriptor |
643 | * @sw_size: the size of the SW state associated with each ring element |
644 | * @phys: the physical address of the allocated ring |
645 | * @metadata: address of the array holding the SW state for the ring |
646 | * @stat_size: extra space in HW ring for status information |
647 | * @node: preferred node for memory allocations |
648 | * |
649 | * Allocates resources for an SGE descriptor ring, such as Tx queues, |
650 | * free buffer lists, or response queues. Each SGE ring requires |
651 | * space for its HW descriptors plus, optionally, space for the SW state |
652 | * associated with each HW entry (the metadata). The function returns |
653 | * three values: the virtual address for the HW ring (the return value |
654 | * of the function), the bus address of the HW ring, and the address |
655 | * of the SW ring. |
656 | */ |
657 | static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, |
658 | size_t sw_size, dma_addr_t *phys, void *metadata, |
659 | size_t stat_size, int node) |
660 | { |
661 | size_t len = nelem * elem_size + stat_size; |
662 | void *s = NULL; |
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
664 | |
665 | if (!p) |
666 | return NULL; |
667 | if (sw_size) { |
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
669 | |
670 | if (!s) { |
			dma_free_coherent(dev, len, p, *phys);
672 | return NULL; |
673 | } |
674 | } |
675 | if (metadata) |
676 | *(void **)metadata = s; |
677 | return p; |
678 | } |
679 | |
680 | /** |
681 | * sgl_len - calculates the size of an SGL of the given capacity |
682 | * @n: the number of SGL entries |
683 | * |
684 | * Calculates the number of flits needed for a scatter/gather list that |
685 | * can hold the given number of entries. |
686 | */ |
687 | static inline unsigned int sgl_len(unsigned int n) |
688 | { |
689 | /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA |
690 | * addresses. The DSGL Work Request starts off with a 32-bit DSGL |
691 | * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, |
692 | * repeated sequences of { Length[i], Length[i+1], Address[i], |
693 | * Address[i+1] } (this ensures that all addresses are on 64-bit |
694 | * boundaries). If N is even, then Length[N+1] should be set to 0 and |
695 | * Address[N+1] is omitted. |
696 | * |
697 | * The following calculation incorporates all of the above. It's |
698 | * somewhat hard to follow but, briefly: the "+2" accounts for the |
699 | * first two flits which include the DSGL header, Length0 and |
700 | * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 |
701 | * flits for every pair of the remaining N) +1 if (n-1) is odd; and |
702 | * finally the "+((n-1)&1)" adds the one remaining flit needed if |
703 | * (n-1) is odd ... |
704 | */ |
705 | n--; |
706 | return (3 * n) / 2 + (n & 1) + 2; |
707 | } |
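
/*
 * Worked example: an skb with a linear header plus three page fragments has
 * n = 4 SGL entries, so sgl_len(4) = (3 * 3) / 2 + (3 & 1) + 2 = 7 flits,
 * i.e. 56 bytes of descriptor space for the DSGL.
 */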
708 | |
709 | /** |
710 | * flits_to_desc - returns the num of Tx descriptors for the given flits |
711 | * @n: the number of flits |
712 | * |
713 | * Returns the number of Tx descriptors needed for the supplied number |
714 | * of flits. |
715 | */ |
716 | static inline unsigned int flits_to_desc(unsigned int n) |
717 | { |
718 | BUG_ON(n > SGE_MAX_WR_LEN / 8); |
719 | return DIV_ROUND_UP(n, 8); |
720 | } |
721 | |
722 | /** |
723 | * is_eth_imm - can an Ethernet packet be sent as immediate data? |
724 | * @skb: the packet |
725 | * @chip_ver: chip version |
726 | * |
727 | * Returns whether an Ethernet packet is small enough to fit as |
728 | * immediate data. Return value corresponds to headroom required. |
729 | */ |
730 | static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) |
731 | { |
732 | int hdrlen = 0; |
733 | |
734 | if (skb->encapsulation && skb_shinfo(skb)->gso_size && |
735 | chip_ver > CHELSIO_T5) { |
736 | hdrlen = sizeof(struct cpl_tx_tnl_lso); |
737 | hdrlen += sizeof(struct cpl_tx_pkt_core); |
738 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
739 | return 0; |
740 | } else { |
741 | hdrlen = skb_shinfo(skb)->gso_size ? |
742 | sizeof(struct cpl_tx_pkt_lso_core) : 0; |
743 | hdrlen += sizeof(struct cpl_tx_pkt); |
744 | } |
745 | if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) |
746 | return hdrlen; |
747 | return 0; |
748 | } |
749 | |
750 | /** |
751 | * calc_tx_flits - calculate the number of flits for a packet Tx WR |
752 | * @skb: the packet |
753 | * @chip_ver: chip version |
754 | * |
755 | * Returns the number of flits needed for a Tx WR for the given Ethernet |
756 | * packet, including the needed WR and CPL headers. |
757 | */ |
758 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb, |
759 | unsigned int chip_ver) |
760 | { |
761 | unsigned int flits; |
762 | int hdrlen = is_eth_imm(skb, chip_ver); |
763 | |
764 | /* If the skb is small enough, we can pump it out as a work request |
765 | * with only immediate data. In that case we just have to have the |
766 | * TX Packet header plus the skb data in the Work Request. |
767 | */ |
768 | |
769 | if (hdrlen) |
770 | return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); |
771 | |
772 | /* Otherwise, we're going to have to construct a Scatter gather list |
773 | * of the skb body and fragments. We also include the flits necessary |
774 | * for the TX Packet Work Request and CPL. We always have a firmware |
775 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and |
776 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL |
777 | * message or, if we're doing a Large Send Offload, an LSO CPL message |
778 | * with an embedded TX Packet Write CPL message. |
779 | */ |
780 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); |
781 | if (skb_shinfo(skb)->gso_size) { |
782 | if (skb->encapsulation && chip_ver > CHELSIO_T5) { |
783 | hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + |
784 | sizeof(struct cpl_tx_tnl_lso); |
785 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { |
786 | u32 pkt_hdrlen; |
787 | |
			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
						     skb_headlen(skb));
790 | hdrlen = sizeof(struct fw_eth_tx_eo_wr) + |
791 | round_up(pkt_hdrlen, 16); |
792 | } else { |
793 | hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + |
794 | sizeof(struct cpl_tx_pkt_lso_core); |
795 | } |
796 | |
797 | hdrlen += sizeof(struct cpl_tx_pkt_core); |
798 | flits += (hdrlen / sizeof(__be64)); |
799 | } else { |
800 | flits += (sizeof(struct fw_eth_tx_pkt_wr) + |
801 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); |
802 | } |
803 | return flits; |
804 | } |
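
/*
 * Worked example (assuming the usual 16-byte fw_eth_tx_pkt_wr and 16-byte
 * cpl_tx_pkt_core): a non-GSO packet that is too large to be inlined and
 * carries two page fragments needs sgl_len(3) = 5 flits for the DSGL plus
 * 4 flits of WR/CPL headers, i.e. 9 flits, which flits_to_desc() rounds up
 * to two 64-byte Tx descriptors.
 */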
805 | |
806 | /** |
807 | * cxgb4_write_sgl - populate a scatter/gather list for a packet |
808 | * @skb: the packet |
809 | * @q: the Tx queue we are writing into |
810 | * @sgl: starting location for writing the SGL |
811 | * @end: points right after the end of the SGL |
812 | * @start: start offset into skb main-body data to include in the SGL |
813 | * @addr: the list of bus addresses for the SGL elements |
814 | * |
815 | * Generates a gather list for the buffers that make up a packet. |
816 | * The caller must provide adequate space for the SGL that will be written. |
817 | * The SGL includes all of the packet's page fragments and the data in its |
818 | * main body except for the first @start bytes. @sgl must be 16-byte |
819 | * aligned and within a Tx descriptor with available space. @end points |
820 | * right after the end of the SGL but does not account for any potential |
821 | * wrap around, i.e., @end > @sgl. |
822 | */ |
823 | void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, |
824 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, |
825 | const dma_addr_t *addr) |
826 | { |
827 | unsigned int i, len; |
828 | struct ulptx_sge_pair *to; |
829 | const struct skb_shared_info *si = skb_shinfo(skb); |
830 | unsigned int nfrags = si->nr_frags; |
831 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; |
832 | |
833 | len = skb_headlen(skb) - start; |
834 | if (likely(len)) { |
835 | sgl->len0 = htonl(len); |
836 | sgl->addr0 = cpu_to_be64(addr[0] + start); |
837 | nfrags++; |
838 | } else { |
839 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
840 | sgl->addr0 = cpu_to_be64(addr[1]); |
841 | } |
842 | |
843 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
844 | ULPTX_NSGE_V(nfrags)); |
845 | if (likely(--nfrags == 0)) |
846 | return; |
847 | /* |
848 | * Most of the complexity below deals with the possibility we hit the |
849 | * end of the queue in the middle of writing the SGL. For this case |
850 | * only we create the SGL in a temporary buffer and then copy it. |
851 | */ |
852 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; |
853 | |
854 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { |
855 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
856 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); |
857 | to->addr[0] = cpu_to_be64(addr[i]); |
858 | to->addr[1] = cpu_to_be64(addr[++i]); |
859 | } |
860 | if (nfrags) { |
861 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
862 | to->len[1] = cpu_to_be32(0); |
863 | to->addr[0] = cpu_to_be64(addr[i + 1]); |
864 | } |
865 | if (unlikely((u8 *)end > (u8 *)q->stat)) { |
866 | unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; |
867 | |
868 | if (likely(part0)) |
869 | memcpy(sgl->sge, buf, part0); |
870 | part1 = (u8 *)end - (u8 *)q->stat; |
871 | memcpy(q->desc, (u8 *)buf + part0, part1); |
872 | end = (void *)q->desc + part1; |
873 | } |
874 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ |
875 | *end = 0; |
876 | } |
877 | EXPORT_SYMBOL(cxgb4_write_sgl); |
878 | |
879 | /* cxgb4_write_partial_sgl - populate SGL for partial packet |
880 | * @skb: the packet |
881 | * @q: the Tx queue we are writing into |
882 | * @sgl: starting location for writing the SGL |
883 | * @end: points right after the end of the SGL |
884 | * @addr: the list of bus addresses for the SGL elements |
885 | * @start: start offset in the SKB where partial data starts |
886 | * @len: length of data from @start to send out |
887 | * |
888 | * This API will handle sending out partial data of a skb if required. |
889 | * Unlike cxgb4_write_sgl, @start can be any offset into the skb data, |
890 | * and @len will decide how much data after @start offset to send out. |
891 | */ |
892 | void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q, |
893 | struct ulptx_sgl *sgl, u64 *end, |
894 | const dma_addr_t *addr, u32 start, u32 len) |
895 | { |
896 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to; |
897 | u32 frag_size, skb_linear_data_len = skb_headlen(skb); |
898 | struct skb_shared_info *si = skb_shinfo(skb); |
899 | u8 i = 0, frag_idx = 0, nfrags = 0; |
900 | skb_frag_t *frag; |
901 | |
902 | /* Fill the first SGL either from linear data or from partial |
903 | * frag based on @start. |
904 | */ |
905 | if (unlikely(start < skb_linear_data_len)) { |
906 | frag_size = min(len, skb_linear_data_len - start); |
907 | sgl->len0 = htonl(frag_size); |
908 | sgl->addr0 = cpu_to_be64(addr[0] + start); |
909 | len -= frag_size; |
910 | nfrags++; |
911 | } else { |
912 | start -= skb_linear_data_len; |
913 | frag = &si->frags[frag_idx]; |
914 | frag_size = skb_frag_size(frag); |
915 | /* find the first frag */ |
916 | while (start >= frag_size) { |
917 | start -= frag_size; |
918 | frag_idx++; |
919 | frag = &si->frags[frag_idx]; |
920 | frag_size = skb_frag_size(frag); |
921 | } |
922 | |
923 | frag_size = min(len, skb_frag_size(frag) - start); |
924 | sgl->len0 = cpu_to_be32(frag_size); |
925 | sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); |
926 | len -= frag_size; |
927 | nfrags++; |
928 | frag_idx++; |
929 | } |
930 | |
931 | /* If the entire partial data fit in one SGL, then send it out |
932 | * now. |
933 | */ |
934 | if (!len) |
935 | goto done; |
936 | |
937 | /* Most of the complexity below deals with the possibility we hit the |
938 | * end of the queue in the middle of writing the SGL. For this case |
939 | * only we create the SGL in a temporary buffer and then copy it. |
940 | */ |
941 | to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; |
942 | |
943 | /* If the skb couldn't fit in first SGL completely, fill the |
944 | * rest of the frags in subsequent SGLs. Note that each SGL |
945 | * pair can store 2 frags. |
946 | */ |
947 | while (len) { |
948 | frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); |
949 | to->len[i & 1] = cpu_to_be32(frag_size); |
950 | to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); |
951 | if (i && (i & 1)) |
952 | to++; |
953 | nfrags++; |
954 | frag_idx++; |
955 | i++; |
956 | len -= frag_size; |
957 | } |
958 | |
959 | /* If we ended in an odd boundary, then set the second SGL's |
960 | * length in the pair to 0. |
961 | */ |
962 | if (i & 1) |
963 | to->len[1] = cpu_to_be32(0); |
964 | |
965 | /* Copy from temporary buffer to Tx ring, in case we hit the |
966 | * end of the queue in the middle of writing the SGL. |
967 | */ |
968 | if (unlikely((u8 *)end > (u8 *)q->stat)) { |
969 | u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; |
970 | |
971 | if (likely(part0)) |
972 | memcpy(sgl->sge, buf, part0); |
973 | part1 = (u8 *)end - (u8 *)q->stat; |
974 | memcpy(q->desc, (u8 *)buf + part0, part1); |
975 | end = (void *)q->desc + part1; |
976 | } |
977 | |
978 | /* 0-pad to multiple of 16 */ |
979 | if ((uintptr_t)end & 8) |
980 | *end = 0; |
981 | done: |
982 | sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | |
983 | ULPTX_NSGE_V(nfrags)); |
984 | } |
985 | EXPORT_SYMBOL(cxgb4_write_partial_sgl); |
986 | |
987 | /* This function copies 64 byte coalesced work request to |
988 | * memory mapped BAR2 space. For coalesced WR SGE fetches |
989 | * data from the FIFO instead of from Host. |
990 | */ |
991 | static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) |
992 | { |
993 | int count = 8; |
994 | |
995 | while (count) { |
		writeq(*src, dst);
997 | src++; |
998 | dst++; |
999 | count--; |
1000 | } |
1001 | } |
1002 | |
1003 | /** |
1004 | * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell |
1005 | * @adap: the adapter |
1006 | * @q: the Tx queue |
1007 | * @n: number of new descriptors to give to HW |
1008 | * |
 * Ring the doorbell for a Tx queue.
1010 | */ |
1011 | inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) |
1012 | { |
1013 | /* Make sure that all writes to the TX Descriptors are committed |
1014 | * before we tell the hardware about them. |
1015 | */ |
1016 | wmb(); |
1017 | |
1018 | /* If we don't have access to the new User Doorbell (T5+), use the old |
1019 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
1020 | */ |
1021 | if (unlikely(q->bar2_addr == NULL)) { |
1022 | u32 val = PIDX_V(n); |
1023 | unsigned long flags; |
1024 | |
1025 | /* For T4 we need to participate in the Doorbell Recovery |
1026 | * mechanism. |
1027 | */ |
1028 | spin_lock_irqsave(&q->db_lock, flags); |
1029 | if (!q->db_disabled) |
1030 | t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), |
1031 | QID_V(q->cntxt_id) | val); |
1032 | else |
1033 | q->db_pidx_inc += n; |
1034 | q->db_pidx = q->pidx; |
		spin_unlock_irqrestore(&q->db_lock, flags);
1036 | } else { |
1037 | u32 val = PIDX_T5_V(n); |
1038 | |
1039 | /* T4 and later chips share the same PIDX field offset within |
1040 | * the doorbell, but T5 and later shrank the field in order to |
1041 | * gain a bit for Doorbell Priority. The field was absurdly |
1042 | * large in the first place (14 bits) so we just use the T5 |
1043 | * and later limits and warn if a Queue ID is too large. |
1044 | */ |
1045 | WARN_ON(val & DBPRIO_F); |
1046 | |
1047 | /* If we're only writing a single TX Descriptor and we can use |
1048 | * Inferred QID registers, we can use the Write Combining |
1049 | * Gather Buffer; otherwise we use the simple doorbell. |
1050 | */ |
1051 | if (n == 1 && q->bar2_qid == 0) { |
1052 | int index = (q->pidx |
1053 | ? (q->pidx - 1) |
1054 | : (q->size - 1)); |
1055 | u64 *wr = (u64 *)&q->desc[index]; |
1056 | |
			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
1060 | } else { |
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
1063 | } |
1064 | |
1065 | /* This Write Memory Barrier will force the write to the User |
1066 | * Doorbell area to be flushed. This is needed to prevent |
1067 | * writes on different CPUs for the same queue from hitting |
1068 | * the adapter out of order. This is required when some Work |
1069 | * Requests take the Write Combine Gather Buffer path (user |
1070 | * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some |
1071 | * take the traditional path where we simply increment the |
1072 | * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the |
1073 | * hardware DMA read the actual Work Request. |
1074 | */ |
1075 | wmb(); |
1076 | } |
1077 | } |
1078 | EXPORT_SYMBOL(cxgb4_ring_tx_db); |
1079 | |
1080 | /** |
1081 | * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors |
1082 | * @skb: the packet |
1083 | * @q: the Tx queue where the packet will be inlined |
1084 | * @pos: starting position in the Tx queue where to inline the packet |
1085 | * |
1086 | * Inline a packet's contents directly into Tx descriptors, starting at |
1087 | * the given position within the Tx DMA ring. |
1088 | * Most of the complexity of this operation is dealing with wrap arounds |
1089 | * in the middle of the packet we want to inline. |
1090 | */ |
1091 | void cxgb4_inline_tx_skb(const struct sk_buff *skb, |
1092 | const struct sge_txq *q, void *pos) |
1093 | { |
1094 | int left = (void *)q->stat - pos; |
1095 | u64 *p; |
1096 | |
1097 | if (likely(skb->len <= left)) { |
1098 | if (likely(!skb->data_len)) |
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
1106 | pos = (void *)q->desc + (skb->len - left); |
1107 | } |
1108 | |
1109 | /* 0-pad to multiple of 16 */ |
1110 | p = PTR_ALIGN(pos, 8); |
1111 | if ((uintptr_t)p & 8) |
1112 | *p = 0; |
1113 | } |
1114 | EXPORT_SYMBOL(cxgb4_inline_tx_skb); |
1115 | |
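/*
 * inline_tx_skb_header - inline an skb's packet header into Tx descriptors
 *
 * Copies the first @length bytes of @skb's linear data into the Tx ring at
 * @pos, handling a wrap past the end of the queue, and returns the
 * 16-byte-aligned position at which the rest of the Work Request (e.g. the
 * SGL) should be written.
 */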
static void *inline_tx_skb_header(const struct sk_buff *skb,
1117 | const struct sge_txq *q, void *pos, |
1118 | int length) |
1119 | { |
1120 | u64 *p; |
1121 | int left = (void *)q->stat - pos; |
1122 | |
1123 | if (likely(length <= left)) { |
1124 | memcpy(pos, skb->data, length); |
1125 | pos += length; |
1126 | } else { |
1127 | memcpy(pos, skb->data, left); |
1128 | memcpy(q->desc, skb->data + left, length - left); |
1129 | pos = (void *)q->desc + (length - left); |
1130 | } |
1131 | /* 0-pad to multiple of 16 */ |
1132 | p = PTR_ALIGN(pos, 8); |
1133 | if ((uintptr_t)p & 8) { |
1134 | *p = 0; |
1135 | return p + 1; |
1136 | } |
1137 | return p; |
1138 | } |
1139 | |
1140 | /* |
1141 | * Figure out what HW csum a packet wants and return the appropriate control |
1142 | * bits. |
1143 | */ |
1144 | static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) |
1145 | { |
1146 | int csum_type; |
1147 | bool inner_hdr_csum = false; |
1148 | u16 proto, ver; |
1149 | |
1150 | if (skb->encapsulation && |
1151 | (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)) |
1152 | inner_hdr_csum = true; |
1153 | |
1154 | if (inner_hdr_csum) { |
1155 | ver = inner_ip_hdr(skb)->version; |
1156 | proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : |
1157 | inner_ipv6_hdr(skb)->nexthdr; |
1158 | } else { |
1159 | ver = ip_hdr(skb)->version; |
1160 | proto = (ver == 4) ? ip_hdr(skb)->protocol : |
1161 | ipv6_hdr(skb)->nexthdr; |
1162 | } |
1163 | |
1164 | if (ver == 4) { |
1165 | if (proto == IPPROTO_TCP) |
1166 | csum_type = TX_CSUM_TCPIP; |
1167 | else if (proto == IPPROTO_UDP) |
1168 | csum_type = TX_CSUM_UDPIP; |
1169 | else { |
1170 | nocsum: /* |
1171 | * unknown protocol, disable HW csum |
1172 | * and hope a bad packet is detected |
1173 | */ |
1174 | return TXPKT_L4CSUM_DIS_F; |
1175 | } |
1176 | } else { |
1177 | /* |
1178 | * this doesn't work with extension headers |
1179 | */ |
1180 | if (proto == IPPROTO_TCP) |
1181 | csum_type = TX_CSUM_TCPIP6; |
1182 | else if (proto == IPPROTO_UDP) |
1183 | csum_type = TX_CSUM_UDPIP6; |
1184 | else |
1185 | goto nocsum; |
1186 | } |
1187 | |
1188 | if (likely(csum_type >= TX_CSUM_TCPIP)) { |
1189 | int eth_hdr_len, l4_len; |
1190 | u64 hdr_len; |
1191 | |
1192 | if (inner_hdr_csum) { |
1193 | /* This allows checksum offload for all encapsulated |
1194 | * packets like GRE etc.. |
1195 | */ |
1196 | l4_len = skb_inner_network_header_len(skb); |
1197 | eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; |
1198 | } else { |
1199 | l4_len = skb_network_header_len(skb); |
1200 | eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; |
1201 | } |
1202 | hdr_len = TXPKT_IPHDR_LEN_V(l4_len); |
1203 | |
1204 | if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) |
1205 | hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); |
1206 | else |
1207 | hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); |
1208 | return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; |
1209 | } else { |
1210 | int start = skb_transport_offset(skb); |
1211 | |
1212 | return TXPKT_CSUM_TYPE_V(csum_type) | |
1213 | TXPKT_CSUM_START_V(start) | |
1214 | TXPKT_CSUM_LOC_V(start + skb->csum_offset); |
1215 | } |
1216 | } |
1217 | |
1218 | static void eth_txq_stop(struct sge_eth_txq *q) |
1219 | { |
	netif_tx_stop_queue(q->txq);
1221 | q->q.stops++; |
1222 | } |
1223 | |
1224 | static inline void txq_advance(struct sge_txq *q, unsigned int n) |
1225 | { |
1226 | q->in_use += n; |
1227 | q->pidx += n; |
1228 | if (q->pidx >= q->size) |
1229 | q->pidx -= q->size; |
1230 | } |
1231 | |
1232 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1233 | static inline int |
1234 | cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, |
1235 | const struct port_info *pi, u64 *cntrl) |
1236 | { |
1237 | const struct cxgb_fcoe *fcoe = &pi->fcoe; |
1238 | |
1239 | if (!(fcoe->flags & CXGB_FCOE_ENABLED)) |
1240 | return 0; |
1241 | |
1242 | if (skb->protocol != htons(ETH_P_FCOE)) |
1243 | return 0; |
1244 | |
1245 | skb_reset_mac_header(skb); |
1246 | skb->mac_len = sizeof(struct ethhdr); |
1247 | |
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1250 | |
1251 | if (!cxgb_fcoe_sof_eof_supported(adap, skb)) |
		return -EOPNOTSUPP;
1253 | |
1254 | /* FC CRC offload */ |
1255 | *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) | |
1256 | TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F | |
1257 | TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) | |
1258 | TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) | |
1259 | TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END); |
1260 | return 0; |
1261 | } |
1262 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
1263 | |
1264 | /* Returns tunnel type if hardware supports offloading of the same. |
1265 | * It is called only for T5 and onwards. |
1266 | */ |
1267 | enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) |
1268 | { |
1269 | u8 l4_hdr = 0; |
1270 | enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; |
	struct port_info *pi = netdev_priv(skb->dev);
1272 | struct adapter *adapter = pi->adapter; |
1273 | |
1274 | if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || |
1275 | skb->inner_protocol != htons(ETH_P_TEB)) |
1276 | return tnl_type; |
1277 | |
1278 | switch (vlan_get_protocol(skb)) { |
1279 | case htons(ETH_P_IP): |
1280 | l4_hdr = ip_hdr(skb)->protocol; |
1281 | break; |
1282 | case htons(ETH_P_IPV6): |
1283 | l4_hdr = ipv6_hdr(skb)->nexthdr; |
1284 | break; |
1285 | default: |
1286 | return tnl_type; |
1287 | } |
1288 | |
1289 | switch (l4_hdr) { |
1290 | case IPPROTO_UDP: |
1291 | if (adapter->vxlan_port == udp_hdr(skb)->dest) |
1292 | tnl_type = TX_TNL_TYPE_VXLAN; |
1293 | else if (adapter->geneve_port == udp_hdr(skb)->dest) |
1294 | tnl_type = TX_TNL_TYPE_GENEVE; |
1295 | break; |
1296 | default: |
1297 | return tnl_type; |
1298 | } |
1299 | |
1300 | return tnl_type; |
1301 | } |
1302 | |
1303 | static inline void t6_fill_tnl_lso(struct sk_buff *skb, |
1304 | struct cpl_tx_tnl_lso *tnl_lso, |
1305 | enum cpl_tx_tnl_lso_type tnl_type) |
1306 | { |
1307 | u32 val; |
1308 | int in_eth_xtra_len; |
1309 | int l3hdr_len = skb_network_header_len(skb); |
1310 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; |
1311 | const struct skb_shared_info *ssi = skb_shinfo(skb); |
1312 | bool v6 = (ip_hdr(skb)->version == 6); |
1313 | |
1314 | val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | |
1315 | CPL_TX_TNL_LSO_FIRST_F | |
1316 | CPL_TX_TNL_LSO_LAST_F | |
1317 | (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | |
1318 | CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | |
1319 | CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | |
1320 | (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | |
1321 | CPL_TX_TNL_LSO_IPLENSETOUT_F | |
1322 | (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); |
1323 | tnl_lso->op_to_IpIdSplitOut = htonl(val); |
1324 | |
1325 | tnl_lso->IpIdOffsetOut = 0; |
1326 | |
1327 | /* Get the tunnel header length */ |
1328 | val = skb_inner_mac_header(skb) - skb_mac_header(skb); |
1329 | in_eth_xtra_len = skb_inner_network_header(skb) - |
1330 | skb_inner_mac_header(skb) - ETH_HLEN; |
1331 | |
1332 | switch (tnl_type) { |
1333 | case TX_TNL_TYPE_VXLAN: |
1334 | case TX_TNL_TYPE_GENEVE: |
1335 | tnl_lso->UdpLenSetOut_to_TnlHdrLen = |
1336 | htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | |
1337 | CPL_TX_TNL_LSO_UDPLENSETOUT_F); |
1338 | break; |
1339 | default: |
1340 | tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; |
1341 | break; |
1342 | } |
1343 | |
1344 | tnl_lso->UdpLenSetOut_to_TnlHdrLen |= |
1345 | htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | |
1346 | CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); |
1347 | |
1348 | tnl_lso->r1 = 0; |
1349 | |
1350 | val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | |
1351 | CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | |
1352 | CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | |
1353 | CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); |
1354 | tnl_lso->Flow_to_TcpHdrLen = htonl(val); |
1355 | |
1356 | tnl_lso->IpIdOffset = htons(0); |
1357 | |
1358 | tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); |
1359 | tnl_lso->TCPSeqOffset = htonl(0); |
1360 | tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); |
1361 | } |
1362 | |
1363 | static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb, |
1364 | struct cpl_tx_pkt_lso_core *lso) |
1365 | { |
1366 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; |
1367 | int l3hdr_len = skb_network_header_len(skb); |
1368 | const struct skb_shared_info *ssi; |
1369 | bool ipv6 = false; |
1370 | |
1371 | ssi = skb_shinfo(skb); |
1372 | if (ssi->gso_type & SKB_GSO_TCPV6) |
1373 | ipv6 = true; |
1374 | |
1375 | lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | |
1376 | LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | |
1377 | LSO_IPV6_V(ipv6) | |
1378 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | |
1379 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | |
1380 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); |
1381 | lso->ipid_ofst = htons(0); |
1382 | lso->mss = htons(ssi->gso_size); |
1383 | lso->seqno_offset = htonl(0); |
	if (is_t4(adap->params.chip))
1385 | lso->len = htonl(skb->len); |
1386 | else |
1387 | lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); |
1388 | |
1389 | return (void *)(lso + 1); |
1390 | } |
1391 | |
1392 | /** |
1393 | * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update |
1394 | * @adap: the adapter |
1395 | * @eq: the Ethernet TX Queue |
1396 | * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 |
1397 | * |
1398 | * We're typically called here to update the state of an Ethernet TX |
1399 | * Queue with respect to the hardware's progress in consuming the TX |
1400 | * Work Requests that we've put on that Egress Queue. This happens |
1401 | * when we get Egress Queue Update messages and also prophylactically |
1402 | * in regular timer-based Ethernet TX Queue maintenance. |
1403 | */ |
1404 | int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, |
1405 | int maxreclaim) |
1406 | { |
1407 | unsigned int reclaimed, hw_cidx; |
1408 | struct sge_txq *q = &eq->q; |
1409 | int hw_in_use; |
1410 | |
	if (!q->in_use || !__netif_tx_trylock(eq->txq))
1412 | return 0; |
1413 | |
1414 | /* Reclaim pending completed TX Descriptors. */ |
	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1416 | |
1417 | hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
1418 | hw_in_use = q->pidx - hw_cidx; |
1419 | if (hw_in_use < 0) |
1420 | hw_in_use += q->size; |
1421 | |
1422 | /* If the TX Queue is currently stopped and there's now more than half |
1423 | * the queue available, restart it. Otherwise bail out since the rest |
1424 | * of what we want do here is with the possibility of shipping any |
1425 | * currently buffered Coalesced TX Work Request. |
1426 | */ |
	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
		netif_tx_wake_queue(eq->txq);
1429 | eq->q.restarts++; |
1430 | } |
1431 | |
	__netif_tx_unlock(eq->txq);
1433 | return reclaimed; |
1434 | } |
1435 | |
1436 | static inline int cxgb4_validate_skb(struct sk_buff *skb, |
1437 | struct net_device *dev, |
1438 | u32 min_pkt_len) |
1439 | { |
1440 | u32 max_pkt_len; |
1441 | |
1442 | /* The chip min packet length is 10 octets but some firmware |
1443 | * commands have a minimum packet length requirement. So, play |
1444 | * safe and reject anything shorter than @min_pkt_len. |
1445 | */ |
1446 | if (unlikely(skb->len < min_pkt_len)) |
1447 | return -EINVAL; |
1448 | |
1449 | /* Discard the packet if the length is greater than mtu */ |
1450 | max_pkt_len = ETH_HLEN + dev->mtu; |
1451 | |
1452 | if (skb_vlan_tagged(skb)) |
1453 | max_pkt_len += VLAN_HLEN; |
1454 | |
1455 | if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) |
1456 | return -EINVAL; |
1457 | |
1458 | return 0; |
1459 | } |
1460 | |
1461 | static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, |
1462 | u32 hdr_len) |
1463 | { |
1464 | wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; |
1465 | wr->u.udpseg.ethlen = skb_network_offset(skb); |
1466 | wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); |
1467 | wr->u.udpseg.udplen = sizeof(struct udphdr); |
1468 | wr->u.udpseg.rtplen = 0; |
1469 | wr->u.udpseg.r4 = 0; |
1470 | if (skb_shinfo(skb)->gso_size) |
1471 | wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); |
1472 | else |
1473 | wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); |
1474 | wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; |
1475 | wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); |
1476 | |
1477 | return (void *)(wr + 1); |
1478 | } |
1479 | |
1480 | /** |
1481 | * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue |
1482 | * @skb: the packet |
1483 | * @dev: the egress net device |
1484 | * |
1485 | * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. |
1486 | */ |
1487 | static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) |
1488 | { |
1489 | enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; |
1490 | bool ptp_enabled = is_ptp_enabled(skb, dev); |
1491 | unsigned int last_desc, flits, ndesc; |
1492 | u32 wr_mid, ctrl0, op, sgl_off = 0; |
1493 | const struct skb_shared_info *ssi; |
1494 | int len, qidx, credits, ret, left; |
1495 | struct tx_sw_desc *sgl_sdesc; |
1496 | struct fw_eth_tx_eo_wr *eowr; |
1497 | struct fw_eth_tx_pkt_wr *wr; |
1498 | struct cpl_tx_pkt_core *cpl; |
1499 | const struct port_info *pi; |
1500 | bool immediate = false; |
1501 | u64 cntrl, *end, *sgl; |
1502 | struct sge_eth_txq *q; |
1503 | unsigned int chip_ver; |
1504 | struct adapter *adap; |
1505 | |
1506 | ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); |
1507 | if (ret) |
1508 | goto out_free; |
1509 | |
1510 | pi = netdev_priv(dev); |
1511 | adap = pi->adapter; |
1512 | ssi = skb_shinfo(skb); |
1513 | #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) |
1514 | if (xfrm_offload(skb) && !ssi->gso_size) |
1515 | return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); |
1516 | #endif /* CHELSIO_IPSEC_INLINE */ |
1517 | |
1518 | #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) |
1519 | if (tls_is_skb_tx_device_offloaded(skb) && |
1520 | (skb->len - skb_tcp_all_headers(skb))) |
1521 | return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); |
1522 | #endif /* CHELSIO_TLS_DEVICE */ |
1523 | |
1524 | qidx = skb_get_queue_mapping(skb); |
1525 | if (ptp_enabled) { |
1526 | if (!(adap->ptp_tx_skb)) { |
1527 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1528 | adap->ptp_tx_skb = skb_get(skb); |
1529 | } else { |
1530 | goto out_free; |
1531 | } |
1532 | q = &adap->sge.ptptxq; |
1533 | } else { |
1534 | q = &adap->sge.ethtxq[qidx + pi->first_qset]; |
1535 | } |
1536 | skb_tx_timestamp(skb); |
1537 | |
	reclaim_completed_tx(adap, &q->q, -1, true);
1539 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; |
1540 | |
1541 | #ifdef CONFIG_CHELSIO_T4_FCOE |
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1543 | if (unlikely(ret == -EOPNOTSUPP)) |
1544 | goto out_free; |
1545 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
1546 | |
1547 | chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); |
1548 | flits = calc_tx_flits(skb, chip_ver); |
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;
1551 | |
1552 | if (unlikely(credits < 0)) { |
1553 | eth_txq_stop(q); |
1554 | dev_err(adap->pdev_dev, |
			"%s: Tx ring %u full while queue awake!\n",
1556 | dev->name, qidx); |
1557 | return NETDEV_TX_BUSY; |
1558 | } |
1559 | |
1560 | if (is_eth_imm(skb, chip_ver)) |
1561 | immediate = true; |
1562 | |
1563 | if (skb->encapsulation && chip_ver > CHELSIO_T5) |
1564 | tnl_type = cxgb_encap_offload_supported(skb); |
1565 | |
1566 | last_desc = q->q.pidx + ndesc - 1; |
1567 | if (last_desc >= q->q.size) |
1568 | last_desc -= q->q.size; |
1569 | sgl_sdesc = &q->q.sdesc[last_desc]; |
1570 | |
1571 | if (!immediate && |
1572 | unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { |
1573 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); |
1574 | q->mapping_err++; |
1575 | goto out_free; |
1576 | } |
1577 | |
1578 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); |
1579 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1580 | /* After we're done injecting the Work Request for this |
1581 | * packet, we'll be below our "stop threshold" so stop the TX |
1582 | * Queue now and schedule a request for an SGE Egress Queue |
1583 | * Update message. The queue will get started later on when |
1584 | * the firmware processes this Work Request and sends us an |
1585 | * Egress Queue Status Update message indicating that space |
1586 | * has opened up. |
1587 | */ |
1588 | eth_txq_stop(q); |
1589 | if (chip_ver > CHELSIO_T5) |
1590 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; |
1591 | } |
1592 | |
1593 | wr = (void *)&q->q.desc[q->q.pidx]; |
1594 | eowr = (void *)&q->q.desc[q->q.pidx]; |
1595 | wr->equiq_to_len16 = htonl(wr_mid); |
1596 | wr->r3 = cpu_to_be64(0); |
1597 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) |
1598 | end = (u64 *)eowr + flits; |
1599 | else |
1600 | end = (u64 *)wr + flits; |
1601 | |
1602 | len = immediate ? skb->len : 0; |
1603 | len += sizeof(*cpl); |
1604 | if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { |
1605 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); |
1606 | struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); |
1607 | |
1608 | if (tnl_type) |
1609 | len += sizeof(*tnl_lso); |
1610 | else |
1611 | len += sizeof(*lso); |
1612 | |
1613 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | |
1614 | FW_WR_IMMDLEN_V(len)); |
1615 | if (tnl_type) { |
1616 | struct iphdr *iph = ip_hdr(skb); |
1617 | |
1618 | t6_fill_tnl_lso(skb, tnl_lso, tnl_type); |
1619 | cpl = (void *)(tnl_lso + 1); |
1620 | /* Driver is expected to compute partial checksum that |
1621 | * does not include the IP Total Length. |
1622 | */ |
1623 | if (iph->version == 4) { |
1624 | iph->check = 0; |
1625 | iph->tot_len = 0; |
iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
cpl = write_tso_wr(adap, skb, lso);
cntrl = hwcsum(adap->params.chip, skb);
1633 | } |
1634 | sgl = (u64 *)(cpl + 1); /* sgl start here */ |
1635 | q->tso++; |
1636 | q->tx_cso += ssi->gso_segs; |
1637 | } else if (ssi->gso_size) { |
1638 | u64 *start; |
1639 | u32 hdrlen; |
1640 | |
hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
len += hdrlen;
wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
FW_ETH_TX_EO_WR_IMMDLEN_V(len));
cpl = write_eo_udp_wr(skb, eowr, hdrlen);
cntrl = hwcsum(adap->params.chip, skb);

start = (u64 *)(cpl + 1);
sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
hdrlen);
1651 | if (unlikely(start > sgl)) { |
1652 | left = (u8 *)end - (u8 *)q->q.stat; |
1653 | end = (void *)q->q.desc + left; |
1654 | } |
1655 | sgl_off = hdrlen; |
1656 | q->uso++; |
1657 | q->tx_cso += ssi->gso_segs; |
1658 | } else { |
1659 | if (ptp_enabled) |
1660 | op = FW_PTP_TX_PKT_WR; |
1661 | else |
1662 | op = FW_ETH_TX_PKT_WR; |
1663 | wr->op_immdlen = htonl(FW_WR_OP_V(op) | |
1664 | FW_WR_IMMDLEN_V(len)); |
1665 | cpl = (void *)(wr + 1); |
1666 | sgl = (u64 *)(cpl + 1); |
1667 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
cntrl = hwcsum(adap->params.chip, skb) |
1669 | TXPKT_IPCSUM_DIS_F; |
1670 | q->tx_cso++; |
1671 | } |
1672 | } |
1673 | |
1674 | if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { |
1675 | /* If current position is already at the end of the |
1676 | * txq, reset the current to point to start of the queue |
1677 | * and update the end ptr as well. |
1678 | */ |
1679 | left = (u8 *)end - (u8 *)q->q.stat; |
1680 | end = (void *)q->q.desc + left; |
1681 | sgl = (void *)q->q.desc; |
1682 | } |
1683 | |
1684 | if (skb_vlan_tag_present(skb)) { |
1685 | q->vlan_ins++; |
1686 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
1687 | #ifdef CONFIG_CHELSIO_T4_FCOE |
1688 | if (skb->protocol == htons(ETH_P_FCOE)) |
1689 | cntrl |= TXPKT_VLAN_V( |
1690 | ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); |
1691 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
1692 | } |
1693 | |
1694 | ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | |
1695 | TXPKT_PF_V(adap->pf); |
1696 | if (ptp_enabled) |
1697 | ctrl0 |= TXPKT_TSTAMP_F; |
1698 | #ifdef CONFIG_CHELSIO_T4_DCB |
if (is_t4(adap->params.chip))
1700 | ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); |
1701 | else |
1702 | ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); |
1703 | #endif |
1704 | cpl->ctrl0 = htonl(ctrl0); |
1705 | cpl->pack = htons(0); |
1706 | cpl->len = htons(skb->len); |
1707 | cpl->ctrl1 = cpu_to_be64(cntrl); |
1708 | |
1709 | if (immediate) { |
1710 | cxgb4_inline_tx_skb(skb, &q->q, sgl); |
1711 | dev_consume_skb_any(skb); |
1712 | } else { |
1713 | cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, |
1714 | sgl_sdesc->addr); |
1715 | skb_orphan(skb); |
1716 | sgl_sdesc->skb = skb; |
1717 | } |
1718 | |
txq_advance(&q->q, ndesc);
1720 | |
1721 | cxgb4_ring_tx_db(adap, &q->q, ndesc); |
1722 | return NETDEV_TX_OK; |
1723 | |
1724 | out_free: |
1725 | dev_kfree_skb_any(skb); |
1726 | return NETDEV_TX_OK; |
1727 | } |
1728 | |
1729 | /* Constants ... */ |
1730 | enum { |
1731 | /* Egress Queue sizes, producer and consumer indices are all in units |
1732 | * of Egress Context Units bytes. Note that as far as the hardware is |
1733 | * concerned, the free list is an Egress Queue (the host produces free |
1734 | * buffers which the hardware consumes) and free list entries are |
1735 | * 64-bit PCI DMA addresses. |
1736 | */ |
1737 | EQ_UNIT = SGE_EQ_IDXSIZE, |
1738 | FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), |
1739 | TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), |
1740 | |
1741 | T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
1742 | sizeof(struct cpl_tx_pkt_lso_core) + |
1743 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), |
1744 | }; |
1745 | |
1746 | /** |
1747 | * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? |
1748 | * @skb: the packet |
1749 | * |
1750 | * Returns whether an Ethernet packet is small enough to fit completely as |
1751 | * immediate data. |
1752 | */ |
1753 | static inline int t4vf_is_eth_imm(const struct sk_buff *skb) |
1754 | { |
1755 | /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request |
1756 | * which does not accommodate immediate data. We could dike out all |
1757 | * of the support code for immediate data but that would tie our hands |
* too much if we ever want to enhance the firmware. It would also
1759 | * create more differences between the PF and VF Drivers. |
1760 | */ |
1761 | return false; |
1762 | } |
1763 | |
1764 | /** |
1765 | * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR |
1766 | * @skb: the packet |
1767 | * |
1768 | * Returns the number of flits needed for a TX Work Request for the |
1769 | * given Ethernet packet, including the needed WR and CPL headers. |
1770 | */ |
1771 | static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) |
1772 | { |
1773 | unsigned int flits; |
1774 | |
1775 | /* If the skb is small enough, we can pump it out as a work request |
1776 | * with only immediate data. In that case we just have to have the |
1777 | * TX Packet header plus the skb data in the Work Request. |
1778 | */ |
1779 | if (t4vf_is_eth_imm(skb)) |
1780 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), |
1781 | sizeof(__be64)); |
1782 | |
1783 | /* Otherwise, we're going to have to construct a Scatter gather list |
1784 | * of the skb body and fragments. We also include the flits necessary |
1785 | * for the TX Packet Work Request and CPL. We always have a firmware |
1786 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and |
1787 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL |
1788 | * message or, if we're doing a Large Send Offload, an LSO CPL message |
1789 | * with an embedded TX Packet Write CPL message. |
1790 | */ |
1791 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); |
1792 | if (skb_shinfo(skb)->gso_size) |
1793 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
1794 | sizeof(struct cpl_tx_pkt_lso_core) + |
1795 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); |
1796 | else |
1797 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + |
1798 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); |
1799 | return flits; |
1800 | } |
1801 | |
1802 | /** |
1803 | * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue |
1804 | * @skb: the packet |
1805 | * @dev: the egress net device |
1806 | * |
1807 | * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. |
1808 | */ |
1809 | static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, |
1810 | struct net_device *dev) |
1811 | { |
1812 | unsigned int last_desc, flits, ndesc; |
1813 | const struct skb_shared_info *ssi; |
1814 | struct fw_eth_tx_pkt_vm_wr *wr; |
1815 | struct tx_sw_desc *sgl_sdesc; |
1816 | struct cpl_tx_pkt_core *cpl; |
1817 | const struct port_info *pi; |
1818 | struct sge_eth_txq *txq; |
1819 | struct adapter *adapter; |
1820 | int qidx, credits, ret; |
1821 | size_t fw_hdr_copy_len; |
1822 | unsigned int chip_ver; |
1823 | u64 cntrl, *end; |
1824 | u32 wr_mid; |
1825 | |
1826 | /* The chip minimum packet length is 10 octets but the firmware |
1827 | * command that we are using requires that we copy the Ethernet header |
1828 | * (including the VLAN tag) into the header so we reject anything |
1829 | * smaller than that ... |
1830 | */ |
1831 | BUILD_BUG_ON(sizeof(wr->firmware) != |
1832 | (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + |
1833 | sizeof(wr->ethtype) + sizeof(wr->vlantci))); |
1834 | fw_hdr_copy_len = sizeof(wr->firmware); |
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
1836 | if (ret) |
1837 | goto out_free; |
1838 | |
1839 | /* Figure out which TX Queue we're going to use. */ |
1840 | pi = netdev_priv(dev); |
1841 | adapter = pi->adapter; |
1842 | qidx = skb_get_queue_mapping(skb); |
1843 | WARN_ON(qidx >= pi->nqsets); |
1844 | txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; |
1845 | |
1846 | /* Take this opportunity to reclaim any TX Descriptors whose DMA |
1847 | * transfers have completed. |
1848 | */ |
reclaim_completed_tx(adapter, &txq->q, -1, true);
1850 | |
1851 | /* Calculate the number of flits and TX Descriptors we're going to |
1852 | * need along with how many TX Descriptors will be left over after |
1853 | * we inject our Work Request. |
1854 | */ |
1855 | flits = t4vf_calc_tx_flits(skb); |
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
1858 | |
1859 | if (unlikely(credits < 0)) { |
1860 | /* Not enough room for this packet's Work Request. Stop the |
1861 | * TX Queue and return a "busy" condition. The queue will get |
1862 | * started later on when the firmware informs us that space |
1863 | * has opened up. |
1864 | */ |
eth_txq_stop(txq);
dev_err(adapter->pdev_dev,
"%s: TX ring %u full while queue awake!\n",
1868 | dev->name, qidx); |
1869 | return NETDEV_TX_BUSY; |
1870 | } |
1871 | |
1872 | last_desc = txq->q.pidx + ndesc - 1; |
1873 | if (last_desc >= txq->q.size) |
1874 | last_desc -= txq->q.size; |
1875 | sgl_sdesc = &txq->q.sdesc[last_desc]; |
1876 | |
1877 | if (!t4vf_is_eth_imm(skb) && |
1878 | unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, |
1879 | sgl_sdesc->addr) < 0)) { |
1880 | /* We need to map the skb into PCI DMA space (because it can't |
1881 | * be in-lined directly into the Work Request) and the mapping |
1882 | * operation failed. Record the error and drop the packet. |
1883 | */ |
1884 | memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); |
1885 | txq->mapping_err++; |
1886 | goto out_free; |
1887 | } |
1888 | |
1889 | chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); |
1890 | wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); |
1891 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1892 | /* After we're done injecting the Work Request for this |
1893 | * packet, we'll be below our "stop threshold" so stop the TX |
1894 | * Queue now and schedule a request for an SGE Egress Queue |
1895 | * Update message. The queue will get started later on when |
1896 | * the firmware processes this Work Request and sends us an |
1897 | * Egress Queue Status Update message indicating that space |
1898 | * has opened up. |
1899 | */ |
eth_txq_stop(txq);
1901 | if (chip_ver > CHELSIO_T5) |
1902 | wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; |
1903 | } |
1904 | |
1905 | /* Start filling in our Work Request. Note that we do _not_ handle |
1906 | * the WR Header wrapping around the TX Descriptor Ring. If our |
1907 | * maximum header size ever exceeds one TX Descriptor, we'll need to |
1908 | * do something else here. |
1909 | */ |
1910 | WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); |
1911 | wr = (void *)&txq->q.desc[txq->q.pidx]; |
1912 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
1913 | wr->r3[0] = cpu_to_be32(0); |
1914 | wr->r3[1] = cpu_to_be32(0); |
skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1916 | end = (u64 *)wr + flits; |
1917 | |
1918 | /* If this is a Large Send Offload packet we'll put in an LSO CPL |
1919 | * message with an encapsulated TX Packet CPL message. Otherwise we |
1920 | * just use a TX Packet CPL message. |
1921 | */ |
1922 | ssi = skb_shinfo(skb); |
1923 | if (ssi->gso_size) { |
1924 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); |
1925 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; |
1926 | int l3hdr_len = skb_network_header_len(skb); |
1927 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; |
1928 | |
1929 | wr->op_immdlen = |
1930 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | |
1931 | FW_WR_IMMDLEN_V(sizeof(*lso) + |
1932 | sizeof(*cpl))); |
1933 | /* Fill in the LSO CPL message. */ |
1934 | lso->lso_ctrl = |
1935 | cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | |
1936 | LSO_FIRST_SLICE_F | |
1937 | LSO_LAST_SLICE_F | |
1938 | LSO_IPV6_V(v6) | |
1939 | LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | |
1940 | LSO_IPHDR_LEN_V(l3hdr_len / 4) | |
1941 | LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); |
1942 | lso->ipid_ofst = cpu_to_be16(0); |
1943 | lso->mss = cpu_to_be16(ssi->gso_size); |
1944 | lso->seqno_offset = cpu_to_be32(0); |
if (is_t4(adapter->params.chip))
1946 | lso->len = cpu_to_be32(skb->len); |
1947 | else |
1948 | lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); |
1949 | |
1950 | /* Set up TX Packet CPL pointer, control word and perform |
1951 | * accounting. |
1952 | */ |
1953 | cpl = (void *)(lso + 1); |
1954 | |
1955 | if (chip_ver <= CHELSIO_T5) |
1956 | cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); |
1957 | else |
1958 | cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); |
1959 | |
1960 | cntrl |= TXPKT_CSUM_TYPE_V(v6 ? |
1961 | TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | |
1962 | TXPKT_IPHDR_LEN_V(l3hdr_len); |
1963 | txq->tso++; |
1964 | txq->tx_cso += ssi->gso_segs; |
1965 | } else { |
1966 | int len; |
1967 | |
1968 | len = (t4vf_is_eth_imm(skb) |
1969 | ? skb->len + sizeof(*cpl) |
1970 | : sizeof(*cpl)); |
1971 | wr->op_immdlen = |
1972 | cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | |
1973 | FW_WR_IMMDLEN_V(len)); |
1974 | |
1975 | /* Set up TX Packet CPL pointer, control word and perform |
1976 | * accounting. |
1977 | */ |
1978 | cpl = (void *)(wr + 1); |
1979 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
cntrl = hwcsum(adapter->params.chip, skb) |
1981 | TXPKT_IPCSUM_DIS_F; |
1982 | txq->tx_cso++; |
1983 | } else { |
1984 | cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; |
1985 | } |
1986 | } |
1987 | |
1988 | /* If there's a VLAN tag present, add that to the list of things to |
1989 | * do in this Work Request. |
1990 | */ |
1991 | if (skb_vlan_tag_present(skb)) { |
1992 | txq->vlan_ins++; |
1993 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
1994 | } |
1995 | |
1996 | /* Fill in the TX Packet CPL message header. */ |
1997 | cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | |
1998 | TXPKT_INTF_V(pi->port_id) | |
1999 | TXPKT_PF_V(0)); |
2000 | cpl->pack = cpu_to_be16(0); |
2001 | cpl->len = cpu_to_be16(skb->len); |
2002 | cpl->ctrl1 = cpu_to_be64(cntrl); |
2003 | |
2004 | /* Fill in the body of the TX Packet CPL message with either in-lined |
2005 | * data or a Scatter/Gather List. |
2006 | */ |
2007 | if (t4vf_is_eth_imm(skb)) { |
2008 | /* In-line the packet's data and free the skb since we don't |
2009 | * need it any longer. |
2010 | */ |
2011 | cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); |
2012 | dev_consume_skb_any(skb); |
2013 | } else { |
2014 | /* Write the skb's Scatter/Gather list into the TX Packet CPL |
2015 | * message and retain a pointer to the skb so we can free it |
2016 | * later when its DMA completes. (We store the skb pointer |
2017 | * in the Software Descriptor corresponding to the last TX |
2018 | * Descriptor used by the Work Request.) |
2019 | * |
2020 | * The retained skb will be freed when the corresponding TX |
2021 | * Descriptors are reclaimed after their DMAs complete. |
2022 | * However, this could take quite a while since, in general, |
2023 | * the hardware is set up to be lazy about sending DMA |
2024 | * completion notifications to us and we mostly perform TX |
2025 | * reclaims in the transmit routine. |
2026 | * |
* This is good for performance but means that we rely on new
2028 | * TX packets arriving to run the destructors of completed |
2029 | * packets, which open up space in their sockets' send queues. |
2030 | * Sometimes we do not get such new packets causing TX to |
2031 | * stall. A single UDP transmitter is a good example of this |
2032 | * situation. We have a clean up timer that periodically |
2033 | * reclaims completed packets but it doesn't run often enough |
2034 | * (nor do we want it to) to prevent lengthy stalls. A |
2035 | * solution to this problem is to run the destructor early, |
2036 | * after the packet is queued but before it's DMAd. A con is |
2037 | * that we lie to socket memory accounting, but the amount of |
2038 | * extra memory is reasonable (limited by the number of TX |
2039 | * descriptors), the packets do actually get freed quickly by |
2040 | * new packets almost always, and for protocols like TCP that |
2041 | * wait for acks to really free up the data the extra memory |
2042 | * is even less. On the positive side we run the destructors |
2043 | * on the sending CPU rather than on a potentially different |
2044 | * completing CPU, usually a good thing. |
2045 | * |
2046 | * Run the destructor before telling the DMA engine about the |
2047 | * packet to make sure it doesn't complete and get freed |
2048 | * prematurely. |
2049 | */ |
2050 | struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); |
2051 | struct sge_txq *tq = &txq->q; |
2052 | |
2053 | /* If the Work Request header was an exact multiple of our TX |
2054 | * Descriptor length, then it's possible that the starting SGL |
2055 | * pointer lines up exactly with the end of our TX Descriptor |
2056 | * ring. If that's the case, wrap around to the beginning |
2057 | * here ... |
2058 | */ |
2059 | if (unlikely((void *)sgl == (void *)tq->stat)) { |
2060 | sgl = (void *)tq->desc; |
2061 | end = (void *)((void *)tq->desc + |
2062 | ((void *)end - (void *)tq->stat)); |
2063 | } |
2064 | |
2065 | cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); |
2066 | skb_orphan(skb); |
2067 | sgl_sdesc->skb = skb; |
2068 | } |
2069 | |
2070 | /* Advance our internal TX Queue state, tell the hardware about |
2071 | * the new TX descriptors and return success. |
2072 | */ |
txq_advance(&txq->q, ndesc);
2074 | |
2075 | cxgb4_ring_tx_db(adapter, &txq->q, ndesc); |
2076 | return NETDEV_TX_OK; |
2077 | |
2078 | out_free: |
2079 | /* An error of some sort happened. Free the TX skb and tell the |
2080 | * OS that we've "dealt" with the packet ... |
2081 | */ |
2082 | dev_kfree_skb_any(skb); |
2083 | return NETDEV_TX_OK; |
2084 | } |
2085 | |
2086 | /** |
2087 | * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs |
2088 | * @q: the SGE control Tx queue |
2089 | * |
2090 | * This is a variant of cxgb4_reclaim_completed_tx() that is used |
2091 | * for Tx queues that send only immediate data (presently just |
2092 | * the control queues) and thus do not have any sk_buffs to release. |
2093 | */ |
2094 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) |
2095 | { |
2096 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
2097 | int reclaim = hw_cidx - q->cidx; |
2098 | |
2099 | if (reclaim < 0) |
2100 | reclaim += q->size; |
2101 | |
2102 | q->in_use -= reclaim; |
2103 | q->cidx = hw_cidx; |
2104 | } |
2105 | |
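/**
 * eosw_txq_advance_index - advance a circular EOSW Tx queue index
 * @idx: pointer to the index to advance
 * @n: number of entries to advance by
 * @max: number of entries in the queue
 *
 * Advances @idx by @n entries, wrapping back to the start of the ring
 * once the end is reached.
 */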
2106 | static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) |
2107 | { |
2108 | u32 val = *idx + n; |
2109 | |
2110 | if (val >= max) |
2111 | val -= max; |
2112 | |
2113 | *idx = val; |
2114 | } |
2115 | |
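/**
 * cxgb4_eosw_txq_free_desc - free EOSW Tx queue software descriptors
 * @adap: the adapter
 * @eosw_txq: the EOSW Tx queue
 * @ndesc: number of descriptors to free
 *
 * Walks @ndesc software descriptors starting at the last consumer index,
 * unmaps any DMA mappings and frees the associated skbs.
 */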
2116 | void cxgb4_eosw_txq_free_desc(struct adapter *adap, |
2117 | struct sge_eosw_txq *eosw_txq, u32 ndesc) |
2118 | { |
2119 | struct tx_sw_desc *d; |
2120 | |
2121 | d = &eosw_txq->desc[eosw_txq->last_cidx]; |
2122 | while (ndesc--) { |
2123 | if (d->skb) { |
2124 | if (d->addr[0]) { |
unmap_skb(adap->pdev_dev, d->skb, d->addr);
memset(d->addr, 0, sizeof(d->addr));
}
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
eosw_txq->ndesc);
2133 | d = &eosw_txq->desc[eosw_txq->last_cidx]; |
2134 | } |
2135 | } |
2136 | |
2137 | static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) |
2138 | { |
eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
2140 | eosw_txq->inuse += n; |
2141 | } |
2142 | |
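/* Reserve the software descriptor at the current producer index for @skb.
 * Fails with -ENOMEM when every descriptor on the EOSW Tx queue is in use.
 */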
2143 | static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, |
2144 | struct sk_buff *skb) |
2145 | { |
2146 | if (eosw_txq->inuse == eosw_txq->ndesc) |
2147 | return -ENOMEM; |
2148 | |
2149 | eosw_txq->desc[eosw_txq->pidx].skb = skb; |
2150 | return 0; |
2151 | } |
2152 | |
2153 | static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) |
2154 | { |
2155 | return eosw_txq->desc[eosw_txq->last_pidx].skb; |
2156 | } |
2157 | |
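/* Work out how many flits an ETHOFLD Tx Work Request needs: the WR and
 * CPL headers, the inlined packet headers (rounded up to 16 bytes) and,
 * if any payload remains, the SGL describing it.
 */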
2158 | static inline u8 ethofld_calc_tx_flits(struct adapter *adap, |
2159 | struct sk_buff *skb, u32 hdr_len) |
2160 | { |
2161 | u8 flits, nsgl = 0; |
2162 | u32 wrlen; |
2163 | |
2164 | wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); |
2165 | if (skb_shinfo(skb)->gso_size && |
2166 | !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) |
2167 | wrlen += sizeof(struct cpl_tx_pkt_lso_core); |
2168 | |
2169 | wrlen += roundup(hdr_len, 16); |
2170 | |
2171 | /* Packet headers + WR + CPLs */ |
2172 | flits = DIV_ROUND_UP(wrlen, 8); |
2173 | |
2174 | if (skb_shinfo(skb)->nr_frags > 0) { |
2175 | if (skb_headlen(skb) - hdr_len) |
2176 | nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); |
2177 | else |
2178 | nsgl = sgl_len(skb_shinfo(skb)->nr_frags); |
2179 | } else if (skb->len - hdr_len) { |
nsgl = sgl_len(1);
2181 | } |
2182 | |
2183 | return flits + nsgl; |
2184 | } |
2185 | |
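/* Fill in the FW_ETH_TX_EO_WR header for @skb and return a pointer to
 * where the TX Packet CPL message should be written. A completion is
 * requested at least once per half of the queue's WR credits so that
 * credits are returned in time.
 */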
2186 | static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, |
2187 | struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, |
2188 | u32 hdr_len, u32 wrlen) |
2189 | { |
2190 | const struct skb_shared_info *ssi = skb_shinfo(skb); |
2191 | struct cpl_tx_pkt_core *cpl; |
2192 | u32 immd_len, wrlen16; |
2193 | bool compl = false; |
2194 | u8 ver, proto; |
2195 | |
2196 | ver = ip_hdr(skb)->version; |
2197 | proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; |
2198 | |
2199 | wrlen16 = DIV_ROUND_UP(wrlen, 16); |
2200 | immd_len = sizeof(struct cpl_tx_pkt_core); |
2201 | if (skb_shinfo(skb)->gso_size && |
2202 | !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) |
2203 | immd_len += sizeof(struct cpl_tx_pkt_lso_core); |
2204 | immd_len += hdr_len; |
2205 | |
2206 | if (!eosw_txq->ncompl || |
2207 | (eosw_txq->last_compl + wrlen16) >= |
2208 | (adap->params.ofldq_wr_cred / 2)) { |
2209 | compl = true; |
2210 | eosw_txq->ncompl++; |
2211 | eosw_txq->last_compl = 0; |
2212 | } |
2213 | |
2214 | wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | |
2215 | FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) | |
2216 | FW_WR_COMPL_V(compl)); |
2217 | wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | |
2218 | FW_WR_FLOWID_V(eosw_txq->hwtid)); |
2219 | wr->r3 = 0; |
2220 | if (proto == IPPROTO_UDP) { |
2221 | cpl = write_eo_udp_wr(skb, wr, hdr_len); |
2222 | } else { |
2223 | wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; |
2224 | wr->u.tcpseg.ethlen = skb_network_offset(skb); |
2225 | wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); |
2226 | wr->u.tcpseg.tcplen = tcp_hdrlen(skb); |
2227 | wr->u.tcpseg.tsclk_tsoff = 0; |
2228 | wr->u.tcpseg.r4 = 0; |
2229 | wr->u.tcpseg.r5 = 0; |
2230 | wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); |
2231 | |
2232 | if (ssi->gso_size) { |
2233 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); |
2234 | |
2235 | wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); |
2236 | cpl = write_tso_wr(adap, skb, lso); |
2237 | } else { |
2238 | wr->u.tcpseg.mss = cpu_to_be16(0xffff); |
2239 | cpl = (void *)(wr + 1); |
2240 | } |
2241 | } |
2242 | |
2243 | eosw_txq->cred -= wrlen16; |
2244 | eosw_txq->last_compl += wrlen16; |
2245 | return cpl; |
2246 | } |
2247 | |
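/* Transmit the packet at the current position of an EOSW Tx queue on its
 * bound hardware ETHOFLD Tx queue. FLOWC requests queued on the EOSW
 * queue are inlined as-is and only advance the queue's state machine.
 */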
2248 | static int ethofld_hard_xmit(struct net_device *dev, |
2249 | struct sge_eosw_txq *eosw_txq) |
2250 | { |
2251 | struct port_info *pi = netdev2pinfo(dev); |
2252 | struct adapter *adap = netdev2adap(dev); |
2253 | u32 wrlen, wrlen16, hdr_len, data_len; |
2254 | enum sge_eosw_state next_state; |
2255 | u64 cntrl, *start, *end, *sgl; |
2256 | struct sge_eohw_txq *eohw_txq; |
2257 | struct cpl_tx_pkt_core *cpl; |
2258 | struct fw_eth_tx_eo_wr *wr; |
2259 | bool skip_eotx_wr = false; |
2260 | struct tx_sw_desc *d; |
2261 | struct sk_buff *skb; |
2262 | int left, ret = 0; |
2263 | u8 flits, ndesc; |
2264 | |
2265 | eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; |
spin_lock(&eohw_txq->lock);
reclaim_completed_tx_imm(&eohw_txq->q);
2268 | |
2269 | d = &eosw_txq->desc[eosw_txq->last_pidx]; |
2270 | skb = d->skb; |
2271 | skb_tx_timestamp(skb); |
2272 | |
2273 | wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; |
2274 | if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && |
2275 | eosw_txq->last_pidx == eosw_txq->flowc_idx)) { |
2276 | hdr_len = skb->len; |
2277 | data_len = 0; |
2278 | flits = DIV_ROUND_UP(hdr_len, 8); |
2279 | if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) |
2280 | next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY; |
2281 | else |
2282 | next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY; |
2283 | skip_eotx_wr = true; |
2284 | } else { |
hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
data_len = skb->len - hdr_len;
flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
}
ndesc = flits_to_desc(flits);
wrlen = flits * 8;
wrlen16 = DIV_ROUND_UP(wrlen, 16);

left = txq_avail(&eohw_txq->q) - ndesc;
2294 | |
2295 | /* If there are no descriptors left in hardware queues or no |
2296 | * CPL credits left in software queues, then wait for them |
2297 | * to come back and retry again. Note that we always request |
* a credit update via interrupt once half of the credits are
2299 | * consumed. So, the interrupt will eventually restore the |
2300 | * credits and invoke the Tx path again. |
2301 | */ |
2302 | if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { |
2303 | ret = -ENOMEM; |
2304 | goto out_unlock; |
2305 | } |
2306 | |
2307 | if (unlikely(skip_eotx_wr)) { |
2308 | start = (u64 *)wr; |
2309 | eosw_txq->state = next_state; |
2310 | eosw_txq->cred -= wrlen16; |
2311 | eosw_txq->ncompl++; |
2312 | eosw_txq->last_compl = 0; |
2313 | goto write_wr_headers; |
2314 | } |
2315 | |
2316 | cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); |
cntrl = hwcsum(adap->params.chip, skb);
2318 | if (skb_vlan_tag_present(skb)) |
2319 | cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); |
2320 | |
2321 | cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | |
2322 | TXPKT_INTF_V(pi->tx_chan) | |
2323 | TXPKT_PF_V(adap->pf)); |
2324 | cpl->pack = 0; |
2325 | cpl->len = cpu_to_be16(skb->len); |
2326 | cpl->ctrl1 = cpu_to_be64(cntrl); |
2327 | |
2328 | start = (u64 *)(cpl + 1); |
2329 | |
write_wr_headers:
sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
hdr_len);
2333 | if (data_len) { |
2334 | ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); |
2335 | if (unlikely(ret)) { |
2336 | memset(d->addr, 0, sizeof(d->addr)); |
2337 | eohw_txq->mapping_err++; |
2338 | goto out_unlock; |
2339 | } |
2340 | |
2341 | end = (u64 *)wr + flits; |
2342 | if (unlikely(start > sgl)) { |
2343 | left = (u8 *)end - (u8 *)eohw_txq->q.stat; |
2344 | end = (void *)eohw_txq->q.desc + left; |
2345 | } |
2346 | |
2347 | if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { |
2348 | /* If current position is already at the end of the |
2349 | * txq, reset the current to point to start of the queue |
2350 | * and update the end ptr as well. |
2351 | */ |
2352 | left = (u8 *)end - (u8 *)eohw_txq->q.stat; |
2353 | |
2354 | end = (void *)eohw_txq->q.desc + left; |
2355 | sgl = (void *)eohw_txq->q.desc; |
2356 | } |
2357 | |
2358 | cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, |
2359 | d->addr); |
2360 | } |
2361 | |
2362 | if (skb_shinfo(skb)->gso_size) { |
2363 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) |
2364 | eohw_txq->uso++; |
2365 | else |
2366 | eohw_txq->tso++; |
2367 | eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; |
2368 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2369 | eohw_txq->tx_cso++; |
2370 | } |
2371 | |
2372 | if (skb_vlan_tag_present(skb)) |
2373 | eohw_txq->vlan_ins++; |
2374 | |
txq_advance(&eohw_txq->q, ndesc);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);

out_unlock:
spin_unlock(&eohw_txq->lock);
2381 | return ret; |
2382 | } |
2383 | |
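/* Push as many pending packets as possible from an EOSW Tx queue to
 * hardware. Transmission stops early if the hardware queue runs out of
 * descriptors or the EOSW queue runs out of CPL credits.
 */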
2384 | static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) |
2385 | { |
2386 | struct sk_buff *skb; |
2387 | int pktcount, ret; |
2388 | |
2389 | switch (eosw_txq->state) { |
2390 | case CXGB4_EO_STATE_ACTIVE: |
2391 | case CXGB4_EO_STATE_FLOWC_OPEN_SEND: |
2392 | case CXGB4_EO_STATE_FLOWC_CLOSE_SEND: |
2393 | pktcount = eosw_txq->pidx - eosw_txq->last_pidx; |
2394 | if (pktcount < 0) |
2395 | pktcount += eosw_txq->ndesc; |
2396 | break; |
2397 | case CXGB4_EO_STATE_FLOWC_OPEN_REPLY: |
2398 | case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY: |
2399 | case CXGB4_EO_STATE_CLOSED: |
2400 | default: |
2401 | return; |
2402 | } |
2403 | |
2404 | while (pktcount--) { |
2405 | skb = eosw_txq_peek(eosw_txq); |
2406 | if (!skb) { |
eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
eosw_txq->ndesc);
2409 | continue; |
2410 | } |
2411 | |
2412 | ret = ethofld_hard_xmit(dev, eosw_txq); |
2413 | if (ret) |
2414 | break; |
2415 | } |
2416 | } |
2417 | |
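/* Queue an skb on the EOSW Tx queue selected by its queue mapping and
 * kick off transmission. Used for Tx queues beyond the port's regular
 * queue sets, i.e. those backed by TC-MQPRIO ETHOFLD queues.
 */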
2418 | static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb, |
2419 | struct net_device *dev) |
2420 | { |
2421 | struct cxgb4_tc_port_mqprio *tc_port_mqprio; |
2422 | struct port_info *pi = netdev2pinfo(dev); |
2423 | struct adapter *adap = netdev2adap(dev); |
2424 | struct sge_eosw_txq *eosw_txq; |
2425 | u32 qid; |
2426 | int ret; |
2427 | |
2428 | ret = cxgb4_validate_skb(skb, dev, ETH_HLEN); |
2429 | if (ret) |
2430 | goto out_free; |
2431 | |
2432 | tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; |
2433 | qid = skb_get_queue_mapping(skb) - pi->nqsets; |
2434 | eosw_txq = &tc_port_mqprio->eosw_txq[qid]; |
spin_lock_bh(&eosw_txq->lock);
2436 | if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) |
2437 | goto out_unlock; |
2438 | |
2439 | ret = eosw_txq_enqueue(eosw_txq, skb); |
2440 | if (ret) |
2441 | goto out_unlock; |
2442 | |
2443 | /* SKB is queued for processing until credits are available. |
2444 | * So, call the destructor now and we'll free the skb later |
2445 | * after it has been successfully transmitted. |
2446 | */ |
2447 | skb_orphan(skb); |
2448 | |
eosw_txq_advance(eosw_txq, 1);
ethofld_xmit(dev, eosw_txq);
spin_unlock_bh(&eosw_txq->lock);
2452 | return NETDEV_TX_OK; |
2453 | |
2454 | out_unlock: |
spin_unlock_bh(&eosw_txq->lock);
2456 | out_free: |
2457 | dev_kfree_skb_any(skb); |
2458 | return NETDEV_TX_OK; |
2459 | } |
2460 | |
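/* Top-level transmit entry point. Dispatches the packet to the VM Work
 * Request path, the TC-MQPRIO ETHOFLD path, or the regular Ethernet Tx
 * path (taking the PTP lock when a hardware timestamp is needed).
 */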
2461 | netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2462 | { |
2463 | struct port_info *pi = netdev_priv(dev); |
2464 | u16 qid = skb_get_queue_mapping(skb); |
2465 | |
2466 | if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) |
2467 | return cxgb4_vf_eth_xmit(skb, dev); |
2468 | |
2469 | if (unlikely(qid >= pi->nqsets)) |
2470 | return cxgb4_ethofld_xmit(skb, dev); |
2471 | |
2472 | if (is_ptp_enabled(skb, dev)) { |
2473 | struct adapter *adap = netdev2adap(dev); |
2474 | netdev_tx_t ret; |
2475 | |
spin_lock(&adap->ptp_lock);
ret = cxgb4_eth_xmit(skb, dev);
spin_unlock(&adap->ptp_lock);
2479 | return ret; |
2480 | } |
2481 | |
2482 | return cxgb4_eth_xmit(skb, dev); |
2483 | } |
2484 | |
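/* Drop any skbs queued on an EOSW Tx queue that have not yet been written
 * to hardware, freeing up descriptors (e.g. for a termination FLOWC).
 */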
2485 | static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) |
2486 | { |
2487 | int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; |
2488 | int pidx = eosw_txq->pidx; |
2489 | struct sk_buff *skb; |
2490 | |
2491 | if (!pktcount) |
2492 | return; |
2493 | |
2494 | if (pktcount < 0) |
2495 | pktcount += eosw_txq->ndesc; |
2496 | |
2497 | while (pktcount--) { |
2498 | pidx--; |
2499 | if (pidx < 0) |
2500 | pidx += eosw_txq->ndesc; |
2501 | |
2502 | skb = eosw_txq->desc[pidx].skb; |
2503 | if (skb) { |
2504 | dev_consume_skb_any(skb); |
2505 | eosw_txq->desc[pidx].skb = NULL; |
2506 | eosw_txq->inuse--; |
2507 | } |
2508 | } |
2509 | |
2510 | eosw_txq->pidx = eosw_txq->last_pidx + 1; |
2511 | } |
2512 | |
2513 | /** |
2514 | * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. |
2515 | * @dev: netdevice |
2516 | * @eotid: ETHOFLD tid to bind/unbind |
2517 | * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid |
2518 | * |
2519 | * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. |
2520 | * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from |
2521 | * a traffic class. |
2522 | */ |
2523 | int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) |
2524 | { |
2525 | struct port_info *pi = netdev2pinfo(dev); |
2526 | struct adapter *adap = netdev2adap(dev); |
2527 | enum sge_eosw_state next_state; |
2528 | struct sge_eosw_txq *eosw_txq; |
2529 | u32 len, len16, nparams = 6; |
2530 | struct fw_flowc_wr *flowc; |
2531 | struct eotid_entry *entry; |
2532 | struct sge_ofld_rxq *rxq; |
2533 | struct sk_buff *skb; |
2534 | int ret = 0; |
2535 | |
2536 | len = struct_size(flowc, mnemval, nparams); |
2537 | len16 = DIV_ROUND_UP(len, 16); |
2538 | |
entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2540 | if (!entry) |
2541 | return -ENOMEM; |
2542 | |
2543 | eosw_txq = (struct sge_eosw_txq *)entry->data; |
2544 | if (!eosw_txq) |
2545 | return -ENOMEM; |
2546 | |
2547 | if (!(adap->flags & CXGB4_FW_OK)) { |
2548 | /* Don't stall caller when access to FW is lost */ |
2549 | complete(&eosw_txq->completion); |
2550 | return -EIO; |
2551 | } |
2552 | |
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;

spin_lock_bh(&eosw_txq->lock);
2558 | if (tc != FW_SCHED_CLS_NONE) { |
2559 | if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) |
2560 | goto out_free_skb; |
2561 | |
2562 | next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND; |
2563 | } else { |
2564 | if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) |
2565 | goto out_free_skb; |
2566 | |
2567 | next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND; |
2568 | } |
2569 | |
2570 | flowc = __skb_put(skb, len); |
2571 | memset(flowc, 0, len); |
2572 | |
2573 | rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; |
2574 | flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | |
2575 | FW_WR_FLOWID_V(eosw_txq->hwtid)); |
2576 | flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | |
2577 | FW_FLOWC_WR_NPARAMS_V(nparams) | |
2578 | FW_WR_COMPL_V(1)); |
2579 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; |
2580 | flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); |
2581 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; |
2582 | flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); |
2583 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; |
2584 | flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); |
2585 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; |
2586 | flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); |
2587 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; |
2588 | flowc->mnemval[4].val = cpu_to_be32(tc); |
2589 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; |
2590 | flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? |
2591 | FW_FLOWC_MNEM_EOSTATE_CLOSING : |
2592 | FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); |
2593 | |
2594 | /* Free up any pending skbs to ensure there's room for |
2595 | * termination FLOWC. |
2596 | */ |
2597 | if (tc == FW_SCHED_CLS_NONE) |
2598 | eosw_txq_flush_pending_skbs(eosw_txq); |
2599 | |
2600 | ret = eosw_txq_enqueue(eosw_txq, skb); |
2601 | if (ret) |
2602 | goto out_free_skb; |
2603 | |
2604 | eosw_txq->state = next_state; |
2605 | eosw_txq->flowc_idx = eosw_txq->pidx; |
eosw_txq_advance(eosw_txq, 1);
ethofld_xmit(dev, eosw_txq);

spin_unlock_bh(&eosw_txq->lock);
2610 | return 0; |
2611 | |
2612 | out_free_skb: |
2613 | dev_consume_skb_any(skb); |
spin_unlock_bh(&eosw_txq->lock);
2615 | return ret; |
2616 | } |
2617 | |
2618 | /** |
2619 | * is_imm - check whether a packet can be sent as immediate data |
2620 | * @skb: the packet |
2621 | * |
2622 | * Returns true if a packet can be sent as a WR with immediate data. |
2623 | */ |
2624 | static inline int is_imm(const struct sk_buff *skb) |
2625 | { |
2626 | return skb->len <= MAX_CTRL_WR_LEN; |
2627 | } |
2628 | |
2629 | /** |
2630 | * ctrlq_check_stop - check if a control queue is full and should stop |
2631 | * @q: the queue |
2632 | * @wr: most recent WR written to the queue |
2633 | * |
2634 | * Check if a control queue has become full and should be stopped. |
2635 | * We clean up control queue descriptors very lazily, only when we are out. |
2636 | * If the queue is still full after reclaiming any completed descriptors |
2637 | * we suspend it and have the last WR wake it up. |
2638 | */ |
2639 | static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) |
2640 | { |
reclaim_completed_tx_imm(&q->q);
2642 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { |
2643 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
2644 | q->q.stops++; |
2645 | q->full = 1; |
2646 | } |
2647 | } |
2648 | |
2649 | #define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST" |
2650 | |
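/**
 * cxgb4_selftest_lb_pkt - send a loopback self-test packet
 * @netdev: the net device under test
 *
 * Builds a minimal broadcast Ethernet frame carrying the
 * CXGB4_SELFTEST_LB_STR marker, injects it on the port's first Tx queue
 * and waits up to 10 seconds for it to loop back.
 */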
2651 | int cxgb4_selftest_lb_pkt(struct net_device *netdev) |
2652 | { |
struct port_info *pi = netdev_priv(netdev);
2654 | struct adapter *adap = pi->adapter; |
2655 | struct cxgb4_ethtool_lb_test *lb; |
2656 | int ret, i = 0, pkt_len, credits; |
2657 | struct fw_eth_tx_pkt_wr *wr; |
2658 | struct cpl_tx_pkt_core *cpl; |
2659 | u32 ctrl0, ndesc, flits; |
2660 | struct sge_eth_txq *q; |
2661 | u8 *sgl; |
2662 | |
2663 | pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); |
2664 | |
2665 | flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr), |
2666 | sizeof(__be64)); |
ndesc = flits_to_desc(flits);
2668 | |
2669 | lb = &pi->ethtool_lb; |
2670 | lb->loopback = 1; |
2671 | |
2672 | q = &adap->sge.ethtxq[pi->first_qset]; |
__netif_tx_lock(q->txq, smp_processor_id());

reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
__netif_tx_unlock(q->txq);
2679 | return -ENOMEM; |
2680 | } |
2681 | |
2682 | wr = (void *)&q->q.desc[q->q.pidx]; |
2683 | memset(wr, 0, sizeof(struct tx_desc)); |
2684 | |
2685 | wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | |
2686 | FW_WR_IMMDLEN_V(pkt_len + |
2687 | sizeof(*cpl))); |
2688 | wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); |
2689 | wr->r3 = cpu_to_be64(0); |
2690 | |
2691 | cpl = (void *)(wr + 1); |
2692 | sgl = (u8 *)(cpl + 1); |
2693 | |
2694 | ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | |
2695 | TXPKT_INTF_V(pi->tx_chan + 4); |
2696 | |
2697 | cpl->ctrl0 = htonl(ctrl0); |
2698 | cpl->pack = htons(0); |
2699 | cpl->len = htons(pkt_len); |
2700 | cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); |
2701 | |
eth_broadcast_addr(sgl);
i += ETH_ALEN;
ether_addr_copy(&sgl[i], netdev->dev_addr);
i += ETH_ALEN;

snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
CXGB4_SELFTEST_LB_STR);

init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
__netif_tx_unlock(q->txq);

/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
2717 | if (!ret) |
2718 | ret = -ETIMEDOUT; |
2719 | else |
2720 | ret = lb->result; |
2721 | |
2722 | lb->loopback = 0; |
2723 | |
2724 | return ret; |
2725 | } |
2726 | |
2727 | /** |
2728 | * ctrl_xmit - send a packet through an SGE control Tx queue |
2729 | * @q: the control queue |
2730 | * @skb: the packet |
2731 | * |
2732 | * Send a packet through an SGE control Tx queue. Packets sent through |
2733 | * a control queue must fit entirely as immediate data. |
2734 | */ |
2735 | static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) |
2736 | { |
2737 | unsigned int ndesc; |
2738 | struct fw_wr_hdr *wr; |
2739 | |
2740 | if (unlikely(!is_imm(skb))) { |
2741 | WARN_ON(1); |
2742 | dev_kfree_skb(skb); |
2743 | return NET_XMIT_DROP; |
2744 | } |
2745 | |
2746 | ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); |
spin_lock(&q->sendq.lock);

if (unlikely(q->full)) {
skb->priority = ndesc; /* save for restart */
__skb_queue_tail(&q->sendq, skb);
spin_unlock(&q->sendq.lock);
2753 | return NET_XMIT_CN; |
2754 | } |
2755 | |
2756 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; |
2757 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
2758 | |
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);

cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
2765 | |
2766 | kfree_skb(skb); |
2767 | return NET_XMIT_SUCCESS; |
2768 | } |
2769 | |
2770 | /** |
2771 | * restart_ctrlq - restart a suspended control queue |
2772 | * @t: pointer to the tasklet associated with this handler |
2773 | * |
2774 | * Resumes transmission on a suspended Tx control queue. |
2775 | */ |
2776 | static void restart_ctrlq(struct tasklet_struct *t) |
2777 | { |
2778 | struct sk_buff *skb; |
2779 | unsigned int written = 0; |
2780 | struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk); |
2781 | |
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */

while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2787 | struct fw_wr_hdr *wr; |
2788 | unsigned int ndesc = skb->priority; /* previously saved */ |
2789 | |
2790 | written += ndesc; |
2791 | /* Write descriptors and free skbs outside the lock to limit |
2792 | * wait times. q->full is still set so new skbs will be queued. |
2793 | */ |
2794 | wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; |
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
2797 | |
2798 | cxgb4_inline_tx_skb(skb, &q->q, wr); |
2799 | kfree_skb(skb); |
2800 | |
2801 | if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { |
2802 | unsigned long old = q->q.stops; |
2803 | |
2804 | ctrlq_check_stop(q, wr); |
2805 | if (q->q.stops != old) { /* suspended anew */ |
spin_lock(&q->sendq.lock);
2807 | goto ringdb; |
2808 | } |
2809 | } |
2810 | if (written > 16) { |
2811 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
2812 | written = 0; |
2813 | } |
spin_lock(&q->sendq.lock);
2815 | } |
2816 | q->full = 0; |
2817 | ringdb: |
2818 | if (written) |
2819 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
spin_unlock(&q->sendq.lock);
2821 | } |
2822 | |
2823 | /** |
2824 | * t4_mgmt_tx - send a management message |
2825 | * @adap: the adapter |
2826 | * @skb: the packet containing the management message |
2827 | * |
2828 | * Send a management message through control queue 0. |
2829 | */ |
2830 | int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) |
2831 | { |
2832 | int ret; |
2833 | |
2834 | local_bh_disable(); |
ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2836 | local_bh_enable(); |
2837 | return ret; |
2838 | } |
2839 | |
2840 | /** |
2841 | * is_ofld_imm - check whether a packet can be sent as immediate data |
2842 | * @skb: the packet |
2843 | * |
2844 | * Returns true if a packet can be sent as an offload WR with immediate |
2845 | * data. |
2846 | * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field. |
2847 | * However, FW_ULPTX_WR commands have a 256 byte immediate only |
2848 | * payload limit. |
2849 | */ |
2850 | static inline int is_ofld_imm(const struct sk_buff *skb) |
2851 | { |
2852 | struct work_request_hdr *req = (struct work_request_hdr *)skb->data; |
2853 | unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); |
2854 | |
2855 | if (unlikely(opcode == FW_ULPTX_WR)) |
2856 | return skb->len <= MAX_IMM_ULPTX_WR_LEN; |
2857 | else if (opcode == FW_CRYPTO_LOOKASIDE_WR) |
2858 | return skb->len <= SGE_MAX_WR_LEN; |
2859 | else |
2860 | return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; |
2861 | } |
2862 | |
2863 | /** |
2864 | * calc_tx_flits_ofld - calculate # of flits for an offload packet |
2865 | * @skb: the packet |
2866 | * |
2867 | * Returns the number of flits needed for the given offload packet. |
2868 | * These packets are already fully constructed and no additional headers |
2869 | * will be added. |
2870 | */ |
2871 | static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) |
2872 | { |
2873 | unsigned int flits, cnt; |
2874 | |
2875 | if (is_ofld_imm(skb)) |
2876 | return DIV_ROUND_UP(skb->len, 8); |
2877 | |
2878 | flits = skb_transport_offset(skb) / 8U; /* headers */ |
2879 | cnt = skb_shinfo(skb)->nr_frags; |
2880 | if (skb_tail_pointer(skb) != skb_transport_header(skb)) |
2881 | cnt++; |
return flits + sgl_len(cnt);
2883 | } |
2884 | |
2885 | /** |
2886 | * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion |
2887 | * @q: the queue to stop |
2888 | * |
2889 | * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting |
2890 | * inability to map packets. A periodic timer attempts to restart |
2891 | * queues so marked. |
2892 | */ |
2893 | static void txq_stop_maperr(struct sge_uld_txq *q) |
2894 | { |
2895 | q->mapping_err++; |
2896 | q->q.stops++; |
set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
q->adap->sge.txq_maperr);
2899 | } |
2900 | |
2901 | /** |
2902 | * ofldtxq_stop - stop an offload Tx queue that has become full |
2903 | * @q: the queue to stop |
2904 | * @wr: the Work Request causing the queue to become full |
2905 | * |
2906 | * Stops an offload Tx queue that has become full and modifies the packet |
2907 | * being written to request a wakeup. |
2908 | */ |
2909 | static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr) |
2910 | { |
2911 | wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); |
2912 | q->q.stops++; |
2913 | q->full = 1; |
2914 | } |
2915 | |
2916 | /** |
2917 | * service_ofldq - service/restart a suspended offload queue |
2918 | * @q: the offload queue |
2919 | * |
2920 | * Services an offload Tx queue by moving packets from its Pending Send |
2921 | * Queue to the Hardware TX ring. The function starts and ends with the |
2922 | * Send Queue locked, but drops the lock while putting the skb at the |
2923 | * head of the Send Queue onto the Hardware TX Ring. Dropping the lock |
2924 | * allows more skbs to be added to the Send Queue by other threads. |
2925 | * The packet being processed at the head of the Pending Send Queue is |
2926 | * left on the queue in case we experience DMA Mapping errors, etc. |
2927 | * and need to give up and restart later. |
2928 | * |
2929 | * service_ofldq() can be thought of as a task which opportunistically |
2930 | * uses other threads execution contexts. We use the Offload Queue |
2931 | * boolean "service_ofldq_running" to make sure that only one instance |
2932 | * is ever running at a time ... |
2933 | */ |
2934 | static void service_ofldq(struct sge_uld_txq *q) |
2935 | __must_hold(&q->sendq.lock) |
2936 | { |
2937 | u64 *pos, *before, *end; |
2938 | int credits; |
2939 | struct sk_buff *skb; |
2940 | struct sge_txq *txq; |
2941 | unsigned int left; |
2942 | unsigned int written = 0; |
2943 | unsigned int flits, ndesc; |
2944 | |
2945 | /* If another thread is currently in service_ofldq() processing the |
2946 | * Pending Send Queue then there's nothing to do. Otherwise, flag |
2947 | * that we're doing the work and continue. Examining/modifying |
2948 | * the Offload Queue boolean "service_ofldq_running" must be done |
2949 | * while holding the Pending Send Queue Lock. |
2950 | */ |
2951 | if (q->service_ofldq_running) |
2952 | return; |
2953 | q->service_ofldq_running = true; |
2954 | |
while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2956 | /* We drop the lock while we're working with the skb at the |
2957 | * head of the Pending Send Queue. This allows more skbs to |
2958 | * be added to the Pending Send Queue while we're working on |
2959 | * this one. We don't need to lock to guard the TX Ring |
2960 | * updates because only one thread of execution is ever |
2961 | * allowed into service_ofldq() at a time. |
2962 | */ |
spin_unlock(&q->sendq.lock);

cxgb4_reclaim_completed_tx(q->adap, &q->q, false);

flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
BUG_ON(credits < 0);
if (unlikely(credits < TXQ_STOP_THRES))
ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2973 | |
2974 | pos = (u64 *)&q->q.desc[q->q.pidx]; |
2975 | if (is_ofld_imm(skb)) |
2976 | cxgb4_inline_tx_skb(skb, &q->q, pos); |
2977 | else if (cxgb4_map_skb(q->adap->pdev_dev, skb, |
2978 | (dma_addr_t *)skb->head)) { |
2979 | txq_stop_maperr(q); |
spin_lock(&q->sendq.lock);
2981 | break; |
2982 | } else { |
2983 | int last_desc, hdr_len = skb_transport_offset(skb); |
2984 | |
2985 | /* The WR headers may not fit within one descriptor. |
2986 | * So we need to deal with wrap-around here. |
2987 | */ |
2988 | before = (u64 *)pos; |
2989 | end = (u64 *)pos + flits; |
2990 | txq = &q->q; |
pos = (void *)inline_tx_skb_header(skb, &q->q,
(void *)pos,
hdr_len);
2994 | if (before > (u64 *)pos) { |
2995 | left = (u8 *)end - (u8 *)txq->stat; |
2996 | end = (void *)txq->desc + left; |
2997 | } |
2998 | |
2999 | /* If current position is already at the end of the |
3000 | * ofld queue, reset the current to point to |
3001 | * start of the queue and update the end ptr as well. |
3002 | */ |
3003 | if (pos == (u64 *)txq->stat) { |
3004 | left = (u8 *)end - (u8 *)txq->stat; |
3005 | end = (void *)txq->desc + left; |
3006 | pos = (void *)txq->desc; |
3007 | } |
3008 | |
3009 | cxgb4_write_sgl(skb, &q->q, (void *)pos, |
3010 | end, hdr_len, |
3011 | (dma_addr_t *)skb->head); |
3012 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
3013 | skb->dev = q->adap->port[0]; |
3014 | skb->destructor = deferred_unmap_destructor; |
3015 | #endif |
3016 | last_desc = q->q.pidx + ndesc - 1; |
3017 | if (last_desc >= q->q.size) |
3018 | last_desc -= q->q.size; |
3019 | q->q.sdesc[last_desc].skb = skb; |
3020 | } |
3021 | |
txq_advance(&q->q, ndesc);
3023 | written += ndesc; |
3024 | if (unlikely(written > 32)) { |
3025 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
3026 | written = 0; |
3027 | } |
3028 | |
3029 | /* Reacquire the Pending Send Queue Lock so we can unlink the |
3030 | * skb we've just successfully transferred to the TX Ring and |
3031 | * loop for the next skb which may be at the head of the |
3032 | * Pending Send Queue. |
3033 | */ |
spin_lock(&q->sendq.lock);
__skb_unlink(skb, &q->sendq);
3036 | if (is_ofld_imm(skb)) |
3037 | kfree_skb(skb); |
3038 | } |
3039 | if (likely(written)) |
3040 | cxgb4_ring_tx_db(q->adap, &q->q, written); |
3041 | |
/* Indicate that no thread is processing the Pending Send Queue
3043 | * currently. |
3044 | */ |
3045 | q->service_ofldq_running = false; |
3046 | } |
3047 | |
3048 | /** |
3049 | * ofld_xmit - send a packet through an offload queue |
3050 | * @q: the Tx offload queue |
3051 | * @skb: the packet |
3052 | * |
3053 | * Send an offload packet through an SGE offload queue. |
3054 | */ |
3055 | static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb) |
3056 | { |
3057 | skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ |
spin_lock(&q->sendq.lock);
3059 | |
3060 | /* Queue the new skb onto the Offload Queue's Pending Send Queue. If |
3061 | * that results in this new skb being the only one on the queue, start |
3062 | * servicing it. If there are other skbs already on the list, then |
3063 | * either the queue is currently being processed or it's been stopped |
3064 | * for some reason and it'll be restarted at a later time. Restart |
3065 | * paths are triggered by events like experiencing a DMA Mapping Error |
3066 | * or filling the Hardware TX Ring. |
3067 | */ |
__skb_queue_tail(&q->sendq, skb);
3069 | if (q->sendq.qlen == 1) |
3070 | service_ofldq(q); |
3071 | |
spin_unlock(&q->sendq.lock);
3073 | return NET_XMIT_SUCCESS; |
3074 | } |
3075 | |
3076 | /** |
3077 | * restart_ofldq - restart a suspended offload queue |
3078 | * @t: pointer to the tasklet associated with this handler |
3079 | * |
3080 | * Resumes transmission on a suspended Tx offload queue. |
3081 | */ |
3082 | static void restart_ofldq(struct tasklet_struct *t) |
3083 | { |
3084 | struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk); |
3085 | |
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
service_ofldq(q);
spin_unlock(&q->sendq.lock);
3090 | } |
3091 | |
3092 | /** |
3093 | * skb_txq - return the Tx queue an offload packet should use |
3094 | * @skb: the packet |
3095 | * |
3096 | * Returns the Tx queue an offload packet should use as indicated by bits |
3097 | * 1-15 in the packet's queue_mapping. |
3098 | */ |
3099 | static inline unsigned int skb_txq(const struct sk_buff *skb) |
3100 | { |
3101 | return skb->queue_mapping >> 1; |
3102 | } |
3103 | |
3104 | /** |
3105 | * is_ctrl_pkt - return whether an offload packet is a control packet |
3106 | * @skb: the packet |
3107 | * |
3108 | * Returns whether an offload packet should use an OFLD or a CTRL |
3109 | * Tx queue as indicated by bit 0 in the packet's queue_mapping. |
3110 | */ |
3111 | static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) |
3112 | { |
3113 | return skb->queue_mapping & 1; |
3114 | } |
3115 | |
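/* Send an offload packet through the appropriate Tx queue: control
 * packets go out on an SGE control queue, everything else on the ULD's
 * offload Tx queue selected by the packet's queue_mapping.
 */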
3116 | static inline int uld_send(struct adapter *adap, struct sk_buff *skb, |
3117 | unsigned int tx_uld_type) |
3118 | { |
3119 | struct sge_uld_txq_info *txq_info; |
3120 | struct sge_uld_txq *txq; |
3121 | unsigned int idx = skb_txq(skb); |
3122 | |
3123 | if (unlikely(is_ctrl_pkt(skb))) { |
3124 | /* Single ctrl queue is a requirement for LE workaround path */ |
3125 | if (adap->tids.nsftids) |
3126 | idx = 0; |
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3128 | } |
3129 | |
3130 | txq_info = adap->sge.uld_txq_info[tx_uld_type]; |
3131 | if (unlikely(!txq_info)) { |
3132 | WARN_ON(true); |
3133 | kfree_skb(skb); |
3134 | return NET_XMIT_DROP; |
3135 | } |
3136 | |
3137 | txq = &txq_info->uldtxq[idx]; |
	return ofld_xmit(txq, skb);
3139 | } |
3140 | |
3141 | /** |
3142 | * t4_ofld_send - send an offload packet |
3143 | * @adap: the adapter |
3144 | * @skb: the packet |
3145 | * |
3146 | * Sends an offload packet. We use the packet queue_mapping to select the |
3147 | * appropriate Tx queue as follows: bit 0 indicates whether the packet |
3148 | * should be sent as regular or control, bits 1-15 select the queue. |
3149 | */ |
3150 | int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) |
3151 | { |
3152 | int ret; |
3153 | |
3154 | local_bh_disable(); |
	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
3156 | local_bh_enable(); |
3157 | return ret; |
3158 | } |
3159 | |
3160 | /** |
3161 | * cxgb4_ofld_send - send an offload packet |
3162 | * @dev: the net device |
3163 | * @skb: the packet |
3164 | * |
3165 | * Sends an offload packet. This is an exported version of @t4_ofld_send, |
3166 | * intended for ULDs. |
3167 | */ |
3168 | int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) |
3169 | { |
	return t4_ofld_send(netdev2adap(dev), skb);
3171 | } |
3172 | EXPORT_SYMBOL(cxgb4_ofld_send); |
3173 | |
static void *inline_tx_header(const void *src,
3175 | const struct sge_txq *q, |
3176 | void *pos, int length) |
3177 | { |
3178 | int left = (void *)q->stat - pos; |
3179 | u64 *p; |
3180 | |
3181 | if (likely(length <= left)) { |
3182 | memcpy(pos, src, length); |
3183 | pos += length; |
3184 | } else { |
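		/* The copy would run past the end of the TX descriptor ring
		 * (q->stat marks the status page at the end), so wrap the
		 * remainder around to the start of the ring at q->desc.
		 */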
3185 | memcpy(pos, src, left); |
3186 | memcpy(q->desc, src + left, length - left); |
3187 | pos = (void *)q->desc + (length - left); |
3188 | } |
3189 | /* 0-pad to multiple of 16 */ |
3190 | p = PTR_ALIGN(pos, 8); |
3191 | if ((uintptr_t)p & 8) { |
3192 | *p = 0; |
3193 | return p + 1; |
3194 | } |
3195 | return p; |
3196 | } |
3197 | |
3198 | /** |
3199 | * ofld_xmit_direct - copy a WR into offload queue |
3200 | * @q: the Tx offload queue |
3201 | * @src: location of WR |
3202 | * @len: WR length |
3203 | * |
3204 | * Copy an immediate WR into an uncontended SGE offload queue. |
3205 | */ |
3206 | static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src, |
3207 | unsigned int len) |
3208 | { |
3209 | unsigned int ndesc; |
3210 | int credits; |
3211 | u64 *pos; |
3212 | |
3213 | /* Use the lower limit as the cut-off */ |
3214 | if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) { |
3215 | WARN_ON(1); |
3216 | return NET_XMIT_DROP; |
3217 | } |
3218 | |
	/* Don't return NET_XMIT_CN here as the current
	 * implementation doesn't queue the request
	 * using an skb when the following conditions are not met.
	 */
	if (!spin_trylock(&q->sendq.lock))
3224 | return NET_XMIT_DROP; |
3225 | |
	if (q->full || !skb_queue_empty(&q->sendq) ||
	    q->service_ofldq_running) {
		spin_unlock(&q->sendq.lock);
3229 | return NET_XMIT_DROP; |
3230 | } |
3231 | ndesc = flits_to_desc(DIV_ROUND_UP(len, 8)); |
	credits = txq_avail(&q->q) - ndesc;
3233 | pos = (u64 *)&q->q.desc[q->q.pidx]; |
3234 | |
3235 | /* ofldtxq_stop modifies WR header in-situ */ |
	inline_tx_header(src, &q->q, pos, len);
	if (unlikely(credits < TXQ_STOP_THRES))
		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
	txq_advance(&q->q, ndesc);
3240 | cxgb4_ring_tx_db(q->adap, &q->q, ndesc); |
3241 | |
	spin_unlock(&q->sendq.lock);
3243 | return NET_XMIT_SUCCESS; |
3244 | } |
3245 | |
3246 | int cxgb4_immdata_send(struct net_device *dev, unsigned int idx, |
3247 | const void *src, unsigned int len) |
3248 | { |
3249 | struct sge_uld_txq_info *txq_info; |
3250 | struct sge_uld_txq *txq; |
3251 | struct adapter *adap; |
3252 | int ret; |
3253 | |
3254 | adap = netdev2adap(dev); |
3255 | |
3256 | local_bh_disable(); |
3257 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; |
3258 | if (unlikely(!txq_info)) { |
3259 | WARN_ON(true); |
3260 | local_bh_enable(); |
3261 | return NET_XMIT_DROP; |
3262 | } |
3263 | txq = &txq_info->uldtxq[idx]; |
3264 | |
	ret = ofld_xmit_direct(txq, src, len);
3266 | local_bh_enable(); |
3267 | return net_xmit_eval(ret); |
3268 | } |
3269 | EXPORT_SYMBOL(cxgb4_immdata_send); |
3270 | |
3271 | /** |
3272 | * t4_crypto_send - send crypto packet |
3273 | * @adap: the adapter |
3274 | * @skb: the packet |
3275 | * |
3276 | * Sends crypto packet. We use the packet queue_mapping to select the |
3277 | * appropriate Tx queue as follows: bit 0 indicates whether the packet |
3278 | * should be sent as regular or control, bits 1-15 select the queue. |
3279 | */ |
3280 | static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb) |
3281 | { |
3282 | int ret; |
3283 | |
3284 | local_bh_disable(); |
	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3286 | local_bh_enable(); |
3287 | return ret; |
3288 | } |
3289 | |
3290 | /** |
3291 | * cxgb4_crypto_send - send crypto packet |
3292 | * @dev: the net device |
3293 | * @skb: the packet |
3294 | * |
3295 | * Sends crypto packet. This is an exported version of @t4_crypto_send, |
3296 | * intended for ULDs. |
3297 | */ |
3298 | int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb) |
3299 | { |
	return t4_crypto_send(netdev2adap(dev), skb);
3301 | } |
3302 | EXPORT_SYMBOL(cxgb4_crypto_send); |
3303 | |
3304 | static inline void copy_frags(struct sk_buff *skb, |
3305 | const struct pkt_gl *gl, unsigned int offset) |
3306 | { |
3307 | int i; |
3308 | |
3309 | /* usually there's just one frag */ |
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
3313 | skb_shinfo(skb)->nr_frags = gl->nfrags; |
3314 | for (i = 1; i < gl->nfrags; i++) |
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);
3318 | |
3319 | /* get a reference to the last page, we don't own it */ |
	get_page(gl->frags[gl->nfrags - 1].page);
3321 | } |
3322 | |
3323 | /** |
3324 | * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list |
3325 | * @gl: the gather list |
3326 | * @skb_len: size of sk_buff main body if it carries fragments |
3327 | * @pull_len: amount of data to move to the sk_buff's main body |
3328 | * |
3329 | * Builds an sk_buff from the given packet gather list. Returns the |
3330 | * sk_buff or %NULL if sk_buff allocation failed. |
3331 | */ |
3332 | struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, |
3333 | unsigned int skb_len, unsigned int pull_len) |
3334 | { |
3335 | struct sk_buff *skb; |
3336 | |
3337 | /* |
3338 | * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer |
3339 | * size, which is expected since buffers are at least PAGE_SIZEd. |
3340 | * In this case packets up to RX_COPY_THRES have only one fragment. |
3341 | */ |
3342 | if (gl->tot_len <= RX_COPY_THRES) { |
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
3356 | skb->len = gl->tot_len; |
3357 | skb->data_len = skb->len - pull_len; |
3358 | skb->truesize += skb->data_len; |
3359 | } |
3360 | out: return skb; |
3361 | } |
3362 | EXPORT_SYMBOL(cxgb4_pktgl_to_skb); |
3363 | |
3364 | /** |
3365 | * t4_pktgl_free - free a packet gather list |
3366 | * @gl: the gather list |
3367 | * |
3368 | * Releases the pages of a packet gather list. We do not own the last |
3369 | * page on the list and do not free it. |
3370 | */ |
3371 | static void t4_pktgl_free(const struct pkt_gl *gl) |
3372 | { |
3373 | int n; |
3374 | const struct page_frag *p; |
3375 | |
3376 | for (p = gl->frags, n = gl->nfrags - 1; n--; p++) |
		put_page(p->page);
3378 | } |
3379 | |
3380 | /* |
3381 | * Process an MPS trace packet. Give it an unused protocol number so it won't |
3382 | * be delivered to anyone and send it to the stack for capture. |
3383 | */ |
3384 | static noinline int handle_trace_pkt(struct adapter *adap, |
3385 | const struct pkt_gl *gl) |
3386 | { |
3387 | struct sk_buff *skb; |
3388 | |
3389 | skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); |
3390 | if (unlikely(!skb)) { |
3391 | t4_pktgl_free(gl); |
3392 | return 0; |
3393 | } |
3394 | |
	if (is_t4(adap->params.chip))
		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
	else
		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
3399 | |
3400 | skb_reset_mac_header(skb); |
3401 | skb->protocol = htons(0xffff); |
3402 | skb->dev = adap->port[0]; |
3403 | netif_receive_skb(skb); |
3404 | return 0; |
3405 | } |
3406 | |
3407 | /** |
3408 | * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp |
3409 | * @adap: the adapter |
3410 | * @hwtstamps: time stamp structure to update |
 * @sgetstamp: 60-bit iqe timestamp
 *
 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
 * convert it to ktime_t and store it in @hwtstamps.
 **/
3416 | static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, |
3417 | struct skb_shared_hwtstamps *hwtstamps, |
3418 | u64 sgetstamp) |
3419 | { |
3420 | u64 ns; |
3421 | u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); |
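	/* vpd.cclk holds the Core Clock in kHz, so ticks * 10^6 / cclk yields
	 * nanoseconds; the cclk / 2 added to tmp rounds the division below to
	 * the nearest tick.
	 */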
3422 | |
	ns = div_u64(tmp, adap->params.vpd.cclk);
3424 | |
3425 | memset(hwtstamps, 0, sizeof(*hwtstamps)); |
3426 | hwtstamps->hwtstamp = ns_to_ktime(ns); |
3427 | } |
3428 | |
3429 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, |
3430 | const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) |
3431 | { |
3432 | struct adapter *adapter = rxq->rspq.adap; |
3433 | struct sge *s = &adapter->sge; |
3434 | struct port_info *pi; |
3435 | int ret; |
3436 | struct sk_buff *skb; |
3437 | |
	skb = napi_get_frags(&rxq->rspq.napi);
3439 | if (unlikely(!skb)) { |
3440 | t4_pktgl_free(gl); |
3441 | rxq->stats.rx_drops++; |
3442 | return; |
3443 | } |
3444 | |
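	/* Attach the gather-list pages directly as skb fragments, skipping the
	 * adapter's packet-shift padding at the start of the first buffer.
	 */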
	copy_frags(skb, gl, s->pktshift);
3446 | if (tnl_hdr_len) |
3447 | skb->csum_level = 1; |
3448 | skb->len = gl->tot_len - s->pktshift; |
3449 | skb->data_len = skb->len; |
3450 | skb->truesize += skb->data_len; |
3451 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
	skb_record_rx_queue(skb, rxq->rspq.idx);
	pi = netdev_priv(skb->dev);
	if (pi->rxtstamp)
		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
					 gl->sgetstamp);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);
3460 | |
3461 | if (unlikely(pkt->vlan_ex)) { |
3462 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
3463 | rxq->stats.vlan_ex++; |
3464 | } |
	ret = napi_gro_frags(&rxq->rspq.napi);
3466 | if (ret == GRO_HELD) |
3467 | rxq->stats.lro_pkts++; |
3468 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) |
3469 | rxq->stats.lro_merged++; |
3470 | rxq->stats.pkts++; |
3471 | rxq->stats.rx_cso++; |
3472 | } |
3473 | |
3474 | enum { |
3475 | RX_NON_PTP_PKT = 0, |
3476 | RX_PTP_PKT_SUC = 1, |
3477 | RX_PTP_PKT_ERR = 2 |
3478 | }; |
3479 | |
3480 | /** |
3481 | * t4_systim_to_hwstamp - read hardware time stamp |
3482 | * @adapter: the adapter |
3483 | * @skb: the packet |
3484 | * |
3485 | * Read Time Stamp from MPS packet and insert in skb which |
3486 | * is forwarded to PTP application |
3487 | */ |
3488 | static noinline int t4_systim_to_hwstamp(struct adapter *adapter, |
3489 | struct sk_buff *skb) |
3490 | { |
3491 | struct skb_shared_hwtstamps *hwtstamps; |
3492 | struct cpl_rx_mps_pkt *cpl = NULL; |
3493 | unsigned char *data; |
3494 | int offset; |
3495 | |
3496 | cpl = (struct cpl_rx_mps_pkt *)skb->data; |
3497 | if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & |
3498 | X_CPL_RX_MPS_PKT_TYPE_PTP)) |
3499 | return RX_PTP_PKT_ERR; |
3500 | |
3501 | data = skb->data + sizeof(*cpl); |
	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3503 | offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; |
3504 | if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) |
3505 | return RX_PTP_PKT_ERR; |
3506 | |
3507 | hwtstamps = skb_hwtstamps(skb); |
3508 | memset(hwtstamps, 0, sizeof(*hwtstamps)); |
	hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
3510 | |
3511 | return RX_PTP_PKT_SUC; |
3512 | } |
3513 | |
3514 | /** |
3515 | * t4_rx_hststamp - Recv PTP Event Message |
3516 | * @adapter: the adapter |
3517 | * @rsp: the response queue descriptor holding the RX_PKT message |
3518 | * @rxq: the response queue holding the RX_PKT message |
3519 | * @skb: the packet |
3520 | * |
3521 | * PTP enabled and MPS packet, read HW timestamp |
3522 | */ |
3523 | static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, |
3524 | struct sge_eth_rxq *rxq, struct sk_buff *skb) |
3525 | { |
3526 | int ret; |
3527 | |
3528 | if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) && |
3529 | !is_t4(adapter->params.chip))) { |
3530 | ret = t4_systim_to_hwstamp(adapter, skb); |
3531 | if (ret == RX_PTP_PKT_ERR) { |
3532 | kfree_skb(skb); |
3533 | rxq->stats.rx_drops++; |
3534 | } |
3535 | return ret; |
3536 | } |
3537 | return RX_NON_PTP_PKT; |
3538 | } |
3539 | |
3540 | /** |
3541 | * t4_tx_hststamp - Loopback PTP Transmit Event Message |
3542 | * @adapter: the adapter |
3543 | * @skb: the packet |
3544 | * @dev: the ingress net device |
3545 | * |
3546 | * Read hardware timestamp for the loopback PTP Tx event message |
3547 | */ |
3548 | static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, |
3549 | struct net_device *dev) |
3550 | { |
3551 | struct port_info *pi = netdev_priv(dev); |
3552 | |
	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
		cxgb4_ptp_read_hwstamp(adapter, pi);
3555 | kfree_skb(skb); |
3556 | return 0; |
3557 | } |
3558 | return 1; |
3559 | } |
3560 | |
3561 | /** |
3562 | * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages |
3563 | * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue |
3564 | * @rsp: Response Entry pointer into Response Queue |
3565 | * @gl: Gather List pointer |
3566 | * |
3567 | * For adapters which support the SGE Doorbell Queue Timer facility, |
3568 | * we configure the Ethernet TX Queues to send CIDX Updates to the |
3569 | * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE |
3570 | * messages. This adds a small load to PCIe Link RX bandwidth and, |
3571 | * potentially, higher CPU Interrupt load, but allows us to respond |
3572 | * much more quickly to the CIDX Updates. This is important for |
3573 | * Upper Layer Software which isn't willing to have a large amount |
3574 | * of TX Data outstanding before receiving DMA Completions. |
3575 | */ |
3576 | static void t4_tx_completion_handler(struct sge_rspq *rspq, |
3577 | const __be64 *rsp, |
3578 | const struct pkt_gl *gl) |
3579 | { |
3580 | u8 opcode = ((const struct rss_header *)rsp)->opcode; |
	struct port_info *pi = netdev_priv(rspq->netdev);
3582 | struct adapter *adapter = rspq->adap; |
3583 | struct sge *s = &adapter->sge; |
3584 | struct sge_eth_txq *txq; |
3585 | |
3586 | /* skip RSS header */ |
3587 | rsp++; |
3588 | |
3589 | /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. |
3590 | */ |
3591 | if (unlikely(opcode == CPL_FW4_MSG && |
3592 | ((const struct cpl_fw4_msg *)rsp)->type == |
3593 | FW_TYPE_RSSCPL)) { |
3594 | rsp++; |
3595 | opcode = ((const struct rss_header *)rsp)->opcode; |
3596 | rsp++; |
3597 | } |
3598 | |
3599 | if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { |
		pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
			__func__, opcode);
3602 | return; |
3603 | } |
3604 | |
3605 | txq = &s->ethtxq[pi->first_qset + rspq->idx]; |
3606 | |
3607 | /* We've got the Hardware Consumer Index Update in the Egress Update |
3608 | * message. These Egress Update messages will be our sole CIDX Updates |
3609 | * we get since we don't want to chew up PCIe bandwidth for both Ingress |
	 * Messages and Status Page writes. However, the code which manages
3611 | * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value |
3612 | * stored in the Status Page at the end of the TX Queue. It's easiest |
3613 | * to simply copy the CIDX Update value from the Egress Update message |
3614 | * to the Status Page. Also note that no Endian issues need to be |
3615 | * considered here since both are Big Endian and we're just copying |
3616 | * bytes consistently ... |
3617 | */ |
3618 | if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { |
3619 | struct cpl_sge_egr_update *egr; |
3620 | |
3621 | egr = (struct cpl_sge_egr_update *)rsp; |
3622 | WRITE_ONCE(txq->q.stat->cidx, egr->cidx); |
3623 | } |
3624 | |
	t4_sge_eth_txq_egress_update(adapter, txq, -1);
3626 | } |
3627 | |
3628 | static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si) |
3629 | { |
3630 | struct adapter *adap = pi->adapter; |
3631 | struct cxgb4_ethtool_lb_test *lb; |
3632 | struct sge *s = &adap->sge; |
3633 | struct net_device *netdev; |
3634 | u8 *data; |
3635 | int i; |
3636 | |
3637 | netdev = adap->port[pi->port_id]; |
3638 | lb = &pi->ethtool_lb; |
3639 | data = si->va + s->pktshift; |
3640 | |
3641 | i = ETH_ALEN; |
	if (!ether_addr_equal(data + i, netdev->dev_addr))
3643 | return -1; |
3644 | |
3645 | i += ETH_ALEN; |
3646 | if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR)) |
3647 | lb->result = -EIO; |
3648 | |
3649 | complete(&lb->completion); |
3650 | return 0; |
3651 | } |
3652 | |
3653 | /** |
3654 | * t4_ethrx_handler - process an ingress ethernet packet |
3655 | * @q: the response queue that received the packet |
3656 | * @rsp: the response queue descriptor holding the RX_PKT message |
3657 | * @si: the gather list of packet fragments |
3658 | * |
3659 | * Process an ingress ethernet packet and deliver it to the stack. |
3660 | */ |
3661 | int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, |
3662 | const struct pkt_gl *si) |
3663 | { |
3664 | bool csum_ok; |
3665 | struct sk_buff *skb; |
3666 | const struct cpl_rx_pkt *pkt; |
3667 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); |
3668 | struct adapter *adapter = q->adap; |
3669 | struct sge *s = &q->adap->sge; |
	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3671 | CPL_TRACE_PKT : CPL_TRACE_PKT_T5; |
3672 | u16 err_vec, tnl_hdr_len = 0; |
3673 | struct port_info *pi; |
3674 | int ret = 0; |
3675 | |
	pi = netdev_priv(q->netdev);
3677 | /* If we're looking at TX Queue CIDX Update, handle that separately |
3678 | * and return. |
3679 | */ |
3680 | if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || |
3681 | (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { |
		t4_tx_completion_handler(q, rsp, si);
3683 | return 0; |
3684 | } |
3685 | |
3686 | if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) |
		return handle_trace_pkt(q->adap, si);
3688 | |
3689 | pkt = (const struct cpl_rx_pkt *)rsp; |
3690 | /* Compressed error vector is enabled for T6 only */ |
3691 | if (q->adap->params.tp.rx_pkt_encap) { |
3692 | err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); |
3693 | tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); |
3694 | } else { |
3695 | err_vec = be16_to_cpu(pkt->err_vec); |
3696 | } |
3697 | |
3698 | csum_ok = pkt->csum_calc && !err_vec && |
3699 | (q->netdev->features & NETIF_F_RXCSUM); |
3700 | |
3701 | if (err_vec) |
3702 | rxq->stats.bad_rx_pkts++; |
3703 | |
3704 | if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { |
3705 | ret = cxgb4_validate_lb_pkt(pi, si); |
3706 | if (!ret) |
3707 | return 0; |
3708 | } |
3709 | |
3710 | if (((pkt->l2info & htonl(RXF_TCP_F)) || |
3711 | tnl_hdr_len) && |
3712 | (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { |
		do_gro(rxq, si, pkt, tnl_hdr_len);
3714 | return 0; |
3715 | } |
3716 | |
3717 | skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); |
3718 | if (unlikely(!skb)) { |
		t4_pktgl_free(si);
3720 | rxq->stats.rx_drops++; |
3721 | return 0; |
3722 | } |
3723 | |
3724 | /* Handle PTP Event Rx packet */ |
3725 | if (unlikely(pi->ptp_enable)) { |
3726 | ret = t4_rx_hststamp(adapter, rsp, rxq, skb); |
3727 | if (ret == RX_PTP_PKT_ERR) |
3728 | return 0; |
3729 | } |
3730 | if (likely(!ret)) |
		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3732 | |
3733 | /* Handle the PTP Event Tx Loopback packet */ |
3734 | if (unlikely(pi->ptp_enable && !ret && |
3735 | (pkt->l2info & htonl(RXF_UDP_F)) && |
3736 | cxgb4_ptp_is_ptp_rx(skb))) { |
		if (!t4_tx_hststamp(adapter, skb, q->netdev))
3738 | return 0; |
3739 | } |
3740 | |
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);
3746 | |
3747 | rxq->stats.pkts++; |
3748 | |
3749 | if (pi->rxtstamp) |
		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
					 si->sgetstamp);
3752 | if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { |
3753 | if (!pkt->ip_frag) { |
3754 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3755 | rxq->stats.rx_cso++; |
3756 | } else if (pkt->l2info & htonl(RXF_IP_F)) { |
3757 | __sum16 c = (__force __sum16)pkt->csum; |
			skb->csum = csum_unfold(c);
3759 | |
3760 | if (tnl_hdr_len) { |
3761 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3762 | skb->csum_level = 1; |
3763 | } else { |
3764 | skb->ip_summed = CHECKSUM_COMPLETE; |
3765 | } |
3766 | rxq->stats.rx_cso++; |
3767 | } |
3768 | } else { |
3769 | skb_checksum_none_assert(skb); |
3770 | #ifdef CONFIG_CHELSIO_T4_FCOE |
3771 | #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ |
3772 | RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) |
3773 | |
3774 | if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { |
3775 | if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && |
3776 | (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { |
3777 | if (q->adap->params.tp.rx_pkt_encap) |
3778 | csum_ok = err_vec & |
3779 | T6_COMPR_RXERR_SUM_F; |
3780 | else |
3781 | csum_ok = err_vec & RXERR_CSUM_F; |
3782 | if (!csum_ok) |
3783 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3784 | } |
3785 | } |
3786 | |
3787 | #undef CPL_RX_PKT_FLAGS |
3788 | #endif /* CONFIG_CHELSIO_T4_FCOE */ |
3789 | } |
3790 | |
3791 | if (unlikely(pkt->vlan_ex)) { |
3792 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); |
3793 | rxq->stats.vlan_ex++; |
3794 | } |
	skb_mark_napi_id(skb, &q->napi);
3796 | netif_receive_skb(skb); |
3797 | return 0; |
3798 | } |
3799 | |
3800 | /** |
3801 | * restore_rx_bufs - put back a packet's Rx buffers |
3802 | * @si: the packet gather list |
3803 | * @q: the SGE free list |
3804 | * @frags: number of FL buffers to restore |
3805 | * |
3806 | * Puts back on an FL the Rx buffers associated with @si. The buffers |
3807 | * have already been unmapped and are left unmapped, we mark them so to |
3808 | * prevent further unmapping attempts. |
3809 | * |
3810 | * This function undoes a series of @unmap_rx_buf calls when we find out |
 * that the current packet can't be processed right away after all and we
3812 | * need to come back to it later. This is a very rare event and there's |
3813 | * no effort to make this particularly efficient. |
3814 | */ |
3815 | static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, |
3816 | int frags) |
3817 | { |
3818 | struct rx_sw_desc *d; |
3819 | |
3820 | while (frags--) { |
3821 | if (q->cidx == 0) |
3822 | q->cidx = q->size - 1; |
3823 | else |
3824 | q->cidx--; |
3825 | d = &q->sdesc[q->cidx]; |
3826 | d->page = si->frags[frags].page; |
3827 | d->dma_addr |= RX_UNMAPPED_BUF; |
3828 | q->avail++; |
3829 | } |
3830 | } |
3831 | |
3832 | /** |
3833 | * is_new_response - check if a response is newly written |
3834 | * @r: the response descriptor |
3835 | * @q: the response queue |
3836 | * |
3837 | * Returns true if a response descriptor contains a yet unprocessed |
3838 | * response. |
3839 | */ |
3840 | static inline bool is_new_response(const struct rsp_ctrl *r, |
3841 | const struct sge_rspq *q) |
3842 | { |
3843 | return (r->type_gen >> RSPD_GEN_S) == q->gen; |
3844 | } |
3845 | |
3846 | /** |
3847 | * rspq_next - advance to the next entry in a response queue |
3848 | * @q: the queue |
3849 | * |
3850 | * Updates the state of a response queue to advance it to the next entry. |
3851 | */ |
3852 | static inline void rspq_next(struct sge_rspq *q) |
3853 | { |
3854 | q->cur_desc = (void *)q->cur_desc + q->iqe_len; |
3855 | if (unlikely(++q->cidx == q->size)) { |
3856 | q->cidx = 0; |
3857 | q->gen ^= 1; |
3858 | q->cur_desc = q->desc; |
3859 | } |
3860 | } |
3861 | |
3862 | /** |
3863 | * process_responses - process responses from an SGE response queue |
3864 | * @q: the ingress queue to process |
3865 | * @budget: how many responses can be processed in this round |
3866 | * |
3867 | * Process responses from an SGE response queue up to the supplied budget. |
3868 | * Responses include received packets as well as control messages from FW |
3869 | * or HW. |
3870 | * |
3871 | * Additionally choose the interrupt holdoff time for the next interrupt |
3872 | * on this queue. If the system is under memory shortage use a fairly |
3873 | * long delay to help recovery. |
3874 | */ |
3875 | static int process_responses(struct sge_rspq *q, int budget) |
3876 | { |
3877 | int ret, rsp_type; |
3878 | int budget_left = budget; |
3879 | const struct rsp_ctrl *rc; |
3880 | struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); |
3881 | struct adapter *adapter = q->adap; |
3882 | struct sge *s = &adapter->sge; |
3883 | |
3884 | while (likely(budget_left)) { |
3885 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); |
		if (!is_new_response(rc, q)) {
3887 | if (q->flush_handler) |
3888 | q->flush_handler(q); |
3889 | break; |
3890 | } |
3891 | |
3892 | dma_rmb(); |
3893 | rsp_type = RSPD_TYPE_G(rc->type_gen); |
3894 | if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) { |
3895 | struct page_frag *fp; |
3896 | struct pkt_gl si; |
3897 | const struct rx_sw_desc *rsd; |
3898 | u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; |
3899 | |
3900 | if (len & RSPD_NEWBUF_F) { |
3901 | if (likely(q->offset > 0)) { |
					free_rx_bufs(q->adap, &rxq->fl, 1);
3903 | q->offset = 0; |
3904 | } |
3905 | len = RSPD_LEN_G(len); |
3906 | } |
3907 | si.tot_len = len; |
3908 | |
3909 | /* gather packet fragments */ |
3910 | for (frags = 0, fp = si.frags; ; frags++, fp++) { |
3911 | rsd = &rxq->fl.sdesc[rxq->fl.cidx]; |
				bufsz = get_buf_size(adapter, rsd);
3913 | fp->page = rsd->page; |
3914 | fp->offset = q->offset; |
3915 | fp->size = min(bufsz, len); |
3916 | len -= fp->size; |
3917 | if (!len) |
3918 | break; |
				unmap_rx_buf(q->adap, &rxq->fl);
3920 | } |
3921 | |
3922 | si.sgetstamp = SGE_TIMESTAMP_G( |
3923 | be64_to_cpu(rc->last_flit)); |
3924 | /* |
3925 | * Last buffer remains mapped so explicitly make it |
3926 | * coherent for CPU access. |
3927 | */ |
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);
3931 | |
3932 | si.va = page_address(si.frags[0].page) + |
3933 | si.frags[0].offset; |
3934 | prefetch(si.va); |
3935 | |
3936 | si.nfrags = frags + 1; |
3937 | ret = q->handler(q, q->cur_desc, &si); |
3938 | if (likely(ret == 0)) |
3939 | q->offset += ALIGN(fp->size, s->fl_align); |
3940 | else |
				restore_rx_bufs(&si, &rxq->fl, frags);
3942 | } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) { |
3943 | ret = q->handler(q, q->cur_desc, NULL); |
3944 | } else { |
3945 | ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); |
3946 | } |
3947 | |
3948 | if (unlikely(ret)) { |
3949 | /* couldn't process descriptor, back off for recovery */ |
3950 | q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); |
3951 | break; |
3952 | } |
3953 | |
3954 | rspq_next(q); |
3955 | budget_left--; |
3956 | } |
3957 | |
	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
3960 | return budget - budget_left; |
3961 | } |
3962 | |
3963 | /** |
3964 | * napi_rx_handler - the NAPI handler for Rx processing |
3965 | * @napi: the napi instance |
3966 | * @budget: how many packets we can process in this round |
3967 | * |
3968 | * Handler for new data events when using NAPI. This does not need any |
3969 | * locking or protection from interrupts as data interrupts are off at |
3970 | * this point and other adapter interrupts do not interfere (the latter |
 * is not a concern at all with MSI-X as non-data interrupts then have
3972 | * a separate handler). |
3973 | */ |
3974 | static int napi_rx_handler(struct napi_struct *napi, int budget) |
3975 | { |
3976 | unsigned int params; |
3977 | struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); |
3978 | int work_done; |
3979 | u32 val; |
3980 | |
3981 | work_done = process_responses(q, budget); |
3982 | if (likely(work_done < budget)) { |
3983 | int timer_index; |
3984 | |
		napi_complete_done(napi, work_done);
3986 | timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); |
3987 | |
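		/* Adaptive Rx: if this poll did more work than the packet
		 * quota for the current holdoff timer, move to the next timer
		 * index; otherwise move back one, clamped to the valid range
		 * of SGE timer indices.
		 */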
3988 | if (q->adaptive_rx) { |
3989 | if (work_done > max(timer_pkt_quota[timer_index], |
3990 | MIN_NAPI_WORK)) |
3991 | timer_index = (timer_index + 1); |
3992 | else |
3993 | timer_index = timer_index - 1; |
3994 | |
3995 | timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); |
3996 | q->next_intr_params = |
3997 | QINTR_TIMER_IDX_V(timer_index) | |
3998 | QINTR_CNT_EN_V(0); |
3999 | params = q->next_intr_params; |
4000 | } else { |
4001 | params = q->next_intr_params; |
4002 | q->next_intr_params = q->intr_params; |
4003 | } |
4004 | } else |
4005 | params = QINTR_TIMER_IDX_V(7); |
4006 | |
4007 | val = CIDXINC_V(work_done) | SEINTARM_V(params); |
4008 | |
4009 | /* If we don't have access to the new User GTS (T5+), use the old |
4010 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
4011 | */ |
4012 | if (unlikely(q->bar2_addr == NULL)) { |
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
4018 | wmb(); |
4019 | } |
4020 | return work_done; |
4021 | } |
4022 | |
4023 | void cxgb4_ethofld_restart(struct tasklet_struct *t) |
4024 | { |
4025 | struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, |
4026 | qresume_tsk); |
4027 | int pktcount; |
4028 | |
	spin_lock(&eosw_txq->lock);
4030 | pktcount = eosw_txq->cidx - eosw_txq->last_cidx; |
4031 | if (pktcount < 0) |
4032 | pktcount += eosw_txq->ndesc; |
4033 | |
4034 | if (pktcount) { |
		cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
					 eosw_txq, pktcount);
4037 | eosw_txq->inuse -= pktcount; |
4038 | } |
4039 | |
4040 | /* There may be some packets waiting for completions. So, |
4041 | * attempt to send these packets now. |
4042 | */ |
	ethofld_xmit(eosw_txq->netdev, eosw_txq);
	spin_unlock(&eosw_txq->lock);
4045 | } |
4046 | |
4047 | /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions |
4048 | * @q: the response queue that received the packet |
4049 | * @rsp: the response queue descriptor holding the CPL message |
4050 | * @si: the gather list of packet fragments |
4051 | * |
 * Process an ETHOFLD Tx completion. Increment the cidx here, but
4053 | * free up the descriptors in a tasklet later. |
4054 | */ |
4055 | int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp, |
4056 | const struct pkt_gl *si) |
4057 | { |
4058 | u8 opcode = ((const struct rss_header *)rsp)->opcode; |
4059 | |
4060 | /* skip RSS header */ |
4061 | rsp++; |
4062 | |
4063 | if (opcode == CPL_FW4_ACK) { |
4064 | const struct cpl_fw4_ack *cpl; |
4065 | struct sge_eosw_txq *eosw_txq; |
4066 | struct eotid_entry *entry; |
4067 | struct sk_buff *skb; |
4068 | u32 hdr_len, eotid; |
4069 | u8 flits, wrlen16; |
4070 | int credits; |
4071 | |
4072 | cpl = (const struct cpl_fw4_ack *)rsp; |
4073 | eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - |
4074 | q->adap->tids.eotid_base; |
		entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4076 | if (!entry) |
4077 | goto out_done; |
4078 | |
4079 | eosw_txq = (struct sge_eosw_txq *)entry->data; |
4080 | if (!eosw_txq) |
4081 | goto out_done; |
4082 | |
		spin_lock(&eosw_txq->lock);
4084 | credits = cpl->credits; |
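		/* Walk the pending descriptors, recomputing for each skb the
		 * number of flits its Work Request used so the FW4_ACK credits
		 * (16-byte units) can be converted back into completed skbs.
		 */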
4085 | while (credits > 0) { |
4086 | skb = eosw_txq->desc[eosw_txq->cidx].skb; |
4087 | if (!skb) |
4088 | break; |
4089 | |
4090 | if (unlikely((eosw_txq->state == |
4091 | CXGB4_EO_STATE_FLOWC_OPEN_REPLY || |
4092 | eosw_txq->state == |
4093 | CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) && |
4094 | eosw_txq->cidx == eosw_txq->flowc_idx)) { |
4095 | flits = DIV_ROUND_UP(skb->len, 8); |
4096 | if (eosw_txq->state == |
4097 | CXGB4_EO_STATE_FLOWC_OPEN_REPLY) |
4098 | eosw_txq->state = CXGB4_EO_STATE_ACTIVE; |
4099 | else |
4100 | eosw_txq->state = CXGB4_EO_STATE_CLOSED; |
4101 | complete(&eosw_txq->completion); |
4102 | } else { |
				hdr_len = eth_get_headlen(eosw_txq->netdev,
							  skb->data,
							  skb_headlen(skb));
				flits = ethofld_calc_tx_flits(q->adap, skb,
							      hdr_len);
4108 | } |
			eosw_txq_advance_index(&eosw_txq->cidx, 1,
					       eosw_txq->ndesc);
4111 | wrlen16 = DIV_ROUND_UP(flits * 8, 16); |
4112 | credits -= wrlen16; |
4113 | } |
4114 | |
4115 | eosw_txq->cred += cpl->credits; |
4116 | eosw_txq->ncompl--; |
4117 | |
		spin_unlock(&eosw_txq->lock);
4119 | |
4120 | /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, |
4121 | * if there were packets waiting for completion. |
4122 | */ |
		tasklet_schedule(&eosw_txq->qresume_tsk);
4124 | } |
4125 | |
4126 | out_done: |
4127 | return 0; |
4128 | } |
4129 | |
4130 | /* |
4131 | * The MSI-X interrupt handler for an SGE response queue. |
4132 | */ |
4133 | irqreturn_t t4_sge_intr_msix(int irq, void *cookie) |
4134 | { |
4135 | struct sge_rspq *q = cookie; |
4136 | |
	napi_schedule(&q->napi);
4138 | return IRQ_HANDLED; |
4139 | } |
4140 | |
4141 | /* |
4142 | * Process the indirect interrupt entries in the interrupt queue and kick off |
4143 | * NAPI for each queue that has generated an entry. |
4144 | */ |
4145 | static unsigned int process_intrq(struct adapter *adap) |
4146 | { |
4147 | unsigned int credits; |
4148 | const struct rsp_ctrl *rc; |
4149 | struct sge_rspq *q = &adap->sge.intrq; |
4150 | u32 val; |
4151 | |
	spin_lock(&adap->sge.intrq_lock);
4153 | for (credits = 0; ; credits++) { |
4154 | rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); |
		if (!is_new_response(rc, q))
4156 | break; |
4157 | |
4158 | dma_rmb(); |
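		/* Each INTR entry carries the absolute ingress queue id of the
		 * queue that raised the interrupt; convert it to an index into
		 * our ingress map and kick that queue's NAPI instance.
		 */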
4159 | if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { |
4160 | unsigned int qid = ntohl(rc->pldbuflen_qid); |
4161 | |
4162 | qid -= adap->sge.ingr_start; |
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
4164 | } |
4165 | |
4166 | rspq_next(q); |
4167 | } |
4168 | |
4169 | val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); |
4170 | |
4171 | /* If we don't have access to the new User GTS (T5+), use the old |
4172 | * doorbell mechanism; otherwise use the new BAR2 mechanism. |
4173 | */ |
4174 | if (unlikely(q->bar2_addr == NULL)) { |
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V(q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
4180 | wmb(); |
4181 | } |
	spin_unlock(&adap->sge.intrq_lock);
4183 | return credits; |
4184 | } |
4185 | |
4186 | /* |
4187 | * The MSI interrupt handler, which handles data events from SGE response queues |
4188 | * as well as error and other async events as they all use the same MSI vector. |
4189 | */ |
4190 | static irqreturn_t t4_intr_msi(int irq, void *cookie) |
4191 | { |
4192 | struct adapter *adap = cookie; |
4193 | |
4194 | if (adap->flags & CXGB4_MASTER_PF) |
		t4_slow_intr_handler(adap);
4196 | process_intrq(adap); |
4197 | return IRQ_HANDLED; |
4198 | } |
4199 | |
4200 | /* |
4201 | * Interrupt handler for legacy INTx interrupts. |
4202 | * Handles data events from SGE response queues as well as error and other |
4203 | * async events as they all use the same interrupt line. |
4204 | */ |
4205 | static irqreturn_t t4_intr_intx(int irq, void *cookie) |
4206 | { |
4207 | struct adapter *adap = cookie; |
4208 | |
	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
	if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4211 | process_intrq(adap)) |
4212 | return IRQ_HANDLED; |
4213 | return IRQ_NONE; /* probably shared interrupt */ |
4214 | } |
4215 | |
4216 | /** |
4217 | * t4_intr_handler - select the top-level interrupt handler |
4218 | * @adap: the adapter |
4219 | * |
4220 | * Selects the top-level interrupt handler based on the type of interrupts |
4221 | * (MSI-X, MSI, or INTx). |
4222 | */ |
4223 | irq_handler_t t4_intr_handler(struct adapter *adap) |
4224 | { |
4225 | if (adap->flags & CXGB4_USING_MSIX) |
4226 | return t4_sge_intr_msix; |
4227 | if (adap->flags & CXGB4_USING_MSI) |
4228 | return t4_intr_msi; |
4229 | return t4_intr_intx; |
4230 | } |
4231 | |
4232 | static void sge_rx_timer_cb(struct timer_list *t) |
4233 | { |
4234 | unsigned long m; |
4235 | unsigned int i; |
4236 | struct adapter *adap = from_timer(adap, t, sge.rx_timer); |
4237 | struct sge *s = &adap->sge; |
4238 | |
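	/* Walk the bitmap of Free Lists flagged as starving and kick NAPI for
	 * any that are still starving so they get replenished; re-mark those
	 * whose NAPI could not be scheduled.
	 */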
4239 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
4240 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
4241 | struct sge_eth_rxq *rxq; |
4242 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; |
4243 | struct sge_fl *fl = s->egr_map[id]; |
4244 | |
			clear_bit(id, s->starving_fl);
4246 | smp_mb__after_atomic(); |
4247 | |
			if (fl_starving(adap, fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_schedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
4254 | } |
4255 | } |
4256 | /* The remainder of the SGE RX Timer Callback routine is dedicated to |
4257 | * global Master PF activities like checking for chip ingress stalls, |
4258 | * etc. |
4259 | */ |
4260 | if (!(adap->flags & CXGB4_MASTER_PF)) |
4261 | goto done; |
4262 | |
	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4264 | |
4265 | done: |
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
4267 | } |
4268 | |
4269 | static void sge_tx_timer_cb(struct timer_list *t) |
4270 | { |
4271 | struct adapter *adap = from_timer(adap, t, sge.tx_timer); |
4272 | struct sge *s = &adap->sge; |
4273 | unsigned long m, period; |
4274 | unsigned int i, budget; |
4275 | |
4276 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
4277 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
4278 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; |
4279 | struct sge_uld_txq *txq = s->egr_map[id]; |
4280 | |
			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
4283 | } |
4284 | |
	if (!is_t4(adap->params.chip)) {
4286 | struct sge_eth_txq *q = &s->ptptxq; |
4287 | int avail; |
4288 | |
		spin_lock(&adap->ptp_lock);
		avail = reclaimable(&q->q);
4291 | |
4292 | if (avail) { |
			free_tx_desc(adap, &q->q, avail, false);
4294 | q->q.in_use -= avail; |
4295 | } |
		spin_unlock(&adap->ptp_lock);
4297 | } |
4298 | |
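	/* Reclaim completed TX descriptors from the Ethernet TX queues in a
	 * round-robin fashion, picking up where the previous timer tick left
	 * off and stopping once the per-tick reclaim budget is exhausted.
	 */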
4299 | budget = MAX_TIMER_TX_RECLAIM; |
4300 | i = s->ethtxq_rover; |
4301 | do { |
		budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
						       budget);
4304 | if (!budget) |
4305 | break; |
4306 | |
4307 | if (++i >= s->ethqsets) |
4308 | i = 0; |
4309 | } while (i != s->ethtxq_rover); |
4310 | s->ethtxq_rover = i; |
4311 | |
4312 | if (budget == 0) { |
4313 | /* If we found too many reclaimable packets schedule a timer |
4314 | * in the near future to continue where we left off. |
4315 | */ |
4316 | period = 2; |
4317 | } else { |
4318 | /* We reclaimed all reclaimable TX Descriptors, so reschedule |
4319 | * at the normal period. |
4320 | */ |
4321 | period = TX_QCHECK_PERIOD; |
4322 | } |
4323 | |
	mod_timer(&s->tx_timer, jiffies + period);
4325 | } |
4326 | |
4327 | /** |
4328 | * bar2_address - return the BAR2 address for an SGE Queue's Registers |
4329 | * @adapter: the adapter |
4330 | * @qid: the SGE Queue ID |
4331 | * @qtype: the SGE Queue Type (Egress or Ingress) |
4332 | * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues |
4333 | * |
4334 | * Returns the BAR2 address for the SGE Queue Registers associated with |
4335 | * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also |
4336 | * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE |
4337 | * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" |
4338 | * Registers are supported (e.g. the Write Combining Doorbell Buffer). |
4339 | */ |
4340 | static void __iomem *bar2_address(struct adapter *adapter, |
4341 | unsigned int qid, |
4342 | enum t4_bar2_qtype qtype, |
4343 | unsigned int *pbar2_qid) |
4344 | { |
4345 | u64 bar2_qoffset; |
4346 | int ret; |
4347 | |
	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
				&bar2_qoffset, pbar2_qid);
4350 | if (ret) |
4351 | return NULL; |
4352 | |
4353 | return adapter->bar2 + bar2_qoffset; |
4354 | } |
4355 | |
4356 | /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0 |
4357 | * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map |
4358 | */ |
4359 | int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, |
4360 | struct net_device *dev, int intr_idx, |
4361 | struct sge_fl *fl, rspq_handler_t hnd, |
4362 | rspq_flush_handler_t flush_hnd, int cong) |
4363 | { |
4364 | int ret, flsz = 0; |
4365 | struct fw_iq_cmd c; |
4366 | struct sge *s = &adap->sge; |
4367 | struct port_info *pi = netdev_priv(dev); |
4368 | int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); |
4369 | |
4370 | /* Size needs to be multiple of 16, including status entry. */ |
4371 | iq->size = roundup(iq->size, 16); |
4372 | |
	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0,
			      dev_to_node(adap->pdev_dev));
4376 | if (!iq->desc) |
4377 | return -ENOMEM; |
4378 | |
4379 | memset(&c, 0, sizeof(c)); |
4380 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | |
4381 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | |
4382 | FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); |
4383 | c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | |
4384 | FW_LEN16(c)); |
4385 | c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | |
4386 | FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | |
4387 | FW_IQ_CMD_IQANDST_V(intr_idx < 0) | |
4388 | FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) | |
4389 | FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : |
4390 | -intr_idx - 1)); |
4391 | c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | |
4392 | FW_IQ_CMD_IQGTSMODE_F | |
4393 | FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | |
4394 | FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); |
4395 | c.iqsize = htons(iq->size); |
4396 | c.iqaddr = cpu_to_be64(iq->phys_addr); |
4397 | if (cong >= 0) |
4398 | c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | |
4399 | FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC |
4400 | : FW_IQ_IQTYPE_OFLD)); |
4401 | |
4402 | if (fl) { |
4403 | unsigned int chip_ver = |
4404 | CHELSIO_CHIP_VERSION(adap->params.chip); |
4405 | |
4406 | /* Allocate the ring for the hardware free list (with space |
4407 | * for its status page) along with the associated software |
4408 | * descriptor ring. The free list size needs to be a multiple |
4409 | * of the Egress Queue Unit and at least 2 Egress Units larger |
		 * than the SGE's Egress Congestion Threshold
4411 | * (fl_starve_thres - 1). |
4412 | */ |
4413 | if (fl->size < s->fl_starve_thres - 1 + 2 * 8) |
4414 | fl->size = s->fl_starve_thres - 1 + 2 * 8; |
4415 | fl->size = roundup(fl->size, 8); |
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, s->stat_len,
				      dev_to_node(adap->pdev_dev));
4420 | if (!fl->desc) |
4421 | goto fl_nomem; |
4422 | |
4423 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); |
4424 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | |
4425 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | |
4426 | FW_IQ_CMD_FL0DATARO_V(relaxed) | |
4427 | FW_IQ_CMD_FL0PADEN_F); |
4428 | if (cong >= 0) |
4429 | c.iqns_to_fl0congen |= |
4430 | htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | |
4431 | FW_IQ_CMD_FL0CONGCIF_F | |
4432 | FW_IQ_CMD_FL0CONGEN_F); |
4433 | /* In T6, for egress queue type FL there is internal overhead |
4434 | * of 16B for header going into FLM module. Hence the maximum |
4435 | * allowed burst size is 448 bytes. For T4/T5, the hardware |
4436 | * doesn't coalesce fetch requests if more than 64 bytes of |
4437 | * Free List pointers are provided, so we use a 128-byte Fetch |
4438 | * Burst Minimum there (T6 implements coalescing so we can use |
4439 | * the smaller 64-byte value there). |
4440 | */ |
4441 | c.fl0dcaen_to_fl0cidxfthresh = |
4442 | htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? |
4443 | FETCHBURSTMIN_128B_X : |
4444 | FETCHBURSTMIN_64B_T6_X) | |
4445 | FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? |
4446 | FETCHBURSTMAX_512B_X : |
4447 | FETCHBURSTMAX_256B_X)); |
4448 | c.fl0size = htons(flsz); |
4449 | c.fl0addr = cpu_to_be64(fl->addr); |
4450 | } |
4451 | |
	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4453 | if (ret) |
4454 | goto err; |
4455 | |
	netif_napi_add(dev, &iq->napi, napi_rx_handler);
4457 | iq->cur_desc = iq->desc; |
4458 | iq->cidx = 0; |
4459 | iq->gen = 1; |
4460 | iq->next_intr_params = iq->intr_params; |
4461 | iq->cntxt_id = ntohs(c.iqid); |
4462 | iq->abs_id = ntohs(c.physiqid); |
	iq->bar2_addr = bar2_address(adap,
				     iq->cntxt_id,
				     T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
4467 | iq->size--; /* subtract status entry */ |
4468 | iq->netdev = dev; |
4469 | iq->handler = hnd; |
4470 | iq->flush_handler = flush_hnd; |
4471 | |
4472 | memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); |
	skb_queue_head_init(&iq->lro_mgr.lroq);
4474 | |
4475 | /* set offset to -1 to distinguish ingress queues without FL */ |
4476 | iq->offset = fl ? 0 : -1; |
4477 | |
4478 | adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; |
4479 | |
4480 | if (fl) { |
4481 | fl->cntxt_id = ntohs(c.fl0id); |
4482 | fl->avail = fl->pend_cred = 0; |
4483 | fl->pidx = fl->cidx = 0; |
4484 | fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; |
4485 | adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; |
4486 | |
4487 | /* Note, we must initialize the BAR2 Free List User Doorbell |
4488 | * information before refilling the Free List! |
4489 | */ |
		fl->bar2_addr = bar2_address(adap,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4495 | } |
4496 | |
4497 | /* For T5 and later we attempt to set up the Congestion Manager values |
4498 | * of the new RX Ethernet Queue. This should really be handled by |
4499 | * firmware because it's more complex than any host driver wants to |
4500 | * get involved with and it's different per chip and this is almost |
4501 | * certainly wrong. Firmware would be wrong as well, but it would be |
4502 | * a lot easier to fix in one place ... For now we do something very |
4503 | * simple (and hopefully less wrong). |
4504 | */ |
	if (!is_t4(adap->params.chip) && cong >= 0) {
4506 | u32 param, val, ch_map = 0; |
4507 | int i; |
4508 | u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; |
4509 | |
4510 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | |
4511 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | |
4512 | FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); |
4513 | if (cong == 0) { |
4514 | val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X); |
4515 | } else { |
4516 | val = |
4517 | CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X); |
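			/* Map each channel bit in @cong to the corresponding
			 * group of bits in the Congestion Channel Map; the
			 * per-channel width (cng_ch_bits_log) varies by chip.
			 */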
4518 | for (i = 0; i < 4; i++) { |
4519 | if (cong & (1 << i)) |
4520 | ch_map |= 1 << (i << cng_ch_bits_log); |
4521 | } |
4522 | val |= CONMCTXT_CNGCHMAP_V(ch_map); |
4523 | } |
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion"
				 " Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
4530 | } |
4531 | |
4532 | return 0; |
4533 | |
4534 | fl_nomem: |
4535 | ret = -ENOMEM; |
4536 | err: |
4537 | if (iq->desc) { |
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
4540 | iq->desc = NULL; |
4541 | } |
4542 | if (fl && fl->desc) { |
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
4547 | fl->desc = NULL; |
4548 | } |
4549 | return ret; |
4550 | } |
4551 | |
4552 | static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) |
4553 | { |
4554 | q->cntxt_id = id; |
	q->bar2_addr = bar2_address(adap,
				    q->cntxt_id,
				    T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
4559 | q->in_use = 0; |
4560 | q->cidx = q->pidx = 0; |
4561 | q->stops = q->restarts = 0; |
4562 | q->stat = (void *)&q->desc[q->size]; |
4563 | spin_lock_init(&q->db_lock); |
4564 | adap->sge.egr_map[id - adap->sge.egr_start] = q; |
4565 | } |
4566 | |
4567 | /** |
4568 | * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue |
4569 | * @adap: the adapter |
4570 | * @txq: the SGE Ethernet TX Queue to initialize |
4571 | * @dev: the Linux Network Device |
4572 | * @netdevq: the corresponding Linux TX Queue |
4573 | * @iqid: the Ingress Queue to which to deliver CIDX Update messages |
4574 | * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers |
4575 | */ |
4576 | int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, |
4577 | struct net_device *dev, struct netdev_queue *netdevq, |
4578 | unsigned int iqid, u8 dbqt) |
4579 | { |
4580 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); |
4581 | struct port_info *pi = netdev_priv(dev); |
4582 | struct sge *s = &adap->sge; |
4583 | struct fw_eq_eth_cmd c; |
4584 | int ret, nentries; |
4585 | |
4586 | /* Add status entries */ |
4587 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
4588 | |
	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
			netdev_queue_numa_node_read(netdevq));
4593 | if (!txq->q.desc) |
4594 | return -ENOMEM; |
4595 | |
4596 | memset(&c, 0, sizeof(c)); |
4597 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | |
4598 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | |
4599 | FW_EQ_ETH_CMD_PFN_V(adap->pf) | |
4600 | FW_EQ_ETH_CMD_VFN_V(0)); |
4601 | c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | |
4602 | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); |
4603 | |
4604 | /* For TX Ethernet Queues using the SGE Doorbell Queue Timer |
4605 | * mechanism, we use Ingress Queue messages for Hardware Consumer |
4606 | * Index Updates on the TX Queue. Otherwise we have the Hardware |
4607 | * write the CIDX Updates into the Status Page at the end of the |
4608 | * TX Queue. |
4609 | */ |
4610 | c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ? |
4611 | FW_EQ_ETH_CMD_AUTOEQUIQE_F : |
4612 | FW_EQ_ETH_CMD_AUTOEQUEQE_F) | |
4613 | FW_EQ_ETH_CMD_VIID_V(pi->viid)); |
4614 | |
4615 | c.fetchszm_to_iqid = |
4616 | htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ? |
4617 | HOSTFCMODE_INGRESS_QUEUE_X : |
4618 | HOSTFCMODE_STATUS_PAGE_X) | |
4619 | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | |
4620 | FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); |
4621 | |
4622 | /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ |
4623 | c.dcaen_to_eqsize = |
4624 | htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 |
4625 | ? FETCHBURSTMIN_64B_X |
4626 | : FETCHBURSTMIN_64B_T6_X) | |
4627 | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | |
4628 | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | |
4629 | FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) | |
4630 | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); |
4631 | |
4632 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
4633 | |
4634 | /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the |
	 * currently configured Timer Index. This can be changed later via an
4636 | * ethtool -C tx-usecs {Timer Val} command. Note that the SGE |
4637 | * Doorbell Queue mode is currently automatically enabled in the |
4638 | * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ... |
4639 | */ |
4640 | if (dbqt) |
4641 | c.timeren_timerix = |
4642 | cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | |
4643 | FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); |
4644 | |
	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
4652 | txq->q.desc = NULL; |
4653 | return ret; |
4654 | } |
4655 | |
4656 | txq->q.q_type = CXGB4_TXQ_ETH; |
	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4658 | txq->txq = netdevq; |
4659 | txq->tso = 0; |
4660 | txq->uso = 0; |
4661 | txq->tx_cso = 0; |
4662 | txq->vlan_ins = 0; |
4663 | txq->mapping_err = 0; |
4664 | txq->dbqt = dbqt; |
4665 | |
4666 | return 0; |
4667 | } |
4668 | |
4669 | int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, |
4670 | struct net_device *dev, unsigned int iqid, |
4671 | unsigned int cmplqid) |
4672 | { |
4673 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); |
4674 | struct port_info *pi = netdev_priv(dev); |
4675 | struct sge *s = &adap->sge; |
4676 | struct fw_eq_ctrl_cmd c; |
4677 | int ret, nentries; |
4678 | |
4679 | /* Add status entries */ |
4680 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
4681 | |
	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, dev_to_node(adap->pdev_dev));
4685 | if (!txq->q.desc) |
4686 | return -ENOMEM; |
4687 | |
4688 | c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | |
4689 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | |
4690 | FW_EQ_CTRL_CMD_PFN_V(adap->pf) | |
4691 | FW_EQ_CTRL_CMD_VFN_V(0)); |
4692 | c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | |
4693 | FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); |
4694 | c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); |
4695 | c.physeqid_pkd = htonl(0); |
4696 | c.fetchszm_to_iqid = |
4697 | htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | |
4698 | FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | |
4699 | FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); |
4700 | c.dcaen_to_eqsize = |
4701 | htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 |
4702 | ? FETCHBURSTMIN_64B_X |
4703 | : FETCHBURSTMIN_64B_T6_X) | |
4704 | FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | |
4705 | FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | |
4706 | FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); |
4707 | c.eqaddr = cpu_to_be64(txq->q.phys_addr); |
4708 | |
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
txq->q.desc, txq->q.phys_addr);
4714 | txq->q.desc = NULL; |
4715 | return ret; |
4716 | } |
4717 | |
4718 | txq->q.q_type = CXGB4_TXQ_CTRL; |
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4723 | txq->full = 0; |
4724 | return 0; |
4725 | } |
4726 | |
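/**
* t4_sge_mod_ctrl_txq - modify the completion queue of a control Tx queue
* @adap: the adapter
* @eqid: the ID of the control egress queue to modify
* @cmplqid: the new completion ingress queue ID
*
* Repoints an existing control egress queue at a different completion
* ingress queue via the DMAQ_EQ_CMPLIQID_CTRL firmware parameter.
*/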
4727 | int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid, |
4728 | unsigned int cmplqid) |
4729 | { |
4730 | u32 param, val; |
4731 | |
4732 | param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | |
4733 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) | |
4734 | FW_PARAMS_PARAM_YZ_V(eqid)); |
4735 | val = cmplqid; |
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
4737 | } |
4738 | |
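/* Common worker for the offload-style Tx queue allocators below: it sizes
* the descriptor ring (including status entries), issues the firmware
* Egress Queue command selected by @cmd (FW_EQ_OFLD_CMD or FW_EQ_CTRL_CMD)
* and initializes the generic sge_txq state.
*/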
4739 | static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q, |
4740 | struct net_device *dev, u32 cmd, u32 iqid) |
4741 | { |
4742 | unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); |
4743 | struct port_info *pi = netdev_priv(dev); |
4744 | struct sge *s = &adap->sge; |
4745 | struct fw_eq_ofld_cmd c; |
4746 | u32 fb_min, nentries; |
4747 | int ret; |
4748 | |
4749 | /* Add status entries */ |
4750 | nentries = q->size + s->stat_len / sizeof(struct tx_desc); |
q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
sizeof(struct tx_sw_desc), &q->phys_addr,
&q->sdesc, s->stat_len, NUMA_NO_NODE);
4754 | if (!q->desc) |
4755 | return -ENOMEM; |
4756 | |
4757 | if (chip_ver <= CHELSIO_T5) |
4758 | fb_min = FETCHBURSTMIN_64B_X; |
4759 | else |
4760 | fb_min = FETCHBURSTMIN_64B_T6_X; |
4761 | |
4762 | memset(&c, 0, sizeof(c)); |
4763 | c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | |
4764 | FW_CMD_WRITE_F | FW_CMD_EXEC_F | |
4765 | FW_EQ_OFLD_CMD_PFN_V(adap->pf) | |
4766 | FW_EQ_OFLD_CMD_VFN_V(0)); |
4767 | c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | |
4768 | FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); |
4769 | c.fetchszm_to_iqid = |
4770 | htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | |
4771 | FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | |
4772 | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); |
4773 | c.dcaen_to_eqsize = |
4774 | htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) | |
4775 | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | |
4776 | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | |
4777 | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); |
4778 | c.eqaddr = cpu_to_be64(q->phys_addr); |
4779 | |
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(q->sdesc);
q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
q->desc, q->phys_addr);
4787 | q->desc = NULL; |
4788 | return ret; |
4789 | } |
4790 | |
4791 | init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); |
4792 | return 0; |
4793 | } |
4794 | |
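/**
* t4_sge_alloc_uld_txq - allocate a Tx queue for an upper-layer driver
* @adap: the adapter
* @txq: the ULD Tx queue to initialize
* @dev: the Linux network device
* @iqid: the ingress queue associated with the new egress queue
* @uld_type: the ULD type (e.g. CXGB4_TX_OFLD or CXGB4_TX_CRYPTO)
*
* Crypto Tx queues are backed by a firmware control egress queue; all
* other ULD types use an offload egress queue.  Returns 0 on success or a
* negative errno.
*/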
4795 | int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, |
4796 | struct net_device *dev, unsigned int iqid, |
4797 | unsigned int uld_type) |
4798 | { |
4799 | u32 cmd = FW_EQ_OFLD_CMD; |
4800 | int ret; |
4801 | |
4802 | if (unlikely(uld_type == CXGB4_TX_CRYPTO)) |
4803 | cmd = FW_EQ_CTRL_CMD; |
4804 | |
ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4806 | if (ret) |
4807 | return ret; |
4808 | |
4809 | txq->q.q_type = CXGB4_TXQ_ULD; |
4810 | txq->adap = adap; |
skb_queue_head_init(&txq->sendq);
tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4813 | txq->full = 0; |
4814 | txq->mapping_err = 0; |
4815 | return 0; |
4816 | } |
4817 | |
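/**
* t4_sge_alloc_ethofld_txq - allocate an Ethernet-offload (ETHOFLD) Tx queue
* @adap: the adapter
* @txq: the ETHOFLD Tx queue to initialize
* @dev: the Linux network device
* @iqid: the ingress queue associated with the new egress queue
*
* Builds the queue on top of an offload egress queue and resets its
* software statistics.  Returns 0 on success or a negative errno.
*/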
4818 | int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, |
4819 | struct net_device *dev, u32 iqid) |
4820 | { |
4821 | int ret; |
4822 | |
ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4824 | if (ret) |
4825 | return ret; |
4826 | |
4827 | txq->q.q_type = CXGB4_TXQ_ULD; |
4828 | spin_lock_init(&txq->lock); |
4829 | txq->adap = adap; |
4830 | txq->tso = 0; |
4831 | txq->uso = 0; |
4832 | txq->tx_cso = 0; |
4833 | txq->vlan_ins = 0; |
4834 | txq->mapping_err = 0; |
4835 | return 0; |
4836 | } |
4837 | |
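/**
* free_txq - release the descriptor ring of a Tx queue
* @adap: the adapter
* @q: the Tx queue
*
* Frees the DMA-coherent descriptor ring (including the trailing status
* page area) and clears the queue's software bookkeeping.  Callers free
* the software descriptor array and any pending skbs before calling this.
*/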
4838 | void free_txq(struct adapter *adap, struct sge_txq *q) |
4839 | { |
4840 | struct sge *s = &adap->sge; |
4841 | |
dma_free_coherent(adap->pdev_dev,
q->size * sizeof(struct tx_desc) + s->stat_len,
q->desc, q->phys_addr);
4845 | q->cntxt_id = 0; |
4846 | q->sdesc = NULL; |
4847 | q->desc = NULL; |
4848 | } |
4849 | |
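/**
* free_rspq_fl - free a response queue and an optional free list
* @adap: the adapter
* @rq: the response queue to free
* @fl: the associated free list, or NULL if there is none
*
* Releases the firmware ingress queue context, the response queue ring and
* its NAPI instance and, when a free list is supplied, the free list's
* buffers, descriptor ring and software descriptors.
*/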
4850 | void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, |
4851 | struct sge_fl *fl) |
4852 | { |
4853 | struct sge *s = &adap->sge; |
4854 | unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; |
4855 | |
4856 | adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; |
t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
netif_napi_del(&rq->napi);
4862 | rq->netdev = NULL; |
4863 | rq->cntxt_id = rq->abs_id = 0; |
4864 | rq->desc = NULL; |
4865 | |
4866 | if (fl) { |
free_rx_bufs(adap, fl, fl->avail);
dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
4871 | fl->sdesc = NULL; |
4872 | fl->cntxt_id = 0; |
4873 | fl->desc = NULL; |
4874 | } |
4875 | } |
4876 | |
4877 | /** |
4878 | * t4_free_ofld_rxqs - free a block of consecutive Rx queues |
4879 | * @adap: the adapter |
4880 | * @n: number of queues |
4881 | * @q: pointer to first queue |
4882 | * |
4883 | * Release the resources of a consecutive block of offload Rx queues. |
4884 | */ |
4885 | void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) |
4886 | { |
4887 | for ( ; n; n--, q++) |
4888 | if (q->rspq.desc) |
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
4891 | } |
4892 | |
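/**
* t4_sge_free_ethofld_txq - free an ETHOFLD Tx queue
* @adap: the adapter
* @txq: the queue to free
*
* Releases the firmware egress queue context, any descriptors still in use
* and the host memory backing the queue.
*/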
4893 | void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq) |
4894 | { |
4895 | if (txq->q.desc) { |
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
free_tx_desc(adap, &txq->q, txq->q.in_use, false);
kfree(txq->q.sdesc);
free_txq(adap, &txq->q);
4901 | } |
4902 | } |
4903 | |
4904 | /** |
4905 | * t4_free_sge_resources - free SGE resources |
4906 | * @adap: the adapter |
4907 | * |
4908 | * Frees resources used by the SGE queue sets. |
4909 | */ |
4910 | void t4_free_sge_resources(struct adapter *adap) |
4911 | { |
4912 | int i; |
4913 | struct sge_eth_rxq *eq; |
4914 | struct sge_eth_txq *etq; |
4915 | |
4916 | /* stop all Rx queues in order to start them draining */ |
4917 | for (i = 0; i < adap->sge.ethqsets; i++) { |
4918 | eq = &adap->sge.ethrxq[i]; |
4919 | if (eq->rspq.desc) |
t4_iq_stop(adap, adap->mbox, adap->pf, 0,
FW_IQ_TYPE_FL_INT_CAP,
eq->rspq.cntxt_id,
eq->fl.size ? eq->fl.cntxt_id : 0xffff,
0xffff);
4925 | } |
4926 | |
4927 | /* clean up Ethernet Tx/Rx queues */ |
4928 | for (i = 0; i < adap->sge.ethqsets; i++) { |
4929 | eq = &adap->sge.ethrxq[i]; |
4930 | if (eq->rspq.desc) |
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (eq->msix) {
cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4935 | eq->msix = NULL; |
4936 | } |
4937 | |
4938 | etq = &adap->sge.ethtxq[i]; |
4939 | if (etq->q.desc) { |
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
__netif_tx_lock_bh(etq->txq);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
__netif_tx_unlock_bh(etq->txq);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
4947 | } |
4948 | } |
4949 | |
4950 | /* clean up control Tx queues */ |
4951 | for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { |
4952 | struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; |
4953 | |
4954 | if (cq->q.desc) { |
tasklet_kill(&cq->qresume_tsk);
t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
4960 | } |
4961 | } |
4962 | |
4963 | if (adap->sge.fw_evtq.desc) { |
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
if (adap->sge.fwevtq_msix_idx >= 0)
cxgb4_free_msix_idx_in_bmap(adap,
adap->sge.fwevtq_msix_idx);
4968 | } |
4969 | |
4970 | if (adap->sge.nd_msix_idx >= 0) |
cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4972 | |
4973 | if (adap->sge.intrq.desc) |
free_rspq_fl(adap, &adap->sge.intrq, NULL);
4975 | |
if (!is_t4(adap->params.chip)) {
4977 | etq = &adap->sge.ptptxq; |
4978 | if (etq->q.desc) { |
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
spin_lock_bh(&adap->ptp_lock);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
spin_unlock_bh(&adap->ptp_lock);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
4986 | } |
4987 | } |
4988 | |
4989 | /* clear the reverse egress queue map */ |
4990 | memset(adap->sge.egr_map, 0, |
4991 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); |
4992 | } |
4993 | |
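/**
* t4_sge_start - enable SGE operation
* @adap: the adapter
*
* Starts the periodic timers that carry out the SGE's Rx and Tx
* administrative work (free list and Tx queue housekeeping).
*/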
4994 | void t4_sge_start(struct adapter *adap) |
4995 | { |
4996 | adap->sge.ethtxq_rover = 0; |
mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
4999 | } |
5000 | |
5001 | /** |
5002 | * t4_sge_stop - disable SGE operation |
5003 | * @adap: the adapter |
5004 | * |
5005 | * Stop tasklets and timers associated with the DMA engine. Note that |
5006 | * this is effective only if measures have been taken to disable any HW |
5007 | * events that may restart them. |
5008 | */ |
5009 | void t4_sge_stop(struct adapter *adap) |
5010 | { |
5011 | int i; |
5012 | struct sge *s = &adap->sge; |
5013 | |
if (s->rx_timer.function)
del_timer_sync(&s->rx_timer);
if (s->tx_timer.function)
del_timer_sync(&s->tx_timer);
5018 | |
5019 | if (is_offload(adap)) { |
5020 | struct sge_uld_txq_info *txq_info; |
5021 | |
5022 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; |
5023 | if (txq_info) { |
5024 | struct sge_uld_txq *txq = txq_info->uldtxq; |
5025 | |
5026 | for_each_ofldtxq(&adap->sge, i) { |
5027 | if (txq->q.desc) |
tasklet_kill(&txq->qresume_tsk);
5029 | } |
5030 | } |
5031 | } |
5032 | |
5033 | if (is_pci_uld(adap)) { |
5034 | struct sge_uld_txq_info *txq_info; |
5035 | |
5036 | txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; |
5037 | if (txq_info) { |
5038 | struct sge_uld_txq *txq = txq_info->uldtxq; |
5039 | |
5040 | for_each_ofldtxq(&adap->sge, i) { |
5041 | if (txq->q.desc) |
tasklet_kill(&txq->qresume_tsk);
5043 | } |
5044 | } |
5045 | } |
5046 | |
5047 | for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { |
5048 | struct sge_ctrl_txq *cq = &s->ctrlq[i]; |
5049 | |
5050 | if (cq->q.desc) |
tasklet_kill(&cq->qresume_tsk);
5052 | } |
5053 | } |
5054 | |
5055 | /** |
5056 | * t4_sge_init_soft - grab core SGE values needed by SGE code |
5057 | * @adap: the adapter |
5058 | * |
* Read the core SGE operating parameters that the driver depends on and
* verify that we can live with them.
*/
5063 | static int t4_sge_init_soft(struct adapter *adap) |
5064 | { |
5065 | struct sge *s = &adap->sge; |
5066 | u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; |
5067 | u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; |
5068 | u32 ingress_rx_threshold; |
5069 | |
5070 | /* |
5071 | * Verify that CPL messages are going to the Ingress Queue for |
5072 | * process_responses() and that only packet data is going to the |
5073 | * Free Lists. |
5074 | */ |
5075 | if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != |
5076 | RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { |
dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5078 | return -EINVAL; |
5079 | } |
5080 | |
5081 | /* |
5082 | * Validate the Host Buffer Register Array indices that we want to |
5083 | * use ... |
5084 | * |
5085 | * XXX Note that we should really read through the Host Buffer Size |
5086 | * XXX register array and find the indices of the Buffer Sizes which |
5087 | * XXX meet our needs! |
5088 | */ |
5089 | #define READ_FL_BUF(x) \ |
5090 | t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) |
5091 | |
5092 | fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); |
5093 | fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); |
5094 | fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); |
5095 | fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); |
5096 | |
5097 | /* We only bother using the Large Page logic if the Large Page Buffer |
5098 | * is larger than our Page Size Buffer. |
5099 | */ |
5100 | if (fl_large_pg <= fl_small_pg) |
5101 | fl_large_pg = 0; |
5102 | |
5103 | #undef READ_FL_BUF |
5104 | |
5105 | /* The Page Size Buffer must be exactly equal to our Page Size and the |
5106 | * Large Page Size Buffer should be 0 (per above) or a power of 2. |
5107 | */ |
5108 | if (fl_small_pg != PAGE_SIZE || |
5109 | (fl_large_pg & (fl_large_pg-1)) != 0) { |
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5111 | fl_small_pg, fl_large_pg); |
5112 | return -EINVAL; |
5113 | } |
5114 | if (fl_large_pg) |
5115 | s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; |
5116 | |
5117 | if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || |
5118 | fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { |
dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5120 | fl_small_mtu, fl_large_mtu); |
5121 | return -EINVAL; |
5122 | } |
5123 | |
5124 | /* |
5125 | * Retrieve our RX interrupt holdoff timer values and counter |
5126 | * threshold values from the SGE parameters. |
5127 | */ |
5128 | timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); |
5129 | timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); |
5130 | timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); |
s->timer_val[0] = core_ticks_to_us(adap,
TIMERVALUE0_G(timer_value_0_and_1));
s->timer_val[1] = core_ticks_to_us(adap,
TIMERVALUE1_G(timer_value_0_and_1));
s->timer_val[2] = core_ticks_to_us(adap,
TIMERVALUE2_G(timer_value_2_and_3));
s->timer_val[3] = core_ticks_to_us(adap,
TIMERVALUE3_G(timer_value_2_and_3));
s->timer_val[4] = core_ticks_to_us(adap,
TIMERVALUE4_G(timer_value_4_and_5));
s->timer_val[5] = core_ticks_to_us(adap,
TIMERVALUE5_G(timer_value_4_and_5));
5143 | |
5144 | ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); |
5145 | s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); |
5146 | s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); |
5147 | s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); |
5148 | s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); |
5149 | |
5150 | return 0; |
5151 | } |
5152 | |
5153 | /** |
5154 | * t4_sge_init - initialize SGE |
5155 | * @adap: the adapter |
5156 | * |
5157 | * Perform low-level SGE code initialization needed every time after a |
5158 | * chip reset. |
5159 | */ |
5160 | int t4_sge_init(struct adapter *adap) |
5161 | { |
5162 | struct sge *s = &adap->sge; |
5163 | u32 sge_control, sge_conm_ctrl; |
5164 | int ret, egress_threshold; |
5165 | |
5166 | /* |
5167 | * Ingress Padding Boundary and Egress Status Page Size are set up by |
5168 | * t4_fixup_host_params(). |
5169 | */ |
5170 | sge_control = t4_read_reg(adap, SGE_CONTROL_A); |
5171 | s->pktshift = PKTSHIFT_G(sge_control); |
5172 | s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; |
5173 | |
5174 | s->fl_align = t4_fl_pkt_align(adap); |
5175 | ret = t4_sge_init_soft(adap); |
5176 | if (ret < 0) |
5177 | return ret; |
5178 | |
5179 | /* |
5180 | * A FL with <= fl_starve_thres buffers is starving and a periodic |
5181 | * timer will attempt to refill it. This needs to be larger than the |
5182 | * SGE's Egress Congestion Threshold. If it isn't, then we can get |
5183 | * stuck waiting for new packets while the SGE is waiting for us to |
5184 | * give it more Free List entries. (Note that the SGE's Egress |
5185 | * Congestion Threshold is in units of 2 Free List pointers.) For T4, |
5186 | * there was only a single field to control this. For T5 there's the |
5187 | * original field which now only applies to Unpacked Mode Free List |
5188 | * buffers and a new field which only applies to Packed Mode Free List |
5189 | * buffers. |
5190 | */ |
5191 | sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); |
5192 | switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { |
5193 | case CHELSIO_T4: |
5194 | egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); |
5195 | break; |
5196 | case CHELSIO_T5: |
5197 | egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); |
5198 | break; |
5199 | case CHELSIO_T6: |
5200 | egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl); |
5201 | break; |
5202 | default: |
dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5204 | CHELSIO_CHIP_VERSION(adap->params.chip)); |
5205 | return -EINVAL; |
5206 | } |
5207 | s->fl_starve_thres = 2*egress_threshold + 1; |
5208 | |
t4_idma_monitor_init(adap, &s->idma_monitor);
5210 | |
/* Set up timers used for recurring callbacks to process RX and TX
5212 | * administrative tasks. |
5213 | */ |
5214 | timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); |
5215 | timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); |
5216 | |
5217 | spin_lock_init(&s->intrq_lock); |
5218 | |
5219 | return 0; |
5220 | } |
5221 | |