// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 */

/* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support
 */

#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <net/ip6_checksum.h>
#include "emac.h"
#include "emac-sgmii.h"

/* EMAC_MAC_CTRL */
#define SINGLE_PAUSE_MODE	0x10000000
#define DEBUG_MODE		0x08000000
#define BROAD_EN		0x04000000
#define MULTI_ALL		0x02000000
#define RX_CHKSUM_EN		0x01000000
#define HUGE			0x00800000
#define SPEED(x)		(((x) & 0x3) << 20)
#define SPEED_MASK		SPEED(0x3)
#define SIMR			0x00080000
#define TPAUSE			0x00010000
#define PROM_MODE		0x00008000
#define VLAN_STRIP		0x00004000
#define PRLEN_BMSK		0x00003c00
#define PRLEN_SHFT		10
#define HUGEN			0x00000200
#define FLCHK			0x00000100
#define PCRCE			0x00000080
#define CRCE			0x00000040
#define FULLD			0x00000020
#define MAC_LP_EN		0x00000010
#define RXFC			0x00000008
#define TXFC			0x00000004
#define RXEN			0x00000002
#define TXEN			0x00000001

/* EMAC_DESC_CTRL_3 */
#define RFD_RING_SIZE_BMSK	0xfff

/* EMAC_DESC_CTRL_4 */
#define RX_BUFFER_SIZE_BMSK	0xffff

/* EMAC_DESC_CTRL_6 */
#define RRD_RING_SIZE_BMSK	0xfff

/* EMAC_DESC_CTRL_9 */
#define TPD_RING_SIZE_BMSK	0xffff

/* EMAC_TXQ_CTRL_0 */
#define NUM_TXF_BURST_PREF_BMSK	0xffff0000
#define NUM_TXF_BURST_PREF_SHFT	16
#define LS_8023_SP		0x80
#define TXQ_MODE		0x40
#define TXQ_EN			0x20
#define IP_OP_SP		0x10
#define NUM_TPD_BURST_PREF_BMSK	0xf
#define NUM_TPD_BURST_PREF_SHFT	0

/* EMAC_TXQ_CTRL_1 */
#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff

/* EMAC_TXQ_CTRL_2 */
#define TXF_HWM_BMSK		0xfff0000
#define TXF_LWM_BMSK		0xfff

/* EMAC_RXQ_CTRL_0 */
#define RXQ_EN			BIT(31)
#define CUT_THRU_EN		BIT(30)
#define RSS_HASH_EN		BIT(29)
#define NUM_RFD_BURST_PREF_BMSK	0x3f00000
#define NUM_RFD_BURST_PREF_SHFT	20
#define IDT_TABLE_SIZE_BMSK	0x1ff00
#define IDT_TABLE_SIZE_SHFT	8
#define SP_IPV6			0x80

/* EMAC_RXQ_CTRL_1 */
#define JUMBO_1KAH_BMSK		0xf000
#define JUMBO_1KAH_SHFT		12
#define RFD_PREF_LOW_TH		0x10
#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
#define RFD_PREF_LOW_THRESHOLD_SHFT 6
#define RFD_PREF_UP_TH		0x10
#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
#define RFD_PREF_UP_THRESHOLD_SHFT 0

/* EMAC_RXQ_CTRL_2 */
#define RXF_DOF_THRESHOLD	0x1a0
#define RXF_DOF_THRESHOLD_BMSK	0xfff0000
#define RXF_DOF_THRESHOLD_SHFT	16
#define RXF_UOF_THRESHOLD	0xbe
#define RXF_UOF_THRESHOLD_BMSK	0xfff
#define RXF_UOF_THRESHOLD_SHFT	0

/* EMAC_RXQ_CTRL_3 */
#define RXD_TIMER_BMSK		0xffff0000
#define RXD_THRESHOLD_BMSK	0xfff
#define RXD_THRESHOLD_SHFT	0

/* EMAC_DMA_CTRL */
#define DMAW_DLY_CNT_BMSK	0xf0000
#define DMAW_DLY_CNT_SHFT	16
#define DMAR_DLY_CNT_BMSK	0xf800
#define DMAR_DLY_CNT_SHFT	11
#define DMAR_REQ_PRI		0x400
#define REGWRBLEN_BMSK		0x380
#define REGWRBLEN_SHFT		7
#define REGRDBLEN_BMSK		0x70
#define REGRDBLEN_SHFT		4
#define OUT_ORDER_MODE		0x4
#define ENH_ORDER_MODE		0x2
#define IN_ORDER_MODE		0x1

/* EMAC_MAILBOX_13 */
#define RFD3_PROC_IDX_BMSK	0xfff0000
#define RFD3_PROC_IDX_SHFT	16
#define RFD3_PROD_IDX_BMSK	0xfff
#define RFD3_PROD_IDX_SHFT	0

/* EMAC_MAILBOX_2 */
#define NTPD_CONS_IDX_BMSK	0xffff0000
#define NTPD_CONS_IDX_SHFT	16

/* EMAC_MAILBOX_3 */
#define RFD0_CONS_IDX_BMSK	0xfff
#define RFD0_CONS_IDX_SHFT	0

/* EMAC_MAILBOX_11 */
#define H3TPD_PROD_IDX_BMSK	0xffff0000
#define H3TPD_PROD_IDX_SHFT	16

/* EMAC_AXI_MAST_CTRL */
#define DATA_BYTE_SWAP		0x8
#define MAX_BOUND		0x2
#define MAX_BTYPE		0x1

/* EMAC_MAILBOX_12 */
#define H3TPD_CONS_IDX_BMSK	0xffff0000
#define H3TPD_CONS_IDX_SHFT	16

/* EMAC_MAILBOX_9 */
#define H2TPD_PROD_IDX_BMSK	0xffff
#define H2TPD_PROD_IDX_SHFT	0

/* EMAC_MAILBOX_10 */
#define H1TPD_CONS_IDX_BMSK	0xffff0000
#define H1TPD_CONS_IDX_SHFT	16
#define H2TPD_CONS_IDX_BMSK	0xffff
#define H2TPD_CONS_IDX_SHFT	0

/* EMAC_ATHR_HEADER_CTRL */
#define HEADER_CNT_EN		0x2
#define HEADER_ENABLE		0x1

/* EMAC_MAILBOX_0 */
#define RFD0_PROC_IDX_BMSK	0xfff0000
#define RFD0_PROC_IDX_SHFT	16
#define RFD0_PROD_IDX_BMSK	0xfff
#define RFD0_PROD_IDX_SHFT	0

/* EMAC_MAILBOX_5 */
#define RFD1_PROC_IDX_BMSK	0xfff0000
#define RFD1_PROC_IDX_SHFT	16
#define RFD1_PROD_IDX_BMSK	0xfff
#define RFD1_PROD_IDX_SHFT	0

/* EMAC_MISC_CTRL */
#define RX_UNCPL_INT_EN		0x1

/* EMAC_MAILBOX_7 */
#define RFD2_CONS_IDX_BMSK	0xfff0000
#define RFD2_CONS_IDX_SHFT	16
#define RFD1_CONS_IDX_BMSK	0xfff
#define RFD1_CONS_IDX_SHFT	0

/* EMAC_MAILBOX_8 */
#define RFD3_CONS_IDX_BMSK	0xfff
#define RFD3_CONS_IDX_SHFT	0

/* EMAC_MAILBOX_15 */
#define NTPD_PROD_IDX_BMSK	0xffff
#define NTPD_PROD_IDX_SHFT	0

/* EMAC_MAILBOX_16 */
#define H1TPD_PROD_IDX_BMSK	0xffff
#define H1TPD_PROD_IDX_SHFT	0

#define RXQ0_RSS_HSTYP_IPV6_TCP_EN	0x20
#define RXQ0_RSS_HSTYP_IPV6_EN		0x10
#define RXQ0_RSS_HSTYP_IPV4_TCP_EN	0x8
#define RXQ0_RSS_HSTYP_IPV4_EN		0x4

/* EMAC_EMAC_WRAPPER_TX_TS_INX */
#define EMAC_WRAPPER_TX_TS_EMPTY	BIT(31)
#define EMAC_WRAPPER_TX_TS_INX_BMSK	0xffff

struct emac_skb_cb {
	u32 tpd_idx;
	unsigned long jiffies;
};

#define EMAC_SKB_CB(skb)	((struct emac_skb_cb *)(skb)->cb)
#define EMAC_RSS_IDT_SIZE	256
#define JUMBO_1KAH		0x4
#define RXD_TH			0x100
#define EMAC_TPD_LAST_FRAGMENT	0x80000000
#define EMAC_TPD_TSTAMP_SAVE	0x80000000

/* EMAC Errors in emac_rrd.word[3] */
#define EMAC_RRD_L4F		BIT(14)
#define EMAC_RRD_IPF		BIT(15)
#define EMAC_RRD_CRC		BIT(21)
#define EMAC_RRD_FAE		BIT(22)
#define EMAC_RRD_TRN		BIT(23)
#define EMAC_RRD_RNT		BIT(24)
#define EMAC_RRD_INC		BIT(25)
#define EMAC_RRD_FOV		BIT(29)
#define EMAC_RRD_LEN		BIT(30)

/* Error bits that will result in a received frame being discarded */
#define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
			EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
			EMAC_RRD_FOV | EMAC_RRD_LEN)
#define EMAC_RRD_STATS_DW_IDX 3

#define EMAC_RRD(RXQ, SIZE, IDX)	((RXQ)->rrd.v_addr + (SIZE * (IDX)))
#define EMAC_RFD(RXQ, SIZE, IDX)	((RXQ)->rfd.v_addr + (SIZE * (IDX)))
#define EMAC_TPD(TXQ, SIZE, IDX)	((TXQ)->tpd.v_addr + (SIZE * (IDX)))

#define GET_RFD_BUFFER(RXQ, IDX)	(&((RXQ)->rfd.rfbuff[(IDX)]))
#define GET_TPD_BUFFER(RTQ, IDX)	(&((RTQ)->tpd.tpbuff[(IDX)]))

#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8

#define ISR_RX_PKT	(\
	RX_PKT_INT0	|\
	RX_PKT_INT1	|\
	RX_PKT_INT2	|\
	RX_PKT_INT3)
void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
{
	u32 crc32, bit, reg, mta;

	/* Calculate the CRC of the MAC address */
	crc32 = ether_crc(ETH_ALEN, addr);

	/* The HASH Table is an array of 2 32-bit registers. It is
	 * treated like an array of 64 bits (BitArray[hash_value]).
	 * Use the upper 6 bits of the above CRC as the hash value.
	 */
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
	mta |= BIT(bit);
	writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
}
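
/* Typical usage (sketch; the driver's actual ndo_set_rx_mode handler lives
 * elsewhere and may differ): clear the hash table, then set one bit per
 * multicast address currently subscribed on the netdev:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	emac_mac_multicast_addr_clear(adpt);
 *	netdev_for_each_mc_addr(ha, netdev)
 *		emac_mac_multicast_addr_set(adpt, ha->addr);
 */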

void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
{
	writel(0, adpt->base + EMAC_HASH_TAB_REG0);
	writel(0, adpt->base + EMAC_HASH_TAB_REG1);
}

/* definitions for RSS */
#define EMAC_RSS_KEY(_i, _type) \
		(EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
#define EMAC_RSS_TBL(_i, _type) \
		(EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))

/* Config MAC modes */
void emac_mac_mode_config(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	u32 mac;

	mac = readl(adpt->base + EMAC_MAC_CTRL);
	mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);

	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		mac |= VLAN_STRIP;

	if (netdev->flags & IFF_PROMISC)
		mac |= PROM_MODE;

	if (netdev->flags & IFF_ALLMULTI)
		mac |= MULTI_ALL;

	writel(mac, adpt->base + EMAC_MAC_CTRL);
}

/* Config descriptor rings */
static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
{
	/* TPD (Transmit Packet Descriptor) */
	writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_1);

	writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_8);

	writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_9);

	/* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */
	writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_0);

	writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_2);
	writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_5);

	writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_3);
	writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_6);

	writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_4);

	writel(0, adpt->base + EMAC_DESC_CTRL_11);

	/* Load all of the base addresses above and ensure that triggering HW
	 * to read ring pointers is flushed
	 */
	writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
}

/* Config transmit parameters */
static void emac_mac_tx_config(struct emac_adapter *adpt)
{
	u32 val;

	writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
	       JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);

	val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
	       NUM_TPD_BURST_PREF_BMSK;

	val |= TXQ_MODE | LS_8023_SP;
	val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
		NUM_TXF_BURST_PREF_BMSK;

	writel(val, adpt->base + EMAC_TXQ_CTRL_0);
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
			  (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
}

/* Config receive parameters */
static void emac_mac_rx_config(struct emac_adapter *adpt)
{
	u32 val;

	val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
	      NUM_RFD_BURST_PREF_BMSK;
	val |= (SP_IPV6 | CUT_THRU_EN);

	writel(val, adpt->base + EMAC_RXQ_CTRL_0);

	val = readl(adpt->base + EMAC_RXQ_CTRL_1);
	val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
		 RFD_PREF_UP_THRESHOLD_BMSK);
	val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
		(RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
		(RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_1);

	val = readl(adpt->base + EMAC_RXQ_CTRL_2);
	val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
	val |= (RXF_DOF_THRESHOLD << RXF_DOF_THRESHOLD_SHFT) |
		(RXF_UOF_THRESHOLD << RXF_UOF_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_2);

	val = readl(adpt->base + EMAC_RXQ_CTRL_3);
	val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
	val |= RXD_TH << RXD_THRESHOLD_SHFT;
	writel(val, adpt->base + EMAC_RXQ_CTRL_3);
}

/* Config dma */
static void emac_mac_dma_config(struct emac_adapter *adpt)
{
	u32 dma_ctrl = DMAR_REQ_PRI;

	switch (adpt->dma_order) {
	case emac_dma_ord_in:
		dma_ctrl |= IN_ORDER_MODE;
		break;
	case emac_dma_ord_enh:
		dma_ctrl |= ENH_ORDER_MODE;
		break;
	case emac_dma_ord_out:
		dma_ctrl |= OUT_ORDER_MODE;
		break;
	default:
		break;
	}

	dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
		     REGRDBLEN_BMSK;
	dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
		     REGWRBLEN_BMSK;
	dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
		     DMAR_DLY_CNT_BMSK;
	dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
		     DMAW_DLY_CNT_BMSK;

	/* config DMA and ensure that configuration is flushed to HW */
	writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
}

/* set MAC address */
static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr)
{
	u32 sta;

	/* for example: 00-A0-C6-11-22-33
	 * 0<-->C6112233, 1<-->00A0.
	 */

	/* low 32-bit word */
	sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
	      (((u32)addr[4]) << 8) | (((u32)addr[5]));
	writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);

	/* high 32-bit word */
	sta = (((u32)addr[0]) << 8) | (u32)addr[1];
	writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
}

static void emac_mac_config(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	unsigned int max_frame;
	u32 val;

	emac_set_mac_address(adpt, netdev->dev_addr);

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
		ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;
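
	/* Worked example (illustrative): for the standard 1500-byte MTU,
	 * max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
	 * = 1522 bytes, so the receive buffer stays at the (larger) default
	 * EMAC_DEF_RX_BUF_SIZE; only jumbo MTUs use the 8-byte-aligned
	 * max_frame directly.
	 */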

	emac_mac_dma_rings_config(adpt);

	writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
	       adpt->base + EMAC_MAX_FRAM_LEN_CTRL);

	emac_mac_tx_config(adpt);
	emac_mac_rx_config(adpt);
	emac_mac_dma_config(adpt);

	val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
	val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
	val |= MAX_BTYPE;
	writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
	writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
	writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
}

void emac_mac_reset(struct emac_adapter *adpt)
{
	emac_mac_stop(adpt);

	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
	usleep_range(100, 150); /* reset may take up to 100usec */

	/* interrupt clear-on-read */
	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
}

static void emac_mac_start(struct emac_adapter *adpt)
{
	struct phy_device *phydev = adpt->phydev;
	u32 mac, csr1;

	/* enable tx queue */
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);

	/* enable rx queue */
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);

	/* enable mac control */
	mac = readl(adpt->base + EMAC_MAC_CTRL);
	csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	mac |= TXEN | RXEN;     /* enable RX/TX */

	/* Configure MAC flow control. If set to automatic, then match
	 * whatever the PHY does. Otherwise, enable or disable it, depending
	 * on what the user configured via ethtool.
	 */
	mac &= ~(RXFC | TXFC);

	if (adpt->automatic) {
		/* If it's set to automatic, then update our local values */
		adpt->rx_flow_control = phydev->pause;
		adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
	}
	mac |= adpt->rx_flow_control ? RXFC : 0;
	mac |= adpt->tx_flow_control ? TXFC : 0;

	/* setup link speed */
	mac &= ~SPEED_MASK;
	if (phydev->speed == SPEED_1000) {
		mac |= SPEED(2);
		csr1 |= FREQ_MODE;
	} else {
		mac |= SPEED(1);
		csr1 &= ~FREQ_MODE;
	}

	if (phydev->duplex == DUPLEX_FULL)
		mac |= FULLD;
	else
		mac &= ~FULLD;

	/* other parameters */
	mac |= (CRCE | PCRCE);
	mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
	mac |= BROAD_EN;
	mac |= FLCHK;
	mac &= ~RX_CHKSUM_EN;
	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
		 DEBUG_MODE | SINGLE_PAUSE_MODE);

	/* Enable single-pause-frame mode if requested.
	 *
	 * If enabled, the EMAC will send a single pause frame when the RX
	 * queue is full. This normally leads to packet loss because
	 * the pause frame disables the remote MAC only for 33ms (the quanta),
	 * and then the remote MAC continues sending packets even though
	 * the RX queue is still full.
	 *
	 * If disabled, the EMAC sends a pause frame every 31ms until the RX
	 * queue is no longer full. Normally, this is the preferred
	 * method of operation. However, when the system is hung (e.g.
	 * cores are halted), the EMAC interrupt handler is never called
	 * and so the RX queue fills up quickly and stays full. The resulting
	 * non-stop "flood" of pause frames sometimes has the effect of
	 * disabling nearby switches. In some cases, other nearby switches
	 * are also affected, shutting down the entire network.
	 *
	 * The user can enable or disable single-pause-frame mode
	 * via ethtool.
	 */
	mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;

	writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);

	/* enable interrupt read clear, low power sleep mode and
	 * the irq moderators
	 */

	writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
	writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
			IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);

	emac_mac_mode_config(adpt);

	emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
			  (HEADER_ENABLE | HEADER_CNT_EN), 0);
}

void emac_mac_stop(struct emac_adapter *adpt)
{
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
	usleep_range(1000, 1050); /* stopping mac may take up to 1msec */
}

/* Free all descriptors of given transmit queue */
static void emac_tx_q_descs_free(struct emac_adapter *adpt)
{
	struct emac_tx_queue *tx_q = &adpt->tx_q;
	unsigned int i;
	size_t size;

	/* ring already cleared, nothing to do */
	if (!tx_q->tpd.tpbuff)
		return;

	for (i = 0; i < tx_q->tpd.count; i++) {
		struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);

		if (tpbuf->dma_addr) {
			dma_unmap_single(adpt->netdev->dev.parent,
					 tpbuf->dma_addr, tpbuf->length,
					 DMA_TO_DEVICE);
			tpbuf->dma_addr = 0;
		}
		if (tpbuf->skb) {
			dev_kfree_skb_any(tpbuf->skb);
			tpbuf->skb = NULL;
		}
	}

	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
	memset(tx_q->tpd.tpbuff, 0, size);

	/* clear the descriptor ring */
	memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);

	tx_q->tpd.consume_idx = 0;
	tx_q->tpd.produce_idx = 0;
}

/* Free all descriptors of given receive queue */
static void emac_rx_q_free_descs(struct emac_adapter *adpt)
{
	struct device *dev = adpt->netdev->dev.parent;
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	unsigned int i;
	size_t size;

	/* ring already cleared, nothing to do */
	if (!rx_q->rfd.rfbuff)
		return;

	for (i = 0; i < rx_q->rfd.count; i++) {
		struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);

		if (rfbuf->dma_addr) {
			dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
					 DMA_FROM_DEVICE);
			rfbuf->dma_addr = 0;
		}
		if (rfbuf->skb) {
			dev_kfree_skb(rfbuf->skb);
			rfbuf->skb = NULL;
		}
	}

	size = sizeof(struct emac_buffer) * rx_q->rfd.count;
	memset(rx_q->rfd.rfbuff, 0, size);

	/* clear the descriptor rings */
	memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
	rx_q->rrd.produce_idx = 0;
	rx_q->rrd.consume_idx = 0;

	memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
	rx_q->rfd.produce_idx = 0;
	rx_q->rfd.consume_idx = 0;
}

/* Free all buffers associated with given transmit queue */
static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_tx_queue *tx_q = &adpt->tx_q;

	emac_tx_q_descs_free(adpt);

	kfree(tx_q->tpd.tpbuff);
	tx_q->tpd.tpbuff = NULL;
	tx_q->tpd.v_addr = NULL;
	tx_q->tpd.dma_addr = 0;
	tx_q->tpd.size = 0;
}

/* Allocate TX descriptor ring for the given transmit queue */
static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
				struct emac_tx_queue *tx_q)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	int node = dev_to_node(adpt->netdev->dev.parent);
	size_t size;

	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
	tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
	if (!tx_q->tpd.tpbuff)
		return -ENOMEM;

	tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
	tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
	tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(tx_q->tpd.size, 8);
	tx_q->tpd.produce_idx = 0;
	tx_q->tpd.consume_idx = 0;

	return 0;
}

/* Free all buffers associated with given receive queue */
static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_rx_queue *rx_q = &adpt->rx_q;

	emac_rx_q_free_descs(adpt);

	kfree(rx_q->rfd.rfbuff);
	rx_q->rfd.rfbuff = NULL;

	rx_q->rfd.v_addr = NULL;
	rx_q->rfd.dma_addr = 0;
	rx_q->rfd.size = 0;

	rx_q->rrd.v_addr = NULL;
	rx_q->rrd.dma_addr = 0;
	rx_q->rrd.size = 0;
}

/* Allocate RX descriptor rings for the given receive queue */
static int emac_rx_descs_alloc(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	int node = dev_to_node(adpt->netdev->dev.parent);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	size_t size;

	size = sizeof(struct emac_buffer) * rx_q->rfd.count;
	rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
	if (!rx_q->rfd.rfbuff)
		return -ENOMEM;

	rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
	rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);

	rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
	rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(rx_q->rrd.size, 8);

	rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
	rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(rx_q->rfd.size, 8);

	rx_q->rrd.produce_idx = 0;
	rx_q->rrd.consume_idx = 0;

	rx_q->rfd.produce_idx = 0;
	rx_q->rfd.consume_idx = 0;

	return 0;
}

/* Allocate all TX and RX descriptor rings */
int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;
	unsigned int num_tx_descs = adpt->tx_desc_cnt;
	unsigned int num_rx_descs = adpt->rx_desc_cnt;
	int ret;

	adpt->tx_q.tpd.count = adpt->tx_desc_cnt;

	adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
	adpt->rx_q.rfd.count = adpt->rx_desc_cnt;

	/* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
	 * hence the additional padding bytes are allocated.
	 */
	ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
			    num_rx_descs * (adpt->rfd_size * 4) +
			    num_rx_descs * (adpt->rrd_size * 4) +
			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
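
	/* Worked example (illustrative; descriptor counts and per-descriptor
	 * word sizes are configuration dependent): with 512 TX descriptors of
	 * 4 words each, plus 256 RX descriptors with 2-word RFDs and 6-word
	 * RRDs, this is 512 * 16 + 256 * 8 + 256 * 24 + 24 = 16408 bytes in
	 * a single coherent allocation.
	 */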

	ring_header->used = 0;
	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
						 &ring_header->dma_addr,
						 GFP_KERNEL);
	if (!ring_header->v_addr)
		return -ENOMEM;

	ring_header->used = ALIGN(ring_header->dma_addr, 8) -
			    ring_header->dma_addr;

	ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
	if (ret) {
		netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
		goto err_alloc_tx;
	}

	ret = emac_rx_descs_alloc(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
		goto err_alloc_rx;
	}

	return 0;

err_alloc_rx:
	emac_tx_q_bufs_free(adpt);
err_alloc_tx:
	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;

	return ret;
}

/* Free all TX and RX descriptor rings */
void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;

	emac_tx_q_bufs_free(adpt);
	emac_rx_q_bufs_free(adpt);

	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;
}

/* Initialize descriptor rings */
static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
{
	unsigned int i;

	adpt->tx_q.tpd.produce_idx = 0;
	adpt->tx_q.tpd.consume_idx = 0;
	for (i = 0; i < adpt->tx_q.tpd.count; i++)
		adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;

	adpt->rx_q.rrd.produce_idx = 0;
	adpt->rx_q.rrd.consume_idx = 0;
	adpt->rx_q.rfd.produce_idx = 0;
	adpt->rx_q.rfd.consume_idx = 0;
	for (i = 0; i < adpt->rx_q.rfd.count; i++)
		adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
}

/* Produce new receive free descriptor */
static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
				   struct emac_rx_queue *rx_q,
				   dma_addr_t addr)
{
	u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);

	*(hw_rfd++) = lower_32_bits(addr);
	*hw_rfd = upper_32_bits(addr);

	if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
		rx_q->rfd.produce_idx = 0;
}
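
/* Layout note (illustrative): each RFD is just the 64-bit DMA address of
 * the receive buffer, stored low word first; e.g. address 0x123456780 is
 * written as 0x23456780 followed by 0x00000001.
 */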

/* Fill up receive queue's RFD with preallocated receive buffers */
static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
				     struct emac_rx_queue *rx_q)
{
	struct emac_buffer *curr_rxbuf;
	struct emac_buffer *next_rxbuf;
	unsigned int count = 0;
	u32 next_produce_idx;

	next_produce_idx = rx_q->rfd.produce_idx + 1;
	if (next_produce_idx == rx_q->rfd.count)
		next_produce_idx = 0;

	curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
	next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);

	/* this always leaves one blank rx_buffer, so the ring never
	 * fills up completely
	 */
	while (!next_rxbuf->dma_addr) {
		struct sk_buff *skb;
		int ret;

		skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
		if (!skb)
			break;

		curr_rxbuf->dma_addr =
			dma_map_single(adpt->netdev->dev.parent, skb->data,
				       adpt->rxbuf_size, DMA_FROM_DEVICE);

		ret = dma_mapping_error(adpt->netdev->dev.parent,
					curr_rxbuf->dma_addr);
		if (ret) {
			dev_kfree_skb(skb);
			break;
		}
		curr_rxbuf->skb = skb;
		curr_rxbuf->length = adpt->rxbuf_size;

		emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
		next_produce_idx = rx_q->rfd.produce_idx + 1;
		if (next_produce_idx == rx_q->rfd.count)
			next_produce_idx = 0;

		curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
		next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
		count++;
	}

	if (count) {
		u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
				rx_q->produce_mask;
		emac_reg_update32(adpt->base + rx_q->produce_reg,
				  rx_q->produce_mask, prod_idx);
	}
}

static void emac_adjust_link(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link) {
		emac_mac_start(adpt);
		emac_sgmii_link_change(adpt, true);
	} else {
		emac_sgmii_link_change(adpt, false);
		emac_mac_stop(adpt);
	}

	phy_print_status(phydev);
}

/* Bring up the interface/HW */
int emac_mac_up(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	int ret;

	emac_mac_rx_tx_ring_reset_all(adpt);
	emac_mac_config(adpt);
	emac_mac_rx_descs_refill(adpt, &adpt->rx_q);

	adpt->phydev->irq = PHY_POLL;
	ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
				 PHY_INTERFACE_MODE_SGMII);
	if (ret) {
		netdev_err(adpt->netdev, "could not connect phy\n");
		return ret;
	}

	phy_attached_print(adpt->phydev, NULL);

	/* enable mac irq */
	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);

	phy_start(adpt->phydev);

	napi_enable(&adpt->rx_q.napi);
	netif_start_queue(netdev);

	return 0;
}

/* Bring down the interface/HW */
void emac_mac_down(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;

	netif_stop_queue(netdev);
	napi_disable(&adpt->rx_q.napi);

	phy_stop(adpt->phydev);

	/* Interrupts must be disabled before the PHY is disconnected, to
	 * avoid a race condition where adjust_link is null when we get
	 * an interrupt.
	 */
	writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(0, adpt->base + EMAC_INT_MASK);
	synchronize_irq(adpt->irq.irq);

	phy_disconnect(adpt->phydev);

	emac_mac_reset(adpt);

	emac_tx_q_descs_free(adpt);
	netdev_reset_queue(adpt->netdev);
	emac_rx_q_free_descs(adpt);
}

/* Consume next received packet descriptor */
static bool emac_rx_process_rrd(struct emac_adapter *adpt,
				struct emac_rx_queue *rx_q,
				struct emac_rrd *rrd)
{
	u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);

	rrd->word[3] = *(hw_rrd + 3);

	if (!RRD_UPDT(rrd))
		return false;

	rrd->word[4] = 0;
	rrd->word[5] = 0;

	rrd->word[0] = *(hw_rrd++);
	rrd->word[1] = *(hw_rrd++);
	rrd->word[2] = *(hw_rrd++);

	if (unlikely(RRD_NOR(rrd) != 1)) {
		netdev_err(adpt->netdev,
			   "error: multi-RFD not supported yet! nor:%lu\n",
			   RRD_NOR(rrd));
	}

	/* mark rrd as processed */
	RRD_UPDT_SET(rrd, 0);
	*hw_rrd = rrd->word[3];

	if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
		rx_q->rrd.consume_idx = 0;

	return true;
}

/* Produce new transmit descriptor */
static void emac_tx_tpd_create(struct emac_adapter *adpt,
			       struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
{
	u32 *hw_tpd;

	tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
	hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);

	if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
		tx_q->tpd.produce_idx = 0;

	*(hw_tpd++) = tpd->word[0];
	*(hw_tpd++) = tpd->word[1];
	*(hw_tpd++) = tpd->word[2];
	*hw_tpd = tpd->word[3];
}

/* Mark the last transmit descriptor as such (for the transmit packet) */
static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
				  struct emac_tx_queue *tx_q)
{
	u32 *hw_tpd =
		EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
	u32 tmp_tpd;

	tmp_tpd = *(hw_tpd + 1);
	tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
	*(hw_tpd + 1) = tmp_tpd;
}

static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
{
	struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
	u32 consume_idx = RRD_SI(rrd);
	unsigned int i;

	for (i = 0; i < RRD_NOR(rrd); i++) {
		rfbuf[consume_idx].skb = NULL;
		if (++consume_idx == rx_q->rfd.count)
			consume_idx = 0;
	}

	rx_q->rfd.consume_idx = consume_idx;
	rx_q->rfd.process_idx = consume_idx;
}

/* Push the received skb to upper layers */
static void emac_receive_skb(struct emac_rx_queue *rx_q,
			     struct sk_buff *skb,
			     u16 vlan_tag, bool vlan_flag)
{
	if (vlan_flag) {
		u16 vlan;

		EMAC_TAG_TO_VLAN(vlan_tag, vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
	}

	napi_gro_receive(&rx_q->napi, skb);
}

/* Process receive event */
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
			 int *num_pkts, int max_pkts)
{
	u32 proc_idx, hw_consume_idx, num_consume_pkts;
	struct net_device *netdev = adpt->netdev;
	struct emac_buffer *rfbuf;
	unsigned int count = 0;
	struct emac_rrd rrd;
	struct sk_buff *skb;
	u32 reg;

	reg = readl_relaxed(adpt->base + rx_q->consume_reg);

	hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
	num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
		(hw_consume_idx - rx_q->rrd.consume_idx) :
		(hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);

	do {
		if (!num_consume_pkts)
			break;

		if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
			break;

		if (likely(RRD_NOR(&rrd) == 1)) {
			/* good receive */
			rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
			dma_unmap_single(adpt->netdev->dev.parent,
					 rfbuf->dma_addr, rfbuf->length,
					 DMA_FROM_DEVICE);
			rfbuf->dma_addr = 0;
			skb = rfbuf->skb;
		} else {
			netdev_err(adpt->netdev,
				   "error: multi-RFD not supported yet!\n");
			break;
		}
		emac_rx_rfd_clean(rx_q, &rrd);
		num_consume_pkts--;
		count++;

		/* Due to a HW issue in L4 check sum detection (UDP/TCP frags
		 * with DF set are marked as error), drop packets based on the
		 * error mask rather than the summary bit (ignoring L4F errors)
		 */
		if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
			netif_dbg(adpt, rx_status, adpt->netdev,
				  "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
				  rrd.word[0], rrd.word[1],
				  rrd.word[2], rrd.word[3]);

			dev_kfree_skb(skb);
			continue;
		}

		skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (netdev->features & NETIF_F_RXCSUM)
			skb->ip_summed = RRD_L4F(&rrd) ?
				CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
				 (bool)RRD_CVTAG(&rrd));

		(*num_pkts)++;
	} while (*num_pkts < max_pkts);

	if (count) {
		proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
				rx_q->process_mask;
		emac_reg_update32(adpt->base + rx_q->process_reg,
				  rx_q->process_mask, proc_idx);
		emac_mac_rx_descs_refill(adpt, rx_q);
	}
}

/* get the number of free transmit descriptors */
static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
{
	u32 produce_idx = tx_q->tpd.produce_idx;
	u32 consume_idx = tx_q->tpd.consume_idx;

	return (consume_idx > produce_idx) ?
		(consume_idx - produce_idx - 1) :
		(tx_q->tpd.count + consume_idx - produce_idx - 1);
}
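
/* Example (illustrative): with count = 8, produce_idx = 6 and
 * consume_idx = 2, this returns 8 + 2 - 6 - 1 = 3. One slot is always
 * kept empty so that produce_idx == consume_idx unambiguously means an
 * empty ring rather than a full one.
 */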

/* Process transmit event */
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
{
	u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
	u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
	struct emac_buffer *tpbuf;

	hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;

	while (tx_q->tpd.consume_idx != hw_consume_idx) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
		if (tpbuf->dma_addr) {
			dma_unmap_page(adpt->netdev->dev.parent,
				       tpbuf->dma_addr, tpbuf->length,
				       DMA_TO_DEVICE);
			tpbuf->dma_addr = 0;
		}

		if (tpbuf->skb) {
			pkts_compl++;
			bytes_compl += tpbuf->skb->len;
			dev_consume_skb_irq(tpbuf->skb);
			tpbuf->skb = NULL;
		}

		if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
			tx_q->tpd.consume_idx = 0;
	}

	netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(adpt->netdev))
		if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(adpt->netdev);
}

/* Initialize all queue data structures */
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
				  struct emac_adapter *adpt)
{
	adpt->rx_q.netdev = adpt->netdev;

	adpt->rx_q.produce_reg = EMAC_MAILBOX_0;
	adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
	adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;

	adpt->rx_q.process_reg = EMAC_MAILBOX_0;
	adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
	adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;

	adpt->rx_q.consume_reg = EMAC_MAILBOX_3;
	adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
	adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;

	adpt->rx_q.irq = &adpt->irq;
	adpt->rx_q.intr = adpt->irq.mask & ISR_RX_PKT;

	adpt->tx_q.produce_reg = EMAC_MAILBOX_15;
	adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
	adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;

	adpt->tx_q.consume_reg = EMAC_MAILBOX_2;
	adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
	adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
}

/* Fill up transmit descriptors with TSO and Checksum offload information */
static int emac_tso_csum(struct emac_adapter *adpt,
			 struct emac_tx_queue *tx_q,
			 struct sk_buff *skb,
			 struct emac_tpd *tpd)
{
	unsigned int hdr_len;
	int ret;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(ret))
				return ret;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
				       + ntohs(ip_hdr(skb)->tot_len);
			if (skb->len > pkt_len) {
				ret = pskb_trim(skb, pkt_len);
				if (unlikely(ret))
					return ret;
			}
		}

		hdr_len = skb_tcp_all_headers(skb);
		if (unlikely(skb->len == hdr_len)) {
			/* we only need to do csum */
			netif_warn(adpt, tx_err, adpt->netdev,
				   "tso not needed for packet with 0 data\n");
			goto do_csum;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0);
			TPD_IPV4_SET(tpd, 1);
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* ipv6 tso needs an extra tpd */
			struct emac_tpd extra_tpd;

			memset(tpd, 0, sizeof(*tpd));
			memset(&extra_tpd, 0, sizeof(extra_tpd));

			tcp_v6_gso_csum_prep(skb);

			TPD_PKT_LEN_SET(&extra_tpd, skb->len);
			TPD_LSO_SET(&extra_tpd, 1);
			TPD_LSOV_SET(&extra_tpd, 1);
			emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
			TPD_LSOV_SET(tpd, 1);
		}

		TPD_LSO_SET(tpd, 1);
		TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
		TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
		return 0;
	}

do_csum:
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int css, cso;

		cso = skb_transport_offset(skb);
		if (unlikely(cso & 0x1)) {
			netdev_err(adpt->netdev,
				   "error: payload offset should be even\n");
			return -EINVAL;
		}
		css = cso + skb->csum_offset;

		TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
		TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
		TPD_CSX_SET(tpd, 1);
	}
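
	/* Worked example for the checksum block above (illustrative): for
	 * TCP over IPv4 with a 14-byte Ethernet header and a 20-byte IP
	 * header, cso = 34 and css = 34 + 16 (the csum_offset of the TCP
	 * checksum field) = 50; the hardware fields are in 16-bit units,
	 * hence the >> 1.
	 */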

	return 0;
}

/* Fill up transmit descriptors */
static void emac_tx_fill_tpd(struct emac_adapter *adpt,
			     struct emac_tx_queue *tx_q, struct sk_buff *skb,
			     struct emac_tpd *tpd)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int first = tx_q->tpd.produce_idx;
	unsigned int len = skb_headlen(skb);
	struct emac_buffer *tpbuf = NULL;
	unsigned int mapped_len = 0;
	unsigned int i;
	int count = 0;
	int ret;

	/* if Large Segment Offload is enabled (LSO bit set up by
	 * emac_tso_csum()), map the TCP/IP headers separately first
	 */
	if (TPD_LSO(tpd)) {
		mapped_len = skb_tcp_all_headers(skb);

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data),
					       offset_in_page(skb->data),
					       tpbuf->length,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	if (mapped_len < len) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = len - mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data +
							    mapped_len),
					       offset_in_page(skb->data +
							      mapped_len),
					       tpbuf->length, DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = skb_frag_size(frag);
		tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
						   frag, 0, tpbuf->length,
						   DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	/* The last tpd */
	wmb();
	emac_tx_tpd_mark_last(adpt, tx_q);

	/* The last buffer info contains the skb address,
	 * so it will be freed after unmap
	 */
	tpbuf->skb = skb;

	return;

error:
	/* One of the memory mappings failed, so undo everything */
	tx_q->tpd.produce_idx = first;

	while (count--) {
		tpbuf = GET_TPD_BUFFER(tx_q, first);
		dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
			       tpbuf->length, DMA_TO_DEVICE);
		tpbuf->dma_addr = 0;
		tpbuf->length = 0;

		if (++first == tx_q->tpd.count)
			first = 0;
	}

	dev_kfree_skb(skb);
}

/* Transmit the packet using specified transmit queue */
netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
				 struct emac_tx_queue *tx_q,
				 struct sk_buff *skb)
{
	struct emac_tpd tpd;
	u32 prod_idx;
	int len;

	memset(&tpd, 0, sizeof(tpd));

	if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 tag;

		EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
		TPD_CVLAN_TAG_SET(&tpd, tag);
		TPD_INSTC_SET(&tpd, 1);
	}

	if (skb_network_offset(skb) != ETH_HLEN)
		TPD_TYP_SET(&tpd, 1);

	len = skb->len;
	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);

	netdev_sent_queue(adpt->netdev, len);

	/* Make sure there are enough free descriptors to hold one
	 * maximum-sized SKB. We need one desc for each fragment,
	 * one for the checksum (emac_tso_csum), one for TSO, and
	 * one for the SKB header.
	 */
	if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
		netif_stop_queue(adpt->netdev);

	/* update produce idx */
	prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
		    tx_q->produce_mask;
	emac_reg_update32(adpt->base + tx_q->produce_reg,
			  tx_q->produce_mask, prod_idx);

	return NETDEV_TX_OK;
}