1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. |
4 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) |
5 | * |
6 | * Right now, I am very wasteful with the buffers. I allocate memory |
7 | * pages and then divide them into 2K frame buffers. This way I know I |
8 | * have buffers large enough to hold one frame within one buffer descriptor. |
9 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which |
10 | * will be much more memory efficient and will easily handle lots of |
11 | * small packets. |
12 | * |
13 | * Much better multiple PHY support by Magnus Damm. |
14 | * Copyright (c) 2000 Ericsson Radio Systems AB. |
15 | * |
16 | * Support for FEC controller of ColdFire processors. |
17 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) |
18 | * |
19 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) |
20 | * Copyright (c) 2004-2006 Macq Electronique SA. |
21 | * |
22 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. |
23 | */ |
24 | |
25 | #include <linux/module.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/string.h> |
28 | #include <linux/pm_runtime.h> |
29 | #include <linux/ptrace.h> |
30 | #include <linux/errno.h> |
31 | #include <linux/ioport.h> |
32 | #include <linux/slab.h> |
33 | #include <linux/interrupt.h> |
34 | #include <linux/delay.h> |
35 | #include <linux/netdevice.h> |
36 | #include <linux/etherdevice.h> |
37 | #include <linux/skbuff.h> |
38 | #include <linux/in.h> |
39 | #include <linux/ip.h> |
40 | #include <net/ip.h> |
41 | #include <net/page_pool/helpers.h> |
42 | #include <net/selftests.h> |
43 | #include <net/tso.h> |
44 | #include <linux/tcp.h> |
45 | #include <linux/udp.h> |
46 | #include <linux/icmp.h> |
47 | #include <linux/spinlock.h> |
48 | #include <linux/workqueue.h> |
49 | #include <linux/bitops.h> |
50 | #include <linux/io.h> |
51 | #include <linux/irq.h> |
52 | #include <linux/clk.h> |
53 | #include <linux/crc32.h> |
54 | #include <linux/platform_device.h> |
55 | #include <linux/property.h> |
56 | #include <linux/mdio.h> |
57 | #include <linux/phy.h> |
58 | #include <linux/fec.h> |
59 | #include <linux/of.h> |
60 | #include <linux/of_mdio.h> |
61 | #include <linux/of_net.h> |
62 | #include <linux/regulator/consumer.h> |
63 | #include <linux/if_vlan.h> |
64 | #include <linux/pinctrl/consumer.h> |
65 | #include <linux/gpio/consumer.h> |
66 | #include <linux/prefetch.h> |
67 | #include <linux/mfd/syscon.h> |
68 | #include <linux/regmap.h> |
69 | #include <soc/imx/cpuidle.h> |
70 | #include <linux/filter.h> |
71 | #include <linux/bpf.h> |
72 | #include <linux/bpf_trace.h> |
73 | |
74 | #include <asm/cacheflush.h> |
75 | |
76 | #include "fec.h" |
77 | |
78 | static void set_multicast_list(struct net_device *ndev); |
79 | static void fec_enet_itr_coal_set(struct net_device *ndev); |
80 | static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, |
81 | int cpu, struct xdp_buff *xdp, |
82 | u32 dma_sync_len); |
83 | |
84 | #define DRIVER_NAME "fec" |
85 | |
86 | static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; |
87 | |
/* Pause frame field and FIFO threshold */
89 | #define FEC_ENET_FCE (1 << 5) |
90 | #define FEC_ENET_RSEM_V 0x84 |
91 | #define FEC_ENET_RSFL_V 16 |
92 | #define FEC_ENET_RAEM_V 0x8 |
93 | #define FEC_ENET_RAFL_V 0x8 |
94 | #define FEC_ENET_OPD_V 0xFFF0 |
95 | #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ |
96 | |
97 | #define FEC_ENET_XDP_PASS 0 |
98 | #define FEC_ENET_XDP_CONSUMED BIT(0) |
99 | #define FEC_ENET_XDP_TX BIT(1) |
100 | #define FEC_ENET_XDP_REDIR BIT(2) |
101 | |
/* Per-SoC device information: a bitmask of FEC_QUIRK_* flags (defined in
 * fec.h) describing hardware capabilities and errata of one FEC variant.
 */
struct fec_devinfo {
	u32 quirks;
};
105 | |
/* Quirk descriptors for each supported SoC family.  These are selected at
 * probe time through the .data pointer of the matching fec_dt_ids entry.
 * The meaning of each FEC_QUIRK_* bit is documented in fec.h.
 */
static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};
183 | |
/* Platform-bus ID table.  Only the bare "fec" name is retained, for the
 * non-DT ColdFire platforms; DT-based SoCs match through fec_dt_ids instead.
 */
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
194 | |
/* Device-tree match table; .data points at the per-SoC quirk descriptor
 * that fec_probe() uses to configure the driver for that variant.
 */
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
209 | |
210 | static unsigned char macaddr[ETH_ALEN]; |
211 | module_param_array(macaddr, byte, NULL, 0); |
212 | MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address" ); |
213 | |
214 | #if defined(CONFIG_M5272) |
215 | /* |
 * Some hardware gets its MAC address out of local flash memory.
217 | * if this is non-zero then assume it is the address to get MAC from. |
218 | */ |
219 | #if defined(CONFIG_NETtel) |
220 | #define FEC_FLASHMAC 0xf0006006 |
221 | #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) |
222 | #define FEC_FLASHMAC 0xf0006000 |
223 | #elif defined(CONFIG_CANCam) |
224 | #define FEC_FLASHMAC 0xf0020000 |
225 | #elif defined (CONFIG_M5272C3) |
226 | #define FEC_FLASHMAC (0xffe04000 + 4) |
227 | #elif defined(CONFIG_MOD5272) |
228 | #define FEC_FLASHMAC 0xffc0406b |
229 | #else |
230 | #define FEC_FLASHMAC 0 |
231 | #endif |
232 | #endif /* CONFIG_M5272 */ |
233 | |
234 | /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. |
235 | * |
236 | * 2048 byte skbufs are allocated. However, alignment requirements |
237 | * varies between FEC variants. Worst case is 64, so round down by 64. |
238 | */ |
239 | #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) |
240 | #define PKT_MINBUF_SIZE 64 |
241 | |
242 | /* FEC receive acceleration */ |
243 | #define FEC_RACC_IPDIS (1 << 1) |
244 | #define FEC_RACC_PRODIS (1 << 2) |
245 | #define FEC_RACC_SHIFT16 BIT(7) |
246 | #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) |
247 | |
248 | /* MIB Control Register */ |
249 | #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) |
250 | |
251 | /* |
252 | * The 5270/5271/5280/5282/532x RX control register also contains maximum frame |
253 | * size bits. Other FEC hardware does not, so we need to take that into |
254 | * account when setting it. |
255 | */ |
256 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
257 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
258 | defined(CONFIG_ARM64) |
259 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
260 | #else |
261 | #define OPT_FRAME_SIZE 0 |
262 | #endif |
263 | |
264 | /* FEC MII MMFR bits definition */ |
265 | #define FEC_MMFR_ST (1 << 30) |
266 | #define FEC_MMFR_ST_C45 (0) |
267 | #define FEC_MMFR_OP_READ (2 << 28) |
268 | #define FEC_MMFR_OP_READ_C45 (3 << 28) |
269 | #define FEC_MMFR_OP_WRITE (1 << 28) |
270 | #define FEC_MMFR_OP_ADDR_WRITE (0) |
271 | #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) |
272 | #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) |
273 | #define FEC_MMFR_TA (2 << 16) |
274 | #define FEC_MMFR_DATA(v) (v & 0xffff) |
275 | /* FEC ECR bits definition */ |
276 | #define FEC_ECR_MAGICEN (1 << 2) |
277 | #define FEC_ECR_SLEEP (1 << 3) |
278 | |
279 | #define FEC_MII_TIMEOUT 30000 /* us */ |
280 | |
281 | /* Transmitter timeout */ |
282 | #define TX_TIMEOUT (2 * HZ) |
283 | |
284 | #define FEC_PAUSE_FLAG_AUTONEG 0x1 |
285 | #define FEC_PAUSE_FLAG_ENABLE 0x2 |
286 | #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) |
287 | #define FEC_WOL_FLAG_ENABLE (0x1 << 1) |
288 | #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) |
289 | |
290 | /* Max number of allowed TCP segments for software TSO */ |
291 | #define FEC_MAX_TSO_SEGS 100 |
292 | #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) |
293 | |
/* True if @addr falls inside @txq's preallocated TSO header region, i.e. it
 * was not mapped with dma_map_single() and must not be unmapped on cleanup.
 * (The macro name was lost in this copy of the file; restore it.)
 */
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
297 | |
298 | static int mii_cnt; |
299 | |
300 | static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, |
301 | struct bufdesc_prop *bd) |
302 | { |
303 | return (bdp >= bd->last) ? bd->base |
304 | : (struct bufdesc *)(((void *)bdp) + bd->dsize); |
305 | } |
306 | |
307 | static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, |
308 | struct bufdesc_prop *bd) |
309 | { |
310 | return (bdp <= bd->base) ? bd->last |
311 | : (struct bufdesc *)(((void *)bdp) - bd->dsize); |
312 | } |
313 | |
314 | static int fec_enet_get_bd_index(struct bufdesc *bdp, |
315 | struct bufdesc_prop *bd) |
316 | { |
317 | return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; |
318 | } |
319 | |
320 | static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) |
321 | { |
322 | int entries; |
323 | |
324 | entries = (((const char *)txq->dirty_tx - |
325 | (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; |
326 | |
327 | return entries >= 0 ? entries : entries + txq->bd.ring_size; |
328 | } |
329 | |
/* Byte-swap @len bytes of @bufaddr in place, one 32-bit word at a time.
 * Used on parts with the FEC_QUIRK_SWAP_FRAME erratum.
 * (This copy of the file had an IDE inlay hint "p:" baked into the
 * swab32s() call, making it invalid C; strip it.)
 */
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}
338 | |
339 | static void fec_dump(struct net_device *ndev) |
340 | { |
341 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
342 | struct bufdesc *bdp; |
343 | struct fec_enet_priv_tx_q *txq; |
344 | int index = 0; |
345 | |
346 | netdev_info(dev: ndev, format: "TX ring dump\n" ); |
347 | pr_info("Nr SC addr len SKB\n" ); |
348 | |
349 | txq = fep->tx_queue[0]; |
350 | bdp = txq->bd.base; |
351 | |
352 | do { |
353 | pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n" , |
354 | index, |
355 | bdp == txq->bd.cur ? 'S' : ' ', |
356 | bdp == txq->dirty_tx ? 'H' : ' ', |
357 | fec16_to_cpu(bdp->cbd_sc), |
358 | fec32_to_cpu(bdp->cbd_bufaddr), |
359 | fec16_to_cpu(bdp->cbd_datlen), |
360 | txq->tx_buf[index].buf_p); |
361 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
362 | index++; |
363 | } while (bdp != txq->bd.base); |
364 | } |
365 | |
366 | /* |
367 | * Coldfire does not support DMA coherent allocations, and has historically used |
368 | * a band-aid with a manual flush in fec_enet_rx_queue. |
369 | */ |
370 | #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) |
/* ColdFire variant: allocate a non-coherent DMA buffer (the RX path does a
 * manual cache flush instead — see the comment above this #if block).
 */
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}
376 | |
/* ColdFire variant: release a buffer obtained from fec_dma_alloc(). */
static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
}
382 | #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ |
383 | static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
384 | gfp_t gfp) |
385 | { |
386 | return dma_alloc_coherent(dev, size, dma_handle: handle, gfp); |
387 | } |
388 | |
389 | static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr, |
390 | dma_addr_t handle) |
391 | { |
392 | dma_free_coherent(dev, size, cpu_addr, dma_handle: handle); |
393 | } |
394 | #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */ |
395 | |
/* Bookkeeping attached to a devres entry so fec_dmam_release() can free
 * the DMA buffer when the owning device is torn down.
 */
struct fec_dma_devres {
	size_t		size;		/* allocation size in bytes */
	void		*vaddr;		/* CPU virtual address */
	dma_addr_t	dma_handle;	/* device DMA address */
};
401 | |
402 | static void fec_dmam_release(struct device *dev, void *res) |
403 | { |
404 | struct fec_dma_devres *this = res; |
405 | |
406 | fec_dma_free(dev, size: this->size, cpu_addr: this->vaddr, handle: this->dma_handle); |
407 | } |
408 | |
409 | static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle, |
410 | gfp_t gfp) |
411 | { |
412 | struct fec_dma_devres *dr; |
413 | void *vaddr; |
414 | |
415 | dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp); |
416 | if (!dr) |
417 | return NULL; |
418 | vaddr = fec_dma_alloc(dev, size, handle, gfp); |
419 | if (!vaddr) { |
420 | devres_free(res: dr); |
421 | return NULL; |
422 | } |
423 | dr->vaddr = vaddr; |
424 | dr->dma_handle = *handle; |
425 | dr->size = size; |
426 | devres_add(dev, res: dr); |
427 | return vaddr; |
428 | } |
429 | |
430 | static inline bool is_ipv4_pkt(struct sk_buff *skb) |
431 | { |
432 | return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; |
433 | } |
434 | |
435 | static int |
436 | fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) |
437 | { |
438 | /* Only run for packets requiring a checksum. */ |
439 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
440 | return 0; |
441 | |
442 | if (unlikely(skb_cow_head(skb, 0))) |
443 | return -1; |
444 | |
445 | if (is_ipv4_pkt(skb)) |
446 | ip_hdr(skb)->check = 0; |
447 | *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; |
448 | |
449 | return 0; |
450 | } |
451 | |
452 | static int |
453 | fec_enet_create_page_pool(struct fec_enet_private *fep, |
454 | struct fec_enet_priv_rx_q *rxq, int size) |
455 | { |
456 | struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); |
457 | struct page_pool_params pp_params = { |
458 | .order = 0, |
459 | .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, |
460 | .pool_size = size, |
461 | .nid = dev_to_node(dev: &fep->pdev->dev), |
462 | .dev = &fep->pdev->dev, |
463 | .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, |
464 | .offset = FEC_ENET_XDP_HEADROOM, |
465 | .max_len = FEC_ENET_RX_FRSIZE, |
466 | }; |
467 | int err; |
468 | |
469 | rxq->page_pool = page_pool_create(params: &pp_params); |
470 | if (IS_ERR(ptr: rxq->page_pool)) { |
471 | err = PTR_ERR(ptr: rxq->page_pool); |
472 | rxq->page_pool = NULL; |
473 | return err; |
474 | } |
475 | |
476 | err = xdp_rxq_info_reg(xdp_rxq: &rxq->xdp_rxq, dev: fep->netdev, queue_index: rxq->id, napi_id: 0); |
477 | if (err < 0) |
478 | goto err_free_pp; |
479 | |
480 | err = xdp_rxq_info_reg_mem_model(xdp_rxq: &rxq->xdp_rxq, type: MEM_TYPE_PAGE_POOL, |
481 | allocator: rxq->page_pool); |
482 | if (err) |
483 | goto err_unregister_rxq; |
484 | |
485 | return 0; |
486 | |
487 | err_unregister_rxq: |
488 | xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq); |
489 | err_free_pp: |
490 | page_pool_destroy(pool: rxq->page_pool); |
491 | rxq->page_pool = NULL; |
492 | return err; |
493 | } |
494 | |
495 | static struct bufdesc * |
496 | fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, |
497 | struct sk_buff *skb, |
498 | struct net_device *ndev) |
499 | { |
500 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
501 | struct bufdesc *bdp = txq->bd.cur; |
502 | struct bufdesc_ex *ebdp; |
503 | int nr_frags = skb_shinfo(skb)->nr_frags; |
504 | int frag, frag_len; |
505 | unsigned short status; |
506 | unsigned int estatus = 0; |
507 | skb_frag_t *this_frag; |
508 | unsigned int index; |
509 | void *bufaddr; |
510 | dma_addr_t addr; |
511 | int i; |
512 | |
513 | for (frag = 0; frag < nr_frags; frag++) { |
514 | this_frag = &skb_shinfo(skb)->frags[frag]; |
515 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
516 | ebdp = (struct bufdesc_ex *)bdp; |
517 | |
518 | status = fec16_to_cpu(bdp->cbd_sc); |
519 | status &= ~BD_ENET_TX_STATS; |
520 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
521 | frag_len = skb_frag_size(frag: &skb_shinfo(skb)->frags[frag]); |
522 | |
523 | /* Handle the last BD specially */ |
524 | if (frag == nr_frags - 1) { |
525 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); |
526 | if (fep->bufdesc_ex) { |
527 | estatus |= BD_ENET_TX_INT; |
528 | if (unlikely(skb_shinfo(skb)->tx_flags & |
529 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) |
530 | estatus |= BD_ENET_TX_TS; |
531 | } |
532 | } |
533 | |
534 | if (fep->bufdesc_ex) { |
535 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
536 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
537 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
538 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
539 | |
540 | ebdp->cbd_bdu = 0; |
541 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
542 | } |
543 | |
544 | bufaddr = skb_frag_address(frag: this_frag); |
545 | |
546 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
547 | if (((unsigned long) bufaddr) & fep->tx_align || |
548 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
549 | memcpy(txq->tx_bounce[index], bufaddr, frag_len); |
550 | bufaddr = txq->tx_bounce[index]; |
551 | |
552 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
553 | swap_buffer(bufaddr, len: frag_len); |
554 | } |
555 | |
556 | addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, |
557 | DMA_TO_DEVICE); |
558 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
559 | if (net_ratelimit()) |
560 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
561 | goto dma_mapping_error; |
562 | } |
563 | |
564 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
565 | bdp->cbd_datlen = cpu_to_fec16(frag_len); |
566 | /* Make sure the updates to rest of the descriptor are |
567 | * performed before transferring ownership. |
568 | */ |
569 | wmb(); |
570 | bdp->cbd_sc = cpu_to_fec16(status); |
571 | } |
572 | |
573 | return bdp; |
574 | dma_mapping_error: |
575 | bdp = txq->bd.cur; |
576 | for (i = 0; i < frag; i++) { |
577 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
578 | dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), |
579 | fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); |
580 | } |
581 | return ERR_PTR(error: -ENOMEM); |
582 | } |
583 | |
584 | static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, |
585 | struct sk_buff *skb, struct net_device *ndev) |
586 | { |
587 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
588 | int nr_frags = skb_shinfo(skb)->nr_frags; |
589 | struct bufdesc *bdp, *last_bdp; |
590 | void *bufaddr; |
591 | dma_addr_t addr; |
592 | unsigned short status; |
593 | unsigned short buflen; |
594 | unsigned int estatus = 0; |
595 | unsigned int index; |
596 | int entries_free; |
597 | |
598 | entries_free = fec_enet_get_free_txdesc_num(txq); |
599 | if (entries_free < MAX_SKB_FRAGS + 1) { |
600 | dev_kfree_skb_any(skb); |
601 | if (net_ratelimit()) |
602 | netdev_err(dev: ndev, format: "NOT enough BD for SG!\n" ); |
603 | return NETDEV_TX_OK; |
604 | } |
605 | |
606 | /* Protocol checksum off-load for TCP and UDP. */ |
607 | if (fec_enet_clear_csum(skb, ndev)) { |
608 | dev_kfree_skb_any(skb); |
609 | return NETDEV_TX_OK; |
610 | } |
611 | |
612 | /* Fill in a Tx ring entry */ |
613 | bdp = txq->bd.cur; |
614 | last_bdp = bdp; |
615 | status = fec16_to_cpu(bdp->cbd_sc); |
616 | status &= ~BD_ENET_TX_STATS; |
617 | |
618 | /* Set buffer length and buffer pointer */ |
619 | bufaddr = skb->data; |
620 | buflen = skb_headlen(skb); |
621 | |
622 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
623 | if (((unsigned long) bufaddr) & fep->tx_align || |
624 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
625 | memcpy(txq->tx_bounce[index], skb->data, buflen); |
626 | bufaddr = txq->tx_bounce[index]; |
627 | |
628 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
629 | swap_buffer(bufaddr, len: buflen); |
630 | } |
631 | |
632 | /* Push the data cache so the CPM does not get stale memory data. */ |
633 | addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); |
634 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
635 | dev_kfree_skb_any(skb); |
636 | if (net_ratelimit()) |
637 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
638 | return NETDEV_TX_OK; |
639 | } |
640 | |
641 | if (nr_frags) { |
642 | last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); |
643 | if (IS_ERR(ptr: last_bdp)) { |
644 | dma_unmap_single(&fep->pdev->dev, addr, |
645 | buflen, DMA_TO_DEVICE); |
646 | dev_kfree_skb_any(skb); |
647 | return NETDEV_TX_OK; |
648 | } |
649 | } else { |
650 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); |
651 | if (fep->bufdesc_ex) { |
652 | estatus = BD_ENET_TX_INT; |
653 | if (unlikely(skb_shinfo(skb)->tx_flags & |
654 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) |
655 | estatus |= BD_ENET_TX_TS; |
656 | } |
657 | } |
658 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
659 | bdp->cbd_datlen = cpu_to_fec16(buflen); |
660 | |
661 | if (fep->bufdesc_ex) { |
662 | |
663 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
664 | |
665 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && |
666 | fep->hwts_tx_en)) |
667 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
668 | |
669 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
670 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
671 | |
672 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
673 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
674 | |
675 | ebdp->cbd_bdu = 0; |
676 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
677 | } |
678 | |
679 | index = fec_enet_get_bd_index(bdp: last_bdp, bd: &txq->bd); |
680 | /* Save skb pointer */ |
681 | txq->tx_buf[index].buf_p = skb; |
682 | |
683 | /* Make sure the updates to rest of the descriptor are performed before |
684 | * transferring ownership. |
685 | */ |
686 | wmb(); |
687 | |
688 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
689 | * it's the last BD of the frame, and to put the CRC on the end. |
690 | */ |
691 | status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); |
692 | bdp->cbd_sc = cpu_to_fec16(status); |
693 | |
694 | /* If this was the last BD in the ring, start at the beginning again. */ |
695 | bdp = fec_enet_get_nextdesc(bdp: last_bdp, bd: &txq->bd); |
696 | |
697 | skb_tx_timestamp(skb); |
698 | |
699 | /* Make sure the update to bdp is performed before txq->bd.cur. */ |
700 | wmb(); |
701 | txq->bd.cur = bdp; |
702 | |
703 | /* Trigger transmission start */ |
704 | writel(val: 0, addr: txq->bd.reg_desc_active); |
705 | |
706 | return 0; |
707 | } |
708 | |
709 | static int |
710 | fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, |
711 | struct net_device *ndev, |
712 | struct bufdesc *bdp, int index, char *data, |
713 | int size, bool last_tcp, bool is_last) |
714 | { |
715 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
716 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
717 | unsigned short status; |
718 | unsigned int estatus = 0; |
719 | dma_addr_t addr; |
720 | |
721 | status = fec16_to_cpu(bdp->cbd_sc); |
722 | status &= ~BD_ENET_TX_STATS; |
723 | |
724 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
725 | |
726 | if (((unsigned long) data) & fep->tx_align || |
727 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
728 | memcpy(txq->tx_bounce[index], data, size); |
729 | data = txq->tx_bounce[index]; |
730 | |
731 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
732 | swap_buffer(bufaddr: data, len: size); |
733 | } |
734 | |
735 | addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); |
736 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: addr)) { |
737 | dev_kfree_skb_any(skb); |
738 | if (net_ratelimit()) |
739 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
740 | return NETDEV_TX_OK; |
741 | } |
742 | |
743 | bdp->cbd_datlen = cpu_to_fec16(size); |
744 | bdp->cbd_bufaddr = cpu_to_fec32(addr); |
745 | |
746 | if (fep->bufdesc_ex) { |
747 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
748 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
749 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
750 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
751 | ebdp->cbd_bdu = 0; |
752 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
753 | } |
754 | |
755 | /* Handle the last BD specially */ |
756 | if (last_tcp) |
757 | status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); |
758 | if (is_last) { |
759 | status |= BD_ENET_TX_INTR; |
760 | if (fep->bufdesc_ex) |
761 | ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); |
762 | } |
763 | |
764 | bdp->cbd_sc = cpu_to_fec16(status); |
765 | |
766 | return 0; |
767 | } |
768 | |
769 | static int |
770 | fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, |
771 | struct sk_buff *skb, struct net_device *ndev, |
772 | struct bufdesc *bdp, int index) |
773 | { |
774 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
775 | int hdr_len = skb_tcp_all_headers(skb); |
776 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
777 | void *bufaddr; |
778 | unsigned long dmabuf; |
779 | unsigned short status; |
780 | unsigned int estatus = 0; |
781 | |
782 | status = fec16_to_cpu(bdp->cbd_sc); |
783 | status &= ~BD_ENET_TX_STATS; |
784 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
785 | |
786 | bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; |
787 | dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; |
788 | if (((unsigned long)bufaddr) & fep->tx_align || |
789 | fep->quirks & FEC_QUIRK_SWAP_FRAME) { |
790 | memcpy(txq->tx_bounce[index], skb->data, hdr_len); |
791 | bufaddr = txq->tx_bounce[index]; |
792 | |
793 | if (fep->quirks & FEC_QUIRK_SWAP_FRAME) |
794 | swap_buffer(bufaddr, len: hdr_len); |
795 | |
796 | dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, |
797 | hdr_len, DMA_TO_DEVICE); |
798 | if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr: dmabuf)) { |
799 | dev_kfree_skb_any(skb); |
800 | if (net_ratelimit()) |
801 | netdev_err(dev: ndev, format: "Tx DMA memory map failed\n" ); |
802 | return NETDEV_TX_OK; |
803 | } |
804 | } |
805 | |
806 | bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); |
807 | bdp->cbd_datlen = cpu_to_fec16(hdr_len); |
808 | |
809 | if (fep->bufdesc_ex) { |
810 | if (fep->quirks & FEC_QUIRK_HAS_AVB) |
811 | estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); |
812 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
813 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; |
814 | ebdp->cbd_bdu = 0; |
815 | ebdp->cbd_esc = cpu_to_fec32(estatus); |
816 | } |
817 | |
818 | bdp->cbd_sc = cpu_to_fec16(status); |
819 | |
820 | return 0; |
821 | } |
822 | |
823 | static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, |
824 | struct sk_buff *skb, |
825 | struct net_device *ndev) |
826 | { |
827 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
828 | int hdr_len, total_len, data_left; |
829 | struct bufdesc *bdp = txq->bd.cur; |
830 | struct tso_t tso; |
831 | unsigned int index = 0; |
832 | int ret; |
833 | |
834 | if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { |
835 | dev_kfree_skb_any(skb); |
836 | if (net_ratelimit()) |
837 | netdev_err(dev: ndev, format: "NOT enough BD for TSO!\n" ); |
838 | return NETDEV_TX_OK; |
839 | } |
840 | |
841 | /* Protocol checksum off-load for TCP and UDP. */ |
842 | if (fec_enet_clear_csum(skb, ndev)) { |
843 | dev_kfree_skb_any(skb); |
844 | return NETDEV_TX_OK; |
845 | } |
846 | |
847 | /* Initialize the TSO handler, and prepare the first payload */ |
848 | hdr_len = tso_start(skb, tso: &tso); |
849 | |
850 | total_len = skb->len - hdr_len; |
851 | while (total_len > 0) { |
852 | char *hdr; |
853 | |
854 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
855 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
856 | total_len -= data_left; |
857 | |
858 | /* prepare packet headers: MAC + IP + TCP */ |
859 | hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; |
860 | tso_build_hdr(skb, hdr, tso: &tso, size: data_left, is_last: total_len == 0); |
861 | ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); |
862 | if (ret) |
863 | goto err_release; |
864 | |
865 | while (data_left > 0) { |
866 | int size; |
867 | |
868 | size = min_t(int, tso.size, data_left); |
869 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
870 | index = fec_enet_get_bd_index(bdp, bd: &txq->bd); |
871 | ret = fec_enet_txq_put_data_tso(txq, skb, ndev, |
872 | bdp, index, |
873 | data: tso.data, size, |
874 | last_tcp: size == data_left, |
875 | is_last: total_len == 0); |
876 | if (ret) |
877 | goto err_release; |
878 | |
879 | data_left -= size; |
880 | tso_build_data(skb, tso: &tso, size); |
881 | } |
882 | |
883 | bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd); |
884 | } |
885 | |
886 | /* Save skb pointer */ |
887 | txq->tx_buf[index].buf_p = skb; |
888 | |
889 | skb_tx_timestamp(skb); |
890 | txq->bd.cur = bdp; |
891 | |
892 | /* Trigger transmission start */ |
893 | if (!(fep->quirks & FEC_QUIRK_ERR007885) || |
894 | !readl(addr: txq->bd.reg_desc_active) || |
895 | !readl(addr: txq->bd.reg_desc_active) || |
896 | !readl(addr: txq->bd.reg_desc_active) || |
897 | !readl(addr: txq->bd.reg_desc_active)) |
898 | writel(val: 0, addr: txq->bd.reg_desc_active); |
899 | |
900 | return 0; |
901 | |
902 | err_release: |
903 | /* TODO: Release all used data descriptors for TSO */ |
904 | return ret; |
905 | } |
906 | |
907 | static netdev_tx_t |
908 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
909 | { |
910 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
911 | int entries_free; |
912 | unsigned short queue; |
913 | struct fec_enet_priv_tx_q *txq; |
914 | struct netdev_queue *nq; |
915 | int ret; |
916 | |
917 | queue = skb_get_queue_mapping(skb); |
918 | txq = fep->tx_queue[queue]; |
919 | nq = netdev_get_tx_queue(dev: ndev, index: queue); |
920 | |
921 | if (skb_is_gso(skb)) |
922 | ret = fec_enet_txq_submit_tso(txq, skb, ndev); |
923 | else |
924 | ret = fec_enet_txq_submit_skb(txq, skb, ndev); |
925 | if (ret) |
926 | return ret; |
927 | |
928 | entries_free = fec_enet_get_free_txdesc_num(txq); |
929 | if (entries_free <= txq->tx_stop_threshold) |
930 | netif_tx_stop_queue(dev_queue: nq); |
931 | |
932 | return NETDEV_TX_OK; |
933 | } |
934 | |
/* Init RX & TX buffer descriptors
 *
 * Resets every ring while the controller is quiesced (called from
 * fec_restart()):
 *  - RX descriptors that still carry a buffer are handed back to the
 *    hardware (EMPTY); bufferless ones are cleared.
 *  - TX descriptors are cleared, and any buffer left in flight from
 *    before the reset (skb, XDP frame, or XDP_TX page-pool page) is
 *    DMA-unmapped and released.
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, bd: &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				/* Addresses inside the txq->tso_hdrs region
				 * are managed separately - never unmap them.
				 */
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(skb: txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(xdpf: txq->tx_buf[i].buf_p);
			} else {
				/* XDP_TX buffers are page-pool pages; give
				 * them back to their pool.
				 */
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, bd: &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
1015 | |
1016 | static void fec_enet_active_rxring(struct net_device *ndev) |
1017 | { |
1018 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1019 | int i; |
1020 | |
1021 | for (i = 0; i < fep->num_rx_queues; i++) |
1022 | writel(val: 0, addr: fep->rx_queue[i]->bd.reg_desc_active); |
1023 | } |
1024 | |
1025 | static void fec_enet_enable_ring(struct net_device *ndev) |
1026 | { |
1027 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1028 | struct fec_enet_priv_tx_q *txq; |
1029 | struct fec_enet_priv_rx_q *rxq; |
1030 | int i; |
1031 | |
1032 | for (i = 0; i < fep->num_rx_queues; i++) { |
1033 | rxq = fep->rx_queue[i]; |
1034 | writel(val: rxq->bd.dma, addr: fep->hwp + FEC_R_DES_START(i)); |
1035 | writel(PKT_MAXBUF_SIZE, addr: fep->hwp + FEC_R_BUFF_SIZE(i)); |
1036 | |
1037 | /* enable DMA1/2 */ |
1038 | if (i) |
1039 | writel(RCMR_MATCHEN | RCMR_CMP(i), |
1040 | addr: fep->hwp + FEC_RCMR(i)); |
1041 | } |
1042 | |
1043 | for (i = 0; i < fep->num_tx_queues; i++) { |
1044 | txq = fep->tx_queue[i]; |
1045 | writel(val: txq->bd.dma, addr: fep->hwp + FEC_X_DES_START(i)); |
1046 | |
1047 | /* enable DMA1/2 */ |
1048 | if (i) |
1049 | writel(DMA_CLASS_EN | IDLE_SLOPE(i), |
1050 | addr: fep->hwp + FEC_DMA_CFG(i)); |
1051 | } |
1052 | } |
1053 | |
1054 | /* |
1055 | * This function is called to start or restart the FEC during a link |
1056 | * change, transmit timeout, or to reconfigure the FEC. The network |
1057 | * packet processing for this device must be stopped before this call. |
1058 | */ |
1059 | static void |
1060 | fec_restart(struct net_device *ndev) |
1061 | { |
1062 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1063 | u32 temp_mac[2]; |
1064 | u32 rcntl = OPT_FRAME_SIZE | 0x04; |
1065 | u32 ecntl = 0x2; /* ETHEREN */ |
1066 | |
1067 | /* Whack a reset. We should wait for this. |
1068 | * For i.MX6SX SOC, enet use AXI bus, we use disable MAC |
1069 | * instead of reset MAC itself. |
1070 | */ |
1071 | if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || |
1072 | ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { |
1073 | writel(val: 0, addr: fep->hwp + FEC_ECNTRL); |
1074 | } else { |
1075 | writel(val: 1, addr: fep->hwp + FEC_ECNTRL); |
1076 | udelay(10); |
1077 | } |
1078 | |
1079 | /* |
1080 | * enet-mac reset will reset mac address registers too, |
1081 | * so need to reconfigure it. |
1082 | */ |
1083 | memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); |
1084 | writel(val: (__force u32)cpu_to_be32(temp_mac[0]), |
1085 | addr: fep->hwp + FEC_ADDR_LOW); |
1086 | writel(val: (__force u32)cpu_to_be32(temp_mac[1]), |
1087 | addr: fep->hwp + FEC_ADDR_HIGH); |
1088 | |
1089 | /* Clear any outstanding interrupt, except MDIO. */ |
1090 | writel(val: (0xffffffff & ~FEC_ENET_MII), addr: fep->hwp + FEC_IEVENT); |
1091 | |
1092 | fec_enet_bd_init(dev: ndev); |
1093 | |
1094 | fec_enet_enable_ring(ndev); |
1095 | |
1096 | /* Enable MII mode */ |
1097 | if (fep->full_duplex == DUPLEX_FULL) { |
1098 | /* FD enable */ |
1099 | writel(val: 0x04, addr: fep->hwp + FEC_X_CNTRL); |
1100 | } else { |
1101 | /* No Rcv on Xmit */ |
1102 | rcntl |= 0x02; |
1103 | writel(val: 0x0, addr: fep->hwp + FEC_X_CNTRL); |
1104 | } |
1105 | |
1106 | /* Set MII speed */ |
1107 | writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED); |
1108 | |
1109 | #if !defined(CONFIG_M5272) |
1110 | if (fep->quirks & FEC_QUIRK_HAS_RACC) { |
1111 | u32 val = readl(addr: fep->hwp + FEC_RACC); |
1112 | |
1113 | /* align IP header */ |
1114 | val |= FEC_RACC_SHIFT16; |
1115 | if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) |
1116 | /* set RX checksum */ |
1117 | val |= FEC_RACC_OPTIONS; |
1118 | else |
1119 | val &= ~FEC_RACC_OPTIONS; |
1120 | writel(val, addr: fep->hwp + FEC_RACC); |
1121 | writel(PKT_MAXBUF_SIZE, addr: fep->hwp + FEC_FTRL); |
1122 | } |
1123 | #endif |
1124 | |
1125 | /* |
1126 | * The phy interface and speed need to get configured |
1127 | * differently on enet-mac. |
1128 | */ |
1129 | if (fep->quirks & FEC_QUIRK_ENET_MAC) { |
1130 | /* Enable flow control and length check */ |
1131 | rcntl |= 0x40000000 | 0x00000020; |
1132 | |
1133 | /* RGMII, RMII or MII */ |
1134 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || |
1135 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || |
1136 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || |
1137 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) |
1138 | rcntl |= (1 << 6); |
1139 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1140 | rcntl |= (1 << 8); |
1141 | else |
1142 | rcntl &= ~(1 << 8); |
1143 | |
1144 | /* 1G, 100M or 10M */ |
1145 | if (ndev->phydev) { |
1146 | if (ndev->phydev->speed == SPEED_1000) |
1147 | ecntl |= (1 << 5); |
1148 | else if (ndev->phydev->speed == SPEED_100) |
1149 | rcntl &= ~(1 << 9); |
1150 | else |
1151 | rcntl |= (1 << 9); |
1152 | } |
1153 | } else { |
1154 | #ifdef FEC_MIIGSK_ENR |
1155 | if (fep->quirks & FEC_QUIRK_USE_GASKET) { |
1156 | u32 cfgr; |
1157 | /* disable the gasket and wait */ |
1158 | writel(val: 0, addr: fep->hwp + FEC_MIIGSK_ENR); |
1159 | while (readl(addr: fep->hwp + FEC_MIIGSK_ENR) & 4) |
1160 | udelay(1); |
1161 | |
1162 | /* |
1163 | * configure the gasket: |
1164 | * RMII, 50 MHz, no loopback, no echo |
1165 | * MII, 25 MHz, no loopback, no echo |
1166 | */ |
1167 | cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1168 | ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; |
1169 | if (ndev->phydev && ndev->phydev->speed == SPEED_10) |
1170 | cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; |
1171 | writel(val: cfgr, addr: fep->hwp + FEC_MIIGSK_CFGR); |
1172 | |
1173 | /* re-enable the gasket */ |
1174 | writel(val: 2, addr: fep->hwp + FEC_MIIGSK_ENR); |
1175 | } |
1176 | #endif |
1177 | } |
1178 | |
1179 | #if !defined(CONFIG_M5272) |
1180 | /* enable pause frame*/ |
1181 | if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || |
1182 | ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && |
1183 | ndev->phydev && ndev->phydev->pause)) { |
1184 | rcntl |= FEC_ENET_FCE; |
1185 | |
1186 | /* set FIFO threshold parameter to reduce overrun */ |
1187 | writel(FEC_ENET_RSEM_V, addr: fep->hwp + FEC_R_FIFO_RSEM); |
1188 | writel(FEC_ENET_RSFL_V, addr: fep->hwp + FEC_R_FIFO_RSFL); |
1189 | writel(FEC_ENET_RAEM_V, addr: fep->hwp + FEC_R_FIFO_RAEM); |
1190 | writel(FEC_ENET_RAFL_V, addr: fep->hwp + FEC_R_FIFO_RAFL); |
1191 | |
1192 | /* OPD */ |
1193 | writel(FEC_ENET_OPD_V, addr: fep->hwp + FEC_OPD); |
1194 | } else { |
1195 | rcntl &= ~FEC_ENET_FCE; |
1196 | } |
1197 | #endif /* !defined(CONFIG_M5272) */ |
1198 | |
1199 | writel(val: rcntl, addr: fep->hwp + FEC_R_CNTRL); |
1200 | |
1201 | /* Setup multicast filter. */ |
1202 | set_multicast_list(ndev); |
1203 | #ifndef CONFIG_M5272 |
1204 | writel(val: 0, addr: fep->hwp + FEC_HASH_TABLE_HIGH); |
1205 | writel(val: 0, addr: fep->hwp + FEC_HASH_TABLE_LOW); |
1206 | #endif |
1207 | |
1208 | if (fep->quirks & FEC_QUIRK_ENET_MAC) { |
1209 | /* enable ENET endian swap */ |
1210 | ecntl |= (1 << 8); |
1211 | /* enable ENET store and forward mode */ |
1212 | writel(val: 1 << 8, addr: fep->hwp + FEC_X_WMRK); |
1213 | } |
1214 | |
1215 | if (fep->bufdesc_ex) |
1216 | ecntl |= (1 << 4); |
1217 | |
1218 | if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && |
1219 | fep->rgmii_txc_dly) |
1220 | ecntl |= FEC_ENET_TXC_DLY; |
1221 | if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && |
1222 | fep->rgmii_rxc_dly) |
1223 | ecntl |= FEC_ENET_RXC_DLY; |
1224 | |
1225 | #ifndef CONFIG_M5272 |
1226 | /* Enable the MIB statistic event counters */ |
1227 | writel(val: 0 << 31, addr: fep->hwp + FEC_MIB_CTRLSTAT); |
1228 | #endif |
1229 | |
1230 | /* And last, enable the transmit and receive processing */ |
1231 | writel(val: ecntl, addr: fep->hwp + FEC_ECNTRL); |
1232 | fec_enet_active_rxring(ndev); |
1233 | |
1234 | if (fep->bufdesc_ex) |
1235 | fec_ptp_start_cyclecounter(ndev); |
1236 | |
1237 | /* Enable interrupts we wish to service */ |
1238 | if (fep->link) |
1239 | writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK); |
1240 | else |
1241 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1242 | |
1243 | /* Init the interrupt coalescing */ |
1244 | if (fep->quirks & FEC_QUIRK_HAS_COALESCE) |
1245 | fec_enet_itr_coal_set(ndev); |
1246 | } |
1247 | |
1248 | static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) |
1249 | { |
1250 | if (!(of_machine_is_compatible(compat: "fsl,imx8qm" ) || |
1251 | of_machine_is_compatible(compat: "fsl,imx8qxp" ) || |
1252 | of_machine_is_compatible(compat: "fsl,imx8dxl" ))) |
1253 | return 0; |
1254 | |
1255 | return imx_scu_get_handle(ipc: &fep->ipc_handle); |
1256 | } |
1257 | |
1258 | static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) |
1259 | { |
1260 | struct device_node *np = fep->pdev->dev.of_node; |
1261 | u32 rsrc_id, val; |
1262 | int idx; |
1263 | |
1264 | if (!np || !fep->ipc_handle) |
1265 | return; |
1266 | |
1267 | idx = of_alias_get_id(np, stem: "ethernet" ); |
1268 | if (idx < 0) |
1269 | idx = 0; |
1270 | rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; |
1271 | |
1272 | val = enabled ? 1 : 0; |
1273 | imx_sc_misc_set_control(ipc: fep->ipc_handle, resource: rsrc_id, IMX_SC_C_IPG_STOP, val); |
1274 | } |
1275 | |
1276 | static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) |
1277 | { |
1278 | struct fec_platform_data *pdata = fep->pdev->dev.platform_data; |
1279 | struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; |
1280 | |
1281 | if (stop_gpr->gpr) { |
1282 | if (enabled) |
1283 | regmap_update_bits(map: stop_gpr->gpr, reg: stop_gpr->reg, |
1284 | BIT(stop_gpr->bit), |
1285 | BIT(stop_gpr->bit)); |
1286 | else |
1287 | regmap_update_bits(map: stop_gpr->gpr, reg: stop_gpr->reg, |
1288 | BIT(stop_gpr->bit), val: 0); |
1289 | } else if (pdata && pdata->sleep_mode_enable) { |
1290 | pdata->sleep_mode_enable(enabled); |
1291 | } else { |
1292 | fec_enet_ipg_stop_set(fep, enabled); |
1293 | } |
1294 | } |
1295 | |
1296 | static void fec_irqs_disable(struct net_device *ndev) |
1297 | { |
1298 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1299 | |
1300 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1301 | } |
1302 | |
1303 | static void fec_irqs_disable_except_wakeup(struct net_device *ndev) |
1304 | { |
1305 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1306 | |
1307 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1308 | writel(FEC_ENET_WAKEUP, addr: fep->hwp + FEC_IMASK); |
1309 | } |
1310 | |
/* Stop the controller.
 *
 * Attempts a graceful transmit stop while the link is up, then either
 * resets/disables the MAC or - when Wake-on-LAN sleep is requested -
 * leaves it running with magic-packet detection and sleep mode armed.
 * MII speed and the default interrupt mask are restored afterwards so
 * MDIO access keeps working.
 */
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	u32 rmii_mode = readl(addr: fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(val: 1, addr: fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(addr: fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(dev: ndev, format: "Graceful transmit stop did not complete!\n" );
	}

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
			writel(val: 0, addr: fep->hwp + FEC_ECNTRL);
		} else {
			writel(val: 1, addr: fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
	} else {
		/* WoL path: keep the MAC alive, arm magic-packet detect
		 * and enter sleep mode instead of resetting.
		 */
		val = readl(addr: fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, addr: fep->hwp + FEC_ECNTRL);
	}
	writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(val: 2, addr: fep->hwp + FEC_ECNTRL);
		writel(val: rmii_mode, addr: fep->hwp + FEC_R_CNTRL);
	}
}
1352 | |
1353 | |
1354 | static void |
1355 | fec_timeout(struct net_device *ndev, unsigned int txqueue) |
1356 | { |
1357 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1358 | |
1359 | fec_dump(ndev); |
1360 | |
1361 | ndev->stats.tx_errors++; |
1362 | |
1363 | schedule_work(work: &fep->tx_timeout_work); |
1364 | } |
1365 | |
/* Deferred TX-timeout handler, scheduled from fec_timeout().
 *
 * Runs in process context under RTNL: NAPI is disabled and transmitters
 * are locked out before fec_restart() reprograms the hardware, then the
 * TX queues are woken and NAPI re-enabled.
 */
static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(dev: ndev) || netif_running(dev: ndev)) {
		/* quiesce NAPI and the xmit path before touching HW */
		napi_disable(n: &fep->napi);
		netif_tx_lock_bh(dev: ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(dev: ndev);
		netif_tx_unlock_bh(dev: ndev);
		napi_enable(n: &fep->napi);
	}
	rtnl_unlock();
}
1383 | |
1384 | static void |
1385 | fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, |
1386 | struct skb_shared_hwtstamps *hwtstamps) |
1387 | { |
1388 | unsigned long flags; |
1389 | u64 ns; |
1390 | |
1391 | spin_lock_irqsave(&fep->tmreg_lock, flags); |
1392 | ns = timecounter_cyc2time(tc: &fep->tc, cycle_tstamp: ts); |
1393 | spin_unlock_irqrestore(lock: &fep->tmreg_lock, flags); |
1394 | |
1395 | memset(hwtstamps, 0, sizeof(*hwtstamps)); |
1396 | hwtstamps->hwtstamp = ns_to_ktime(ns); |
1397 | } |
1398 | |
/* Reap completed descriptors on one TX ring.
 *
 * Walks from dirty_tx towards bd.cur, stopping at the first descriptor
 * still owned by hardware (TX_READY).  For each finished descriptor the
 * DMA mapping is torn down, error/byte counters are updated, and the
 * buffer is released according to its type: skb (with optional HW TX
 * timestamp delivery), XDP frame, or XDP_TX page-pool page.  The netdev
 * queue is woken once enough descriptors are free.
 */
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
	struct fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;
	struct page *page;
	int frame_len;

	fep = netdev_priv(dev: ndev);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(dev: ndev, index: queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, bd: &txq->bd);

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].buf_p;
			/* TSO header addresses are managed separately from
			 * the streaming DMA mappings - never unmap those.
			 */
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!skb)
				goto tx_buf_done;
		} else {
			/* Tx processing cannot call any XDP (or page pool) APIs if
			 * the "budget" is 0. Because NAPI is called with budget of
			 * 0 (such as netpoll) indicates we may be in an IRQ context,
			 * however, we can't use the page pool from IRQ context.
			 */
			if (unlikely(!budget))
				break;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
				xdpf = txq->tx_buf[index].buf_p;
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			} else {
				page = txq->tx_buf[index].buf_p;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (unlikely(!txq->tx_buf[index].buf_p)) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}

			frame_len = fec16_to_cpu(bdp->cbd_datlen);
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += frame_len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
			 * are to time stamp the packet, so we still need to check time
			 * stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), hwtstamps: &shhwtstamps);
				skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps);
			}

			/* Free the sk buffer associated with this last transmit */
			napi_consume_skb(skb, budget);
		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
			xdp_return_frame_rx_napi(xdpf);
		} else { /* recycle pages of XDP_TX frames */
			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
			page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: true);
		}

		txq->tx_buf[index].buf_p = NULL;
		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;

tx_buf_done:
		/* Make sure the update to bdp and tx_buf are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(dev_queue: nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(dev_queue: nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(addr: txq->bd.reg_desc_active) == 0)
		writel(val: 0, addr: txq->bd.reg_desc_active);
}
1554 | |
1555 | static void fec_enet_tx(struct net_device *ndev, int budget) |
1556 | { |
1557 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1558 | int i; |
1559 | |
1560 | /* Make sure that AVB queues are processed first. */ |
1561 | for (i = fep->num_tx_queues - 1; i >= 0; i--) |
1562 | fec_enet_tx_queue(ndev, queue_id: i, budget); |
1563 | } |
1564 | |
/* Attach a fresh page-pool page (plus XDP headroom offset) to the RX
 * descriptor slot @index and write its DMA address into @bdp.
 *
 * NOTE(review): page_pool_dev_alloc_pages() can return NULL under memory
 * pressure; WARN_ON() only logs, so new_page is then used anyway
 * (page_pool_get_dma_addr(NULL) / NULL stored in rx_skb_info).  A proper
 * fix needs this function to report failure so the caller can drop the
 * frame and keep the old page - flagged rather than fixed here because
 * that changes the function's contract.
 */
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
	struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(pool: rxq->page_pool);
	WARN_ON(!new_page);
	rxq->rx_skb_info[index].page = new_page;

	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(page: new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
1579 | |
/* Run the attached XDP program on one received buffer and apply its
 * verdict.  Returns a FEC_ENET_XDP_* result code.  On DROP, and on
 * REDIRECT/TX failure, the page is returned to the page pool with a DMA
 * sync covering the largest region the CPU may have touched.
 */
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
	 * max len CPU touch
	 */
	sync = xdp->data_end - xdp->data;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(dev: fep->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_err;

		ret = FEC_ENET_XDP_REDIR;
		break;

	case XDP_TX:
		rxq->stats[RX_XDP_TX]++;
		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, dma_sync_len: sync);
		if (unlikely(err)) {
			rxq->stats[RX_XDP_TX_ERRORS]++;
			goto xdp_err;
		}

		ret = FEC_ENET_XDP_TX;
		break;

	default:
		bpf_warn_invalid_xdp_action(dev: fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough;    /* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
xdp_err:
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(x: xdp->data);
		page_pool_put_page(pool: rxq->page_pool, page, dma_sync_size: sync, allow_direct: true);
		/* DROP is a legitimate program verdict; everything else
		 * reaching here is an exception worth tracing.
		 */
		if (act != XDP_DROP)
			trace_xdp_exception(dev: fep->netdev, xdp: prog, act);
		break;
	}

	return ret;
}
1644 | |
1645 | /* During a receive, the bd_rx.cur points to the current incoming buffer. |
1646 | * When we update through the ring, if the next incoming buffer has |
1647 | * not been given to the system, we just set the empty indicator, |
1648 | * effectively tossing the packet. |
1649 | */ |
1650 | static int |
1651 | fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
1652 | { |
1653 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1654 | struct fec_enet_priv_rx_q *rxq; |
1655 | struct bufdesc *bdp; |
1656 | unsigned short status; |
1657 | struct sk_buff *skb; |
1658 | ushort pkt_len; |
1659 | __u8 *data; |
1660 | int pkt_received = 0; |
1661 | struct bufdesc_ex *ebdp = NULL; |
1662 | bool vlan_packet_rcvd = false; |
1663 | u16 vlan_tag; |
1664 | int index = 0; |
1665 | bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; |
1666 | struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); |
1667 | u32 ret, xdp_result = FEC_ENET_XDP_PASS; |
1668 | u32 data_start = FEC_ENET_XDP_HEADROOM; |
1669 | int cpu = smp_processor_id(); |
1670 | struct xdp_buff xdp; |
1671 | struct page *page; |
1672 | u32 sub_len = 4; |
1673 | |
1674 | #if !defined(CONFIG_M5272) |
1675 | /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of |
1676 | * FEC_RACC_SHIFT16 is set by default in the probe function. |
1677 | */ |
1678 | if (fep->quirks & FEC_QUIRK_HAS_RACC) { |
1679 | data_start += 2; |
1680 | sub_len += 2; |
1681 | } |
1682 | #endif |
1683 | |
1684 | #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) |
1685 | /* |
1686 | * Hacky flush of all caches instead of using the DMA API for the TSO |
1687 | * headers. |
1688 | */ |
1689 | flush_cache_all(); |
1690 | #endif |
1691 | rxq = fep->rx_queue[queue_id]; |
1692 | |
1693 | /* First, grab all of the stats for the incoming packet. |
1694 | * These get messed up if we get called due to a busy condition. |
1695 | */ |
1696 | bdp = rxq->bd.cur; |
1697 | xdp_init_buff(xdp: &xdp, PAGE_SIZE, rxq: &rxq->xdp_rxq); |
1698 | |
1699 | while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { |
1700 | |
1701 | if (pkt_received >= budget) |
1702 | break; |
1703 | pkt_received++; |
1704 | |
1705 | writel(FEC_ENET_RXF_GET(queue_id), addr: fep->hwp + FEC_IEVENT); |
1706 | |
1707 | /* Check for errors. */ |
1708 | status ^= BD_ENET_RX_LAST; |
1709 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
1710 | BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | |
1711 | BD_ENET_RX_CL)) { |
1712 | ndev->stats.rx_errors++; |
1713 | if (status & BD_ENET_RX_OV) { |
1714 | /* FIFO overrun */ |
1715 | ndev->stats.rx_fifo_errors++; |
1716 | goto rx_processing_done; |
1717 | } |
1718 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH |
1719 | | BD_ENET_RX_LAST)) { |
1720 | /* Frame too long or too short. */ |
1721 | ndev->stats.rx_length_errors++; |
1722 | if (status & BD_ENET_RX_LAST) |
1723 | netdev_err(dev: ndev, format: "rcv is not +last\n" ); |
1724 | } |
1725 | if (status & BD_ENET_RX_CR) /* CRC Error */ |
1726 | ndev->stats.rx_crc_errors++; |
1727 | /* Report late collisions as a frame error. */ |
1728 | if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) |
1729 | ndev->stats.rx_frame_errors++; |
1730 | goto rx_processing_done; |
1731 | } |
1732 | |
1733 | /* Process the incoming frame. */ |
1734 | ndev->stats.rx_packets++; |
1735 | pkt_len = fec16_to_cpu(bdp->cbd_datlen); |
1736 | ndev->stats.rx_bytes += pkt_len; |
1737 | |
1738 | index = fec_enet_get_bd_index(bdp, bd: &rxq->bd); |
1739 | page = rxq->rx_skb_info[index].page; |
1740 | dma_sync_single_for_cpu(dev: &fep->pdev->dev, |
1741 | fec32_to_cpu(bdp->cbd_bufaddr), |
1742 | size: pkt_len, |
1743 | dir: DMA_FROM_DEVICE); |
1744 | prefetch(page_address(page)); |
1745 | fec_enet_update_cbd(rxq, bdp, index); |
1746 | |
1747 | if (xdp_prog) { |
1748 | xdp_buff_clear_frags_flag(xdp: &xdp); |
1749 | /* subtract 16bit shift and FCS */ |
1750 | xdp_prepare_buff(xdp: &xdp, page_address(page), |
1751 | headroom: data_start, data_len: pkt_len - sub_len, meta_valid: false); |
1752 | ret = fec_enet_run_xdp(fep, prog: xdp_prog, xdp: &xdp, rxq, cpu); |
1753 | xdp_result |= ret; |
1754 | if (ret != FEC_ENET_XDP_PASS) |
1755 | goto rx_processing_done; |
1756 | } |
1757 | |
1758 | /* The packet length includes FCS, but we don't want to |
1759 | * include that when passing upstream as it messes up |
1760 | * bridging applications. |
1761 | */ |
1762 | skb = build_skb(page_address(page), PAGE_SIZE); |
1763 | if (unlikely(!skb)) { |
1764 | page_pool_recycle_direct(pool: rxq->page_pool, page); |
1765 | ndev->stats.rx_dropped++; |
1766 | |
1767 | netdev_err_once(ndev, "build_skb failed!\n" ); |
1768 | goto rx_processing_done; |
1769 | } |
1770 | |
1771 | skb_reserve(skb, len: data_start); |
1772 | skb_put(skb, len: pkt_len - sub_len); |
1773 | skb_mark_for_recycle(skb); |
1774 | |
1775 | if (unlikely(need_swap)) { |
1776 | data = page_address(page) + FEC_ENET_XDP_HEADROOM; |
1777 | swap_buffer(bufaddr: data, len: pkt_len); |
1778 | } |
1779 | data = skb->data; |
1780 | |
1781 | /* Extract the enhanced buffer descriptor */ |
1782 | ebdp = NULL; |
1783 | if (fep->bufdesc_ex) |
1784 | ebdp = (struct bufdesc_ex *)bdp; |
1785 | |
1786 | /* If this is a VLAN packet remove the VLAN Tag */ |
1787 | vlan_packet_rcvd = false; |
1788 | if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
1789 | fep->bufdesc_ex && |
1790 | (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { |
1791 | /* Push and remove the vlan tag */ |
1792 | struct vlan_hdr * = |
1793 | (struct vlan_hdr *) (data + ETH_HLEN); |
1794 | vlan_tag = ntohs(vlan_header->h_vlan_TCI); |
1795 | |
1796 | vlan_packet_rcvd = true; |
1797 | |
1798 | memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); |
1799 | skb_pull(skb, VLAN_HLEN); |
1800 | } |
1801 | |
1802 | skb->protocol = eth_type_trans(skb, dev: ndev); |
1803 | |
1804 | /* Get receive timestamp from the skb */ |
1805 | if (fep->hwts_rx_en && fep->bufdesc_ex) |
1806 | fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), |
1807 | hwtstamps: skb_hwtstamps(skb)); |
1808 | |
1809 | if (fep->bufdesc_ex && |
1810 | (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { |
1811 | if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { |
1812 | /* don't check it */ |
1813 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1814 | } else { |
1815 | skb_checksum_none_assert(skb); |
1816 | } |
1817 | } |
1818 | |
1819 | /* Handle received VLAN packets */ |
1820 | if (vlan_packet_rcvd) |
1821 | __vlan_hwaccel_put_tag(skb, |
1822 | htons(ETH_P_8021Q), |
1823 | vlan_tci: vlan_tag); |
1824 | |
1825 | skb_record_rx_queue(skb, rx_queue: queue_id); |
1826 | napi_gro_receive(napi: &fep->napi, skb); |
1827 | |
1828 | rx_processing_done: |
1829 | /* Clear the status flags for this buffer */ |
1830 | status &= ~BD_ENET_RX_STATS; |
1831 | |
1832 | /* Mark the buffer empty */ |
1833 | status |= BD_ENET_RX_EMPTY; |
1834 | |
1835 | if (fep->bufdesc_ex) { |
1836 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
1837 | |
1838 | ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); |
1839 | ebdp->cbd_prot = 0; |
1840 | ebdp->cbd_bdu = 0; |
1841 | } |
1842 | /* Make sure the updates to rest of the descriptor are |
1843 | * performed before transferring ownership. |
1844 | */ |
1845 | wmb(); |
1846 | bdp->cbd_sc = cpu_to_fec16(status); |
1847 | |
1848 | /* Update BD pointer to next entry */ |
1849 | bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd); |
1850 | |
1851 | /* Doing this here will keep the FEC running while we process |
1852 | * incoming frames. On a heavily loaded network, we should be |
1853 | * able to keep up at the expense of system resources. |
1854 | */ |
1855 | writel(val: 0, addr: rxq->bd.reg_desc_active); |
1856 | } |
1857 | rxq->bd.cur = bdp; |
1858 | |
1859 | if (xdp_result & FEC_ENET_XDP_REDIR) |
1860 | xdp_do_flush(); |
1861 | |
1862 | return pkt_received; |
1863 | } |
1864 | |
1865 | static int fec_enet_rx(struct net_device *ndev, int budget) |
1866 | { |
1867 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1868 | int i, done = 0; |
1869 | |
1870 | /* Make sure that AVB queues are processed first. */ |
1871 | for (i = fep->num_rx_queues - 1; i >= 0; i--) |
1872 | done += fec_enet_rx_queue(ndev, budget: budget - done, queue_id: i); |
1873 | |
1874 | return done; |
1875 | } |
1876 | |
1877 | static bool fec_enet_collect_events(struct fec_enet_private *fep) |
1878 | { |
1879 | uint int_events; |
1880 | |
1881 | int_events = readl(addr: fep->hwp + FEC_IEVENT); |
1882 | |
1883 | /* Don't clear MDIO events, we poll for those */ |
1884 | int_events &= ~FEC_ENET_MII; |
1885 | |
1886 | writel(val: int_events, addr: fep->hwp + FEC_IEVENT); |
1887 | |
1888 | return int_events != 0; |
1889 | } |
1890 | |
1891 | static irqreturn_t |
1892 | fec_enet_interrupt(int irq, void *dev_id) |
1893 | { |
1894 | struct net_device *ndev = dev_id; |
1895 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1896 | irqreturn_t ret = IRQ_NONE; |
1897 | |
1898 | if (fec_enet_collect_events(fep) && fep->link) { |
1899 | ret = IRQ_HANDLED; |
1900 | |
1901 | if (napi_schedule_prep(n: &fep->napi)) { |
1902 | /* Disable interrupts */ |
1903 | writel(val: 0, addr: fep->hwp + FEC_IMASK); |
1904 | __napi_schedule(n: &fep->napi); |
1905 | } |
1906 | } |
1907 | |
1908 | return ret; |
1909 | } |
1910 | |
1911 | static int fec_enet_rx_napi(struct napi_struct *napi, int budget) |
1912 | { |
1913 | struct net_device *ndev = napi->dev; |
1914 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
1915 | int done = 0; |
1916 | |
1917 | do { |
1918 | done += fec_enet_rx(ndev, budget: budget - done); |
1919 | fec_enet_tx(ndev, budget); |
1920 | } while ((done < budget) && fec_enet_collect_events(fep)); |
1921 | |
1922 | if (done < budget) { |
1923 | napi_complete_done(n: napi, work_done: done); |
1924 | writel(FEC_DEFAULT_IMASK, addr: fep->hwp + FEC_IMASK); |
1925 | } |
1926 | |
1927 | return done; |
1928 | } |
1929 | |
1930 | /* ------------------------------------------------------------------------- */ |
/* fec_get_mac - pick a MAC address for the interface.
 *
 * Sources are tried in decreasing priority (module parameter, device
 * tree, platform data/flash, bootloader-programmed registers, random).
 * Returns 0 on success, or -EPROBE_DEFER when the DT lookup asks to
 * retry later.
 */
static int fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	unsigned char *iap, tmpaddr[ETH_ALEN];
	int ret;

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(addr: iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			ret = of_get_mac_address(np, mac: tmpaddr);
			if (!ret)
				iap = tmpaddr;
			else if (ret == -EPROBE_DEFER)
				/* MAC provider not ready yet; defer probe */
				return ret;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(addr: iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		struct fec_platform_data *pdata = dev_get_platdata(dev: &fep->pdev->dev);

		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(addr: iap)) {
		/* Registers hold the address in big-endian halves */
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(addr: iap)) {
		/* Report it and use a random ethernet address instead */
		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n" , iap);
		eth_hw_addr_random(dev: ndev);
		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n" ,
			 ndev->dev_addr);
		return 0;
	}

	/* Adjust MAC if using macaddr: offset by dev_id so dual FECs on one
	 * SoC do not end up with the same address from the module parameter.
	 */
	eth_hw_addr_gen(dev: ndev, base_addr: iap, id: iap == macaddr ? fep->dev_id : 0);

	return 0;
}
2002 | |
2003 | /* ------------------------------------------------------------------------- */ |
2004 | |
2005 | /* |
2006 | * Phy section |
2007 | */ |
2008 | static void fec_enet_adjust_link(struct net_device *ndev) |
2009 | { |
2010 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2011 | struct phy_device *phy_dev = ndev->phydev; |
2012 | int status_change = 0; |
2013 | |
2014 | /* |
2015 | * If the netdev is down, or is going down, we're not interested |
2016 | * in link state events, so just mark our idea of the link as down |
2017 | * and ignore the event. |
2018 | */ |
2019 | if (!netif_running(dev: ndev) || !netif_device_present(dev: ndev)) { |
2020 | fep->link = 0; |
2021 | } else if (phy_dev->link) { |
2022 | if (!fep->link) { |
2023 | fep->link = phy_dev->link; |
2024 | status_change = 1; |
2025 | } |
2026 | |
2027 | if (fep->full_duplex != phy_dev->duplex) { |
2028 | fep->full_duplex = phy_dev->duplex; |
2029 | status_change = 1; |
2030 | } |
2031 | |
2032 | if (phy_dev->speed != fep->speed) { |
2033 | fep->speed = phy_dev->speed; |
2034 | status_change = 1; |
2035 | } |
2036 | |
2037 | /* if any of the above changed restart the FEC */ |
2038 | if (status_change) { |
2039 | napi_disable(n: &fep->napi); |
2040 | netif_tx_lock_bh(dev: ndev); |
2041 | fec_restart(ndev); |
2042 | netif_tx_wake_all_queues(dev: ndev); |
2043 | netif_tx_unlock_bh(dev: ndev); |
2044 | napi_enable(n: &fep->napi); |
2045 | } |
2046 | } else { |
2047 | if (fep->link) { |
2048 | napi_disable(n: &fep->napi); |
2049 | netif_tx_lock_bh(dev: ndev); |
2050 | fec_stop(ndev); |
2051 | netif_tx_unlock_bh(dev: ndev); |
2052 | napi_enable(n: &fep->napi); |
2053 | fep->link = phy_dev->link; |
2054 | status_change = 1; |
2055 | } |
2056 | } |
2057 | |
2058 | if (status_change) |
2059 | phy_print_status(phydev: phy_dev); |
2060 | } |
2061 | |
2062 | static int fec_enet_mdio_wait(struct fec_enet_private *fep) |
2063 | { |
2064 | uint ievent; |
2065 | int ret; |
2066 | |
2067 | ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, |
2068 | ievent & FEC_ENET_MII, 2, 30000); |
2069 | |
2070 | if (!ret) |
2071 | writel(FEC_ENET_MII, addr: fep->hwp + FEC_IEVENT); |
2072 | |
2073 | return ret; |
2074 | } |
2075 | |
2076 | static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) |
2077 | { |
2078 | struct fec_enet_private *fep = bus->priv; |
2079 | struct device *dev = &fep->pdev->dev; |
2080 | int ret = 0, frame_start, frame_addr, frame_op; |
2081 | |
2082 | ret = pm_runtime_resume_and_get(dev); |
2083 | if (ret < 0) |
2084 | return ret; |
2085 | |
2086 | /* C22 read */ |
2087 | frame_op = FEC_MMFR_OP_READ; |
2088 | frame_start = FEC_MMFR_ST; |
2089 | frame_addr = regnum; |
2090 | |
2091 | /* start a read op */ |
2092 | writel(val: frame_start | frame_op | |
2093 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | |
2094 | FEC_MMFR_TA, addr: fep->hwp + FEC_MII_DATA); |
2095 | |
2096 | /* wait for end of transfer */ |
2097 | ret = fec_enet_mdio_wait(fep); |
2098 | if (ret) { |
2099 | netdev_err(dev: fep->netdev, format: "MDIO read timeout\n" ); |
2100 | goto out; |
2101 | } |
2102 | |
2103 | ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); |
2104 | |
2105 | out: |
2106 | pm_runtime_mark_last_busy(dev); |
2107 | pm_runtime_put_autosuspend(dev); |
2108 | |
2109 | return ret; |
2110 | } |
2111 | |
2112 | static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, |
2113 | int devad, int regnum) |
2114 | { |
2115 | struct fec_enet_private *fep = bus->priv; |
2116 | struct device *dev = &fep->pdev->dev; |
2117 | int ret = 0, frame_start, frame_op; |
2118 | |
2119 | ret = pm_runtime_resume_and_get(dev); |
2120 | if (ret < 0) |
2121 | return ret; |
2122 | |
2123 | frame_start = FEC_MMFR_ST_C45; |
2124 | |
2125 | /* write address */ |
2126 | writel(val: frame_start | FEC_MMFR_OP_ADDR_WRITE | |
2127 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2128 | FEC_MMFR_TA | (regnum & 0xFFFF), |
2129 | addr: fep->hwp + FEC_MII_DATA); |
2130 | |
2131 | /* wait for end of transfer */ |
2132 | ret = fec_enet_mdio_wait(fep); |
2133 | if (ret) { |
2134 | netdev_err(dev: fep->netdev, format: "MDIO address write timeout\n" ); |
2135 | goto out; |
2136 | } |
2137 | |
2138 | frame_op = FEC_MMFR_OP_READ_C45; |
2139 | |
2140 | /* start a read op */ |
2141 | writel(val: frame_start | frame_op | |
2142 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2143 | FEC_MMFR_TA, addr: fep->hwp + FEC_MII_DATA); |
2144 | |
2145 | /* wait for end of transfer */ |
2146 | ret = fec_enet_mdio_wait(fep); |
2147 | if (ret) { |
2148 | netdev_err(dev: fep->netdev, format: "MDIO read timeout\n" ); |
2149 | goto out; |
2150 | } |
2151 | |
2152 | ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); |
2153 | |
2154 | out: |
2155 | pm_runtime_mark_last_busy(dev); |
2156 | pm_runtime_put_autosuspend(dev); |
2157 | |
2158 | return ret; |
2159 | } |
2160 | |
2161 | static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, |
2162 | u16 value) |
2163 | { |
2164 | struct fec_enet_private *fep = bus->priv; |
2165 | struct device *dev = &fep->pdev->dev; |
2166 | int ret, frame_start, frame_addr; |
2167 | |
2168 | ret = pm_runtime_resume_and_get(dev); |
2169 | if (ret < 0) |
2170 | return ret; |
2171 | |
2172 | /* C22 write */ |
2173 | frame_start = FEC_MMFR_ST; |
2174 | frame_addr = regnum; |
2175 | |
2176 | /* start a write op */ |
2177 | writel(val: frame_start | FEC_MMFR_OP_WRITE | |
2178 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | |
2179 | FEC_MMFR_TA | FEC_MMFR_DATA(value), |
2180 | addr: fep->hwp + FEC_MII_DATA); |
2181 | |
2182 | /* wait for end of transfer */ |
2183 | ret = fec_enet_mdio_wait(fep); |
2184 | if (ret) |
2185 | netdev_err(dev: fep->netdev, format: "MDIO write timeout\n" ); |
2186 | |
2187 | pm_runtime_mark_last_busy(dev); |
2188 | pm_runtime_put_autosuspend(dev); |
2189 | |
2190 | return ret; |
2191 | } |
2192 | |
2193 | static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, |
2194 | int devad, int regnum, u16 value) |
2195 | { |
2196 | struct fec_enet_private *fep = bus->priv; |
2197 | struct device *dev = &fep->pdev->dev; |
2198 | int ret, frame_start; |
2199 | |
2200 | ret = pm_runtime_resume_and_get(dev); |
2201 | if (ret < 0) |
2202 | return ret; |
2203 | |
2204 | frame_start = FEC_MMFR_ST_C45; |
2205 | |
2206 | /* write address */ |
2207 | writel(val: frame_start | FEC_MMFR_OP_ADDR_WRITE | |
2208 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2209 | FEC_MMFR_TA | (regnum & 0xFFFF), |
2210 | addr: fep->hwp + FEC_MII_DATA); |
2211 | |
2212 | /* wait for end of transfer */ |
2213 | ret = fec_enet_mdio_wait(fep); |
2214 | if (ret) { |
2215 | netdev_err(dev: fep->netdev, format: "MDIO address write timeout\n" ); |
2216 | goto out; |
2217 | } |
2218 | |
2219 | /* start a write op */ |
2220 | writel(val: frame_start | FEC_MMFR_OP_WRITE | |
2221 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | |
2222 | FEC_MMFR_TA | FEC_MMFR_DATA(value), |
2223 | addr: fep->hwp + FEC_MII_DATA); |
2224 | |
2225 | /* wait for end of transfer */ |
2226 | ret = fec_enet_mdio_wait(fep); |
2227 | if (ret) |
2228 | netdev_err(dev: fep->netdev, format: "MDIO write timeout\n" ); |
2229 | |
2230 | out: |
2231 | pm_runtime_mark_last_busy(dev); |
2232 | pm_runtime_put_autosuspend(dev); |
2233 | |
2234 | return ret; |
2235 | } |
2236 | |
2237 | static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) |
2238 | { |
2239 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2240 | struct phy_device *phy_dev = ndev->phydev; |
2241 | |
2242 | if (phy_dev) { |
2243 | phy_reset_after_clk_enable(phydev: phy_dev); |
2244 | } else if (fep->phy_node) { |
2245 | /* |
2246 | * If the PHY still is not bound to the MAC, but there is |
2247 | * OF PHY node and a matching PHY device instance already, |
2248 | * use the OF PHY node to obtain the PHY device instance, |
2249 | * and then use that PHY device instance when triggering |
2250 | * the PHY reset. |
2251 | */ |
2252 | phy_dev = of_phy_find_device(phy_np: fep->phy_node); |
2253 | phy_reset_after_clk_enable(phydev: phy_dev); |
2254 | put_device(dev: &phy_dev->mdio.dev); |
2255 | } |
2256 | } |
2257 | |
/* Enable or disable the optional FEC clocks (enet_out, ptp, ref,
 * 2x_txclk) as a group.  On the enable path a failure unwinds every
 * clock enabled so far via the goto ladder below; on success the PHY is
 * reset so it sees stable clocks.  Returns 0 or a clk_prepare_enable()
 * error.  The ptp clock is guarded by ptp_clk_mutex because the PTP
 * code checks ptp_clk_on before touching the hardware.
 */
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(clk: fep->clk_enet_out);
		if (ret)
			return ret;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(clk: fep->clk_ptp);
			if (ret) {
				mutex_unlock(lock: &fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				/* Flag checked by the PTP code paths */
				fep->ptp_clk_on = true;
			}
			mutex_unlock(lock: &fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(clk: fep->clk_ref);
		if (ret)
			goto failed_clk_ref;

		ret = clk_prepare_enable(clk: fep->clk_2x_txclk);
		if (ret)
			goto failed_clk_2x_txclk;

		/* Clocks are stable now; safe to reset the PHY */
		fec_enet_phy_reset_after_clk_enable(ndev);
	} else {
		clk_disable_unprepare(clk: fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(clk: fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(lock: &fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(clk: fep->clk_ref);
		clk_disable_unprepare(clk: fep->clk_2x_txclk);
	}

	return 0;

	/* Error unwind: disable in reverse order of enabling */
failed_clk_2x_txclk:
	if (fep->clk_ref)
		clk_disable_unprepare(clk: fep->clk_ref);
failed_clk_ref:
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(clk: fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(lock: &fep->ptp_clk_mutex);
	}
failed_clk_ptp:
	clk_disable_unprepare(clk: fep->clk_enet_out);

	return ret;
}
2318 | |
2319 | static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, |
2320 | struct device_node *np) |
2321 | { |
2322 | u32 rgmii_tx_delay, rgmii_rx_delay; |
2323 | |
2324 | /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ |
2325 | if (!of_property_read_u32(np, propname: "tx-internal-delay-ps" , out_value: &rgmii_tx_delay)) { |
2326 | if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { |
2327 | dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps" ); |
2328 | return -EINVAL; |
2329 | } else if (rgmii_tx_delay == 2000) { |
2330 | fep->rgmii_txc_dly = true; |
2331 | } |
2332 | } |
2333 | |
2334 | /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ |
2335 | if (!of_property_read_u32(np, propname: "rx-internal-delay-ps" , out_value: &rgmii_rx_delay)) { |
2336 | if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { |
2337 | dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps" ); |
2338 | return -EINVAL; |
2339 | } else if (rgmii_rx_delay == 2000) { |
2340 | fep->rgmii_rxc_dly = true; |
2341 | } |
2342 | } |
2343 | |
2344 | return 0; |
2345 | } |
2346 | |
/* Connect the MAC to its PHY.
 *
 * Prefers the DT phy-handle; otherwise scans the MDIO bus, skipping
 * dev_id registered PHYs so each FEC instance picks a different one,
 * and falls back to a fixed link ("fixed-0" bus) when no PHY is found.
 * Capabilities are then masked to what the MAC supports.  Returns 0 on
 * success or a negative error code.
 */
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(dev: ndev, phy_np: fep->phy_node,
					 hndlr: &fec_enet_adjust_link, flags: 0,
					 iface: fep->phy_interface);
		if (!phy_dev) {
			netdev_err(dev: ndev, format: "Unable to connect to phy\n" );
			return -ENODEV;
		}
	} else {
		/* check for attached phy: take the dev_id'th PHY found so
		 * that multiple FEC instances claim distinct PHYs.
		 */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(bus: fep->mii_bus, addr: phy_id))
				continue;
			if (dev_id--)
				continue;
			strscpy(p: mdio_bus_id, q: fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(dev: ndev, format: "no PHY, assuming direct connection to switch\n" );
			strscpy(p: mdio_bus_id, q: "fixed-0" , MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(buf: phy_name, size: sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(dev: ndev, bus_id: phy_name, handler: &fec_enet_adjust_link,
				      interface: fep->phy_interface);
	}

	if (IS_ERR(ptr: phy_dev)) {
		netdev_err(dev: ndev, format: "could not attach to PHY\n" );
		return PTR_ERR(ptr: phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_set_max_speed(phydev: phy_dev, max_speed: 1000);
		/* half-duplex gigabit is not supported by the MAC */
		phy_remove_link_mode(phydev: phy_dev,
				     link_mode: ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
#if !defined(CONFIG_M5272)
		phy_support_sym_pause(phydev: phy_dev);
#endif
	}
	else
		phy_set_max_speed(phydev: phy_dev, max_speed: 100);

	fep->link = 0;
	fep->full_duplex = 0;

	/* MAC driver handles PHY power management itself */
	phy_dev->mac_managed_pm = true;

	phy_attached_info(phydev: phy_dev);

	return 0;
}
2413 | |
/* Create and register the MDIO bus for this FEC instance.
 *
 * On single-MDIO SoCs (e.g. i.MX28) fec1 reuses the bus created by
 * fec0 instead of creating its own.  Computes the MDC speed and hold
 * time from the ipg clock, applies quirk workarounds, and registers
 * the bus with the optional "mdio" DT subnode.  Returns 0 on success
 * or a negative error code.
 */
static int fec_enet_mii_init(struct platform_device *pdev)
{
	/* Shared between instances so fec1 can reuse fec0's bus */
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	bool suppress_preamble = false;
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;
	u32 bus_freq;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	bus_freq = 2500000; /* 2.5MHz by default */
	node = of_get_child_by_name(node: pdev->dev.of_node, name: "mdio" );
	if (node) {
		of_property_read_u32(np: node, propname: "clock-frequency" , out_value: &bus_freq);
		suppress_preamble = of_property_read_bool(np: node,
							  propname: "suppress-preamble" );
	}

	/*
	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		/* MII_SPEED field is only 6 bits wide */
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n" ,
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	if (suppress_preamble)
		fep->phy_speed |= BIT(7);

	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
		/* Clear MMFR to avoid to generate MII event by writing MSCR.
		 * MII event generation condition:
		 * - writing MSCR:
		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
		 *	  mscr_reg_data_in[7:0] != 0
		 * - writing MMFR:
		 *	- mscr[7:0]_not_zero
		 */
		writel(val: 0, addr: fep->hwp + FEC_MII_DATA);
	}

	writel(val: fep->phy_speed, addr: fep->hwp + FEC_MII_SPEED);

	/* Clear any pending transaction complete indication */
	writel(FEC_ENET_MII, addr: fep->hwp + FEC_IEVENT);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus" ;
	fep->mii_bus->read = fec_enet_mdio_read_c22;
	fep->mii_bus->write = fec_enet_mdio_write_c22;
	if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
		fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
		fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
	}
	snprintf(buf: fep->mii_bus->id, MII_BUS_ID_SIZE, fmt: "%s-%x" ,
		 pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	err = of_mdiobus_register(mdio: fep->mii_bus, np: node);
	if (err)
		goto err_out_free_mdiobus;
	of_node_put(node);

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(bus: fep->mii_bus);
err_out:
	of_node_put(node);
	return err;
}
2551 | |
2552 | static void fec_enet_mii_remove(struct fec_enet_private *fep) |
2553 | { |
2554 | if (--mii_cnt == 0) { |
2555 | mdiobus_unregister(bus: fep->mii_bus); |
2556 | mdiobus_free(bus: fep->mii_bus); |
2557 | } |
2558 | } |
2559 | |
2560 | static void fec_enet_get_drvinfo(struct net_device *ndev, |
2561 | struct ethtool_drvinfo *info) |
2562 | { |
2563 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2564 | |
2565 | strscpy(p: info->driver, q: fep->pdev->dev.driver->name, |
2566 | size: sizeof(info->driver)); |
2567 | strscpy(p: info->bus_info, q: dev_name(dev: &ndev->dev), size: sizeof(info->bus_info)); |
2568 | } |
2569 | |
2570 | static int fec_enet_get_regs_len(struct net_device *ndev) |
2571 | { |
2572 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2573 | struct resource *r; |
2574 | int s = 0; |
2575 | |
2576 | r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); |
2577 | if (r) |
2578 | s = resource_size(res: r); |
2579 | |
2580 | return s; |
2581 | } |
2582 | |
/* List of registers that can safely be read to dump them with ethtool.
 * Offsets are byte offsets into the first MMIO window; registers not
 * listed here are left zero in the dump buffer.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
/* Full register set for ENET-MAC style controllers (version 2 dump) */
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
/* for i.MX6ul: reduced register set (fewer queues/interrupt coalescers) */
static u32 fec_enet_register_offset_6ul[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
	FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
	FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
/* Legacy MPC8xx/M5272-style controllers (version 1 dump) */
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif
2656 | |
/* ethtool register dump: read every register in the platform's allow
 * list into @regbuf at its native byte offset, skipping FRREG-family
 * registers on controllers lacking the FRREG quirk.  Unlisted offsets
 * stay zero.  Requires a runtime-PM resume around the MMIO access.
 */
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	struct device *dev = &fep->pdev->dev;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;
	int ret;
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
	u32 *reg_list;
	u32 reg_cnt;

	/* i.MX6UL implements a reduced register set */
	if (!of_machine_is_compatible(compat: "fsl,imx6ul" )) {
		reg_list = fec_enet_register_offset;
		reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
	} else {
		reg_list = fec_enet_register_offset_6ul;
		reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
	}
#else
	/* coldfire */
	static u32 *reg_list = fec_enet_register_offset;
	static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return;

	regs->version = fec_enet_register_version;

	memset(buf, 0, regs->len);

	for (i = 0; i < reg_cnt; i++) {
		off = reg_list[i];

		/* FRREG registers only exist on some controllers */
		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
			continue;

		/* Convert byte offset to u32 index */
		off >>= 2;
		buf[off] = readl(addr: &theregs[off]);
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
2706 | |
2707 | static int fec_enet_get_ts_info(struct net_device *ndev, |
2708 | struct ethtool_ts_info *info) |
2709 | { |
2710 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2711 | |
2712 | if (fep->bufdesc_ex) { |
2713 | |
2714 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | |
2715 | SOF_TIMESTAMPING_RX_SOFTWARE | |
2716 | SOF_TIMESTAMPING_SOFTWARE | |
2717 | SOF_TIMESTAMPING_TX_HARDWARE | |
2718 | SOF_TIMESTAMPING_RX_HARDWARE | |
2719 | SOF_TIMESTAMPING_RAW_HARDWARE; |
2720 | if (fep->ptp_clock) |
2721 | info->phc_index = ptp_clock_index(ptp: fep->ptp_clock); |
2722 | else |
2723 | info->phc_index = -1; |
2724 | |
2725 | info->tx_types = (1 << HWTSTAMP_TX_OFF) | |
2726 | (1 << HWTSTAMP_TX_ON); |
2727 | |
2728 | info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | |
2729 | (1 << HWTSTAMP_FILTER_ALL); |
2730 | return 0; |
2731 | } else { |
2732 | return ethtool_op_get_ts_info(dev: ndev, eti: info); |
2733 | } |
2734 | } |
2735 | |
2736 | #if !defined(CONFIG_M5272) |
2737 | |
2738 | static void fec_enet_get_pauseparam(struct net_device *ndev, |
2739 | struct ethtool_pauseparam *pause) |
2740 | { |
2741 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
2742 | |
2743 | pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; |
2744 | pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; |
2745 | pause->rx_pause = pause->tx_pause; |
2746 | } |
2747 | |
/* ethtool .set_pauseparam: configure symmetric flow control.
 *
 * The hardware can only enable/disable pause for both directions at once,
 * so asymmetric requests are rejected.  If autoneg is requested the link
 * is renegotiated; a running interface is then restarted so the new pause
 * configuration reaches the MAC.
 */
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(dev: ndev,
			format: "hardware only support enable/disable both tx and rx" );
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	phy_set_sym_pause(phydev: ndev->phydev, rx: pause->rx_pause, tx: pause->tx_pause,
			  autoneg: pause->autoneg);

	if (pause->autoneg) {
		/* Stop the MAC before renegotiating so traffic doesn't run
		 * with stale pause settings during autoneg.
		 */
		if (netif_running(dev: ndev))
			fec_stop(ndev);
		phy_start_aneg(phydev: ndev->phydev);
	}
	if (netif_running(dev: ndev)) {
		/* Quiesce NAPI and TX before reprogramming the MAC. */
		napi_disable(n: &fep->napi);
		netif_tx_lock_bh(dev: ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(dev: ndev);
		netif_tx_unlock_bh(dev: ndev);
		napi_enable(n: &fep->napi);
	}

	return 0;
}
2787 | |
/* Mapping from ethtool statistic name to the hardware MIB counter's
 * register offset (added to fep->hwp).  Order here defines the order of
 * both the strings and the values reported to ethtool.
 */
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped" , RMON_T_DROP },
	{ "tx_packets" , RMON_T_PACKETS },
	{ "tx_broadcast" , RMON_T_BC_PKT },
	{ "tx_multicast" , RMON_T_MC_PKT },
	{ "tx_crc_errors" , RMON_T_CRC_ALIGN },
	{ "tx_undersize" , RMON_T_UNDERSIZE },
	{ "tx_oversize" , RMON_T_OVERSIZE },
	{ "tx_fragment" , RMON_T_FRAG },
	{ "tx_jabber" , RMON_T_JAB },
	{ "tx_collision" , RMON_T_COL },
	{ "tx_64byte" , RMON_T_P64 },
	{ "tx_65to127byte" , RMON_T_P65TO127 },
	{ "tx_128to255byte" , RMON_T_P128TO255 },
	{ "tx_256to511byte" , RMON_T_P256TO511 },
	{ "tx_512to1023byte" , RMON_T_P512TO1023 },
	{ "tx_1024to2047byte" , RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte" , RMON_T_P_GTE2048 },
	{ "tx_octets" , RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop" , IEEE_T_DROP },
	{ "IEEE_tx_frame_ok" , IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col" , IEEE_T_1COL },
	{ "IEEE_tx_mcol" , IEEE_T_MCOL },
	{ "IEEE_tx_def" , IEEE_T_DEF },
	{ "IEEE_tx_lcol" , IEEE_T_LCOL },
	{ "IEEE_tx_excol" , IEEE_T_EXCOL },
	{ "IEEE_tx_macerr" , IEEE_T_MACERR },
	{ "IEEE_tx_cserr" , IEEE_T_CSERR },
	{ "IEEE_tx_sqe" , IEEE_T_SQE },
	{ "IEEE_tx_fdxfc" , IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok" , IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets" , RMON_R_PACKETS },
	{ "rx_broadcast" , RMON_R_BC_PKT },
	{ "rx_multicast" , RMON_R_MC_PKT },
	{ "rx_crc_errors" , RMON_R_CRC_ALIGN },
	{ "rx_undersize" , RMON_R_UNDERSIZE },
	{ "rx_oversize" , RMON_R_OVERSIZE },
	{ "rx_fragment" , RMON_R_FRAG },
	{ "rx_jabber" , RMON_R_JAB },
	{ "rx_64byte" , RMON_R_P64 },
	{ "rx_65to127byte" , RMON_R_P65TO127 },
	{ "rx_128to255byte" , RMON_R_P128TO255 },
	{ "rx_256to511byte" , RMON_R_P256TO511 },
	{ "rx_512to1023byte" , RMON_R_P512TO1023 },
	{ "rx_1024to2047byte" , RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte" , RMON_R_P_GTE2048 },
	{ "rx_octets" , RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop" , IEEE_R_DROP },
	{ "IEEE_rx_frame_ok" , IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc" , IEEE_R_CRC },
	{ "IEEE_rx_align" , IEEE_R_ALIGN },
	{ "IEEE_rx_macerr" , IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc" , IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok" , IEEE_R_OCTETS_OK },
};
2853 | |
/* Size in bytes of the software mirror of the hardware counters above. */
#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))

/* Names for the software XDP counters, in rx-queue stats[] index order
 * (the enum values are noted alongside each entry).
 */
static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
	"rx_xdp_redirect" ,          /* RX_XDP_REDIRECT = 0, */
	"rx_xdp_pass" ,              /* RX_XDP_PASS, */
	"rx_xdp_drop" ,              /* RX_XDP_DROP, */
	"rx_xdp_tx" ,                /* RX_XDP_TX, */
	"rx_xdp_tx_errors" ,         /* RX_XDP_TX_ERRORS, */
	"tx_xdp_xmit" ,              /* TX_XDP_XMIT, */
	"tx_xdp_xmit_errors" ,       /* TX_XDP_XMIT_ERRORS, */
};
2865 | |
2866 | static void fec_enet_update_ethtool_stats(struct net_device *dev) |
2867 | { |
2868 | struct fec_enet_private *fep = netdev_priv(dev); |
2869 | int i; |
2870 | |
2871 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) |
2872 | fep->ethtool_stats[i] = readl(addr: fep->hwp + fec_stats[i].offset); |
2873 | } |
2874 | |
2875 | static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) |
2876 | { |
2877 | u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; |
2878 | struct fec_enet_priv_rx_q *rxq; |
2879 | int i, j; |
2880 | |
2881 | for (i = fep->num_rx_queues - 1; i >= 0; i--) { |
2882 | rxq = fep->rx_queue[i]; |
2883 | |
2884 | for (j = 0; j < XDP_STATS_TOTAL; j++) |
2885 | xdp_stats[j] += rxq->stats[j]; |
2886 | } |
2887 | |
2888 | memcpy(data, xdp_stats, sizeof(xdp_stats)); |
2889 | } |
2890 | |
/* Accumulate page-pool statistics from every RX queue into @data.
 *
 * Compiles to a no-op without CONFIG_PAGE_POOL_STATS; the matching
 * count/strings helpers then report no entries, so no slots are left
 * unfilled.
 */
static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
		rxq = fep->rx_queue[i];

		if (!rxq->page_pool)
			continue;

		/* Accumulates into the single stats struct across pools. */
		page_pool_get_stats(pool: rxq->page_pool, stats: &stats);
	}

	page_pool_ethtool_stats_get(data, stats: &stats);
#endif
}
2910 | |
/* ethtool .get_ethtool_stats: fill @data in get_strings() order —
 * MIB counters, then XDP counters, then page-pool stats.
 *
 * The MIB mirror is only re-read from hardware while the interface is
 * running; otherwise the snapshot cached at close time is reported.
 */
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
	data += FEC_STATS_SIZE / sizeof(u64);

	fec_enet_get_xdp_stats(fep, data);
	data += XDP_STATS_TOTAL;

	fec_enet_page_pool_stats(fep, data);
}
2927 | |
2928 | static void fec_enet_get_strings(struct net_device *netdev, |
2929 | u32 stringset, u8 *data) |
2930 | { |
2931 | int i; |
2932 | switch (stringset) { |
2933 | case ETH_SS_STATS: |
2934 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { |
2935 | ethtool_sprintf(data: &data, fmt: "%s" , fec_stats[i].name); |
2936 | } |
2937 | for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { |
2938 | ethtool_sprintf(data: &data, fmt: "%s" , fec_xdp_stat_strs[i]); |
2939 | } |
2940 | page_pool_ethtool_stats_get_strings(data); |
2941 | |
2942 | break; |
2943 | case ETH_SS_TEST: |
2944 | net_selftest_get_strings(data); |
2945 | break; |
2946 | } |
2947 | } |
2948 | |
2949 | static int fec_enet_get_sset_count(struct net_device *dev, int sset) |
2950 | { |
2951 | int count; |
2952 | |
2953 | switch (sset) { |
2954 | case ETH_SS_STATS: |
2955 | count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; |
2956 | count += page_pool_ethtool_stats_get_count(); |
2957 | return count; |
2958 | |
2959 | case ETH_SS_TEST: |
2960 | return net_selftest_get_count(); |
2961 | default: |
2962 | return -EOPNOTSUPP; |
2963 | } |
2964 | } |
2965 | |
/* Zero all hardware MIB counters and the software XDP counters.
 *
 * The MIB block is held disabled while the individual counter registers
 * are written, then re-enabled by clearing the control register.
 */
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_rx_q *rxq;
	int i, j;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, addr: fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(val: 0, addr: fep->hwp + fec_stats[i].offset);

	/* Clear the per-queue software XDP counters too. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
		rxq = fep->rx_queue[i];
		for (j = 0; j < XDP_STATS_TOTAL; j++)
			rxq->stats[j] = 0;
	}

	/* Don't disable MIB statistics counters */
	writel(val: 0, addr: fep->hwp + FEC_MIB_CTRLSTAT);
}
2987 | |
#else /* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE 0
/* Statistics support is compiled out on M5272 (no MIB counter handling
 * there — see the conditionals above); provide no-op stubs so callers
 * need no #ifdefs of their own.
 */
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
2998 | |
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	/* (itr_clk_rate / 64000) = 64-cycle ticks per millisecond; the
	 * integer divisions truncate in this exact order.
	 */
	return us * (fep->itr_clk_rate / 64000) / 1000;
}
3009 | |
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	/* ICFT: frame-count threshold; ICTT: timer threshold converted to
	 * 64-cycle units by fec_enet_us_to_itr_clock().
	 */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	/* Program queue 0; multi-queue controllers mirror the same
	 * thresholds onto queues 1 and 2.
	 */
	writel(val: tx_itr, addr: fep->hwp + FEC_TXIC0);
	writel(val: rx_itr, addr: fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
		writel(val: tx_itr, addr: fep->hwp + FEC_TXIC1);
		writel(val: rx_itr, addr: fep->hwp + FEC_RXIC1);
		writel(val: tx_itr, addr: fep->hwp + FEC_TXIC2);
		writel(val: rx_itr, addr: fep->hwp + FEC_RXIC2);
	}
}
3045 | |
3046 | static int fec_enet_get_coalesce(struct net_device *ndev, |
3047 | struct ethtool_coalesce *ec, |
3048 | struct kernel_ethtool_coalesce *kernel_coal, |
3049 | struct netlink_ext_ack *extack) |
3050 | { |
3051 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3052 | |
3053 | if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) |
3054 | return -EOPNOTSUPP; |
3055 | |
3056 | ec->rx_coalesce_usecs = fep->rx_time_itr; |
3057 | ec->rx_max_coalesced_frames = fep->rx_pkts_itr; |
3058 | |
3059 | ec->tx_coalesce_usecs = fep->tx_time_itr; |
3060 | ec->tx_max_coalesced_frames = fep->tx_pkts_itr; |
3061 | |
3062 | return 0; |
3063 | } |
3064 | |
/* ethtool .set_coalesce: validate and apply interrupt-coalescing settings.
 *
 * Hardware limits: frame thresholds fit in 8 bits (max 255) and the timer
 * threshold, after conversion to 64-cycle units, must fit in 16 bits.
 * Accepted values are cached in fep and programmed via
 * fec_enet_itr_coal_set().
 */
static int fec_enet_set_coalesce(struct net_device *ndev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct device *dev = &fep->pdev->dev;
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n" );
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n" );
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, us: ec->rx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n" );
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, us: ec->tx_coalesce_usecs);
	if (cycle > 0xFFFF) {
		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n" );
		return -EINVAL;
	}

	/* All values validated; cache and program the hardware. */
	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}
3109 | |
/* LPI Sleep Ts count base on tx clk (clk_ref).
 * The lpi sleep cnt value = X us / (cycle_ns).
 */
static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	/* (clk_ref_rate / 1000) = cycles per millisecond; integer
	 * divisions truncate in this exact order.
	 */
	return us * (fep->clk_ref_rate / 1000) / 1000;
}
3119 | |
/* Program the EEE (Energy Efficient Ethernet) LPI sleep/wake cycle counts.
 *
 * @enable: true to negotiate EEE with the PHY and arm the LPI timers,
 *          false to zero both timers (LPI off).
 * Returns 0 on success, or the phy_init_eee() error when enabling fails
 * (in which case neither the cached state nor the registers are touched).
 */
static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct ethtool_eee *p = &fep->eee;
	unsigned int sleep_cycle, wake_cycle;
	int ret = 0;

	if (enable) {
		ret = phy_init_eee(phydev: ndev->phydev, clk_stop_enable: false);
		if (ret)
			return ret;

		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, us: p->tx_lpi_timer);
		/* The driver programs the same cycle count for wake as
		 * for sleep.
		 */
		wake_cycle = sleep_cycle;
	} else {
		sleep_cycle = 0;
		wake_cycle = 0;
	}

	p->tx_lpi_enabled = enable;
	p->eee_enabled = enable;
	p->eee_active = enable;

	writel(val: sleep_cycle, addr: fep->hwp + FEC_LPI_SLEEP);
	writel(val: wake_cycle, addr: fep->hwp + FEC_LPI_WAKE);

	return 0;
}
3148 | |
3149 | static int |
3150 | fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) |
3151 | { |
3152 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3153 | struct ethtool_eee *p = &fep->eee; |
3154 | |
3155 | if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) |
3156 | return -EOPNOTSUPP; |
3157 | |
3158 | if (!netif_running(dev: ndev)) |
3159 | return -ENETDOWN; |
3160 | |
3161 | edata->eee_enabled = p->eee_enabled; |
3162 | edata->eee_active = p->eee_active; |
3163 | edata->tx_lpi_timer = p->tx_lpi_timer; |
3164 | edata->tx_lpi_enabled = p->tx_lpi_enabled; |
3165 | |
3166 | return phy_ethtool_get_eee(phydev: ndev->phydev, data: edata); |
3167 | } |
3168 | |
3169 | static int |
3170 | fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata) |
3171 | { |
3172 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3173 | struct ethtool_eee *p = &fep->eee; |
3174 | int ret = 0; |
3175 | |
3176 | if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) |
3177 | return -EOPNOTSUPP; |
3178 | |
3179 | if (!netif_running(dev: ndev)) |
3180 | return -ENETDOWN; |
3181 | |
3182 | p->tx_lpi_timer = edata->tx_lpi_timer; |
3183 | |
3184 | if (!edata->eee_enabled || !edata->tx_lpi_enabled || |
3185 | !edata->tx_lpi_timer) |
3186 | ret = fec_enet_eee_mode_set(ndev, enable: false); |
3187 | else |
3188 | ret = fec_enet_eee_mode_set(ndev, enable: true); |
3189 | |
3190 | if (ret) |
3191 | return ret; |
3192 | |
3193 | return phy_ethtool_set_eee(phydev: ndev->phydev, data: edata); |
3194 | } |
3195 | |
3196 | static void |
3197 | fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) |
3198 | { |
3199 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3200 | |
3201 | if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { |
3202 | wol->supported = WAKE_MAGIC; |
3203 | wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; |
3204 | } else { |
3205 | wol->supported = wol->wolopts = 0; |
3206 | } |
3207 | } |
3208 | |
3209 | static int |
3210 | fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) |
3211 | { |
3212 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3213 | |
3214 | if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) |
3215 | return -EINVAL; |
3216 | |
3217 | if (wol->wolopts & ~WAKE_MAGIC) |
3218 | return -EINVAL; |
3219 | |
3220 | device_set_wakeup_enable(dev: &ndev->dev, enable: wol->wolopts & WAKE_MAGIC); |
3221 | if (device_may_wakeup(dev: &ndev->dev)) |
3222 | fep->wol_flag |= FEC_WOL_FLAG_ENABLE; |
3223 | else |
3224 | fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); |
3225 | |
3226 | return 0; |
3227 | } |
3228 | |
/* ethtool entry points.  The statistics and pause callbacks are omitted
 * on M5272 builds, matching the #ifdef'd implementations above.
 */
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_eee		= fec_enet_get_eee,
	.set_eee		= fec_enet_set_eee,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.self_test		= net_selftest,
};
3255 | |
/* Release every RX page and TX buffer; used on close and error paths.
 *
 * RX: each descriptor's page goes back to the queue's page pool, the XDP
 * rxq info is unregistered, and the pool itself is destroyed.
 * TX: bounce buffers are kfree()d and each tx_buf is released according
 * to its type (skb, XDP frame, or page-pool page), then reset to the
 * default FEC_TXBUF_T_SKB state.
 */
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	unsigned int i;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		for (i = 0; i < rxq->bd.ring_size; i++)
			page_pool_put_full_page(pool: rxq->page_pool, page: rxq->rx_skb_info[i].page, allow_direct: false);

		for (i = 0; i < XDP_STATS_TOTAL; i++)
			rxq->stats[i] = 0;

		if (xdp_rxq_info_is_reg(xdp_rxq: &rxq->xdp_rxq))
			xdp_rxq_info_unreg(xdp_rxq: &rxq->xdp_rxq);
		page_pool_destroy(pool: rxq->page_pool);
		rxq->page_pool = NULL;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(objp: txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;

			/* Slot empty: just normalize its type and move on. */
			if (!txq->tx_buf[i].buf_p) {
				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
				continue;
			}

			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				dev_kfree_skb(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				xdp_return_frame(xdpf: txq->tx_buf[i].buf_p);
			} else {
				/* Remaining type holds a page-pool page. */
				struct page *page = txq->tx_buf[i].buf_p;

				page_pool_put_page(pool: page->pp, page, dma_sync_size: 0, allow_direct: false);
			}

			txq->tx_buf[i].buf_p = NULL;
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
		}
	}
}
3304 | |
/* Free per-queue state allocated by fec_enet_alloc_queue(): TSO header
 * DMA memory first, then the queue structures themselves.  Safe to call
 * on a partially-initialized set (NULL checks below; kfree(NULL) is ok).
 */
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			fec_dma_free(dev: &fep->pdev->dev,
				     size: txq->bd.ring_size * TSO_HEADER_SIZE,
				     cpu_addr: txq->tso_hdrs, handle: txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(objp: fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(objp: fep->tx_queue[i]);
}
3324 | |
/* Allocate the per-queue software state: one struct per TX/RX queue plus
 * DMA-coherent TSO header space per TX ring entry.
 *
 * On any failure everything allocated so far is torn down via
 * fec_enet_free_queue() and -ENOMEM is returned.
 */
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(size: sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		/* Stop the queue when fewer than FEC_MAX_SKB_DESCS slots
		 * remain; wake it again once there is extra headroom.
		 */
		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;

		txq->tso_hdrs = fec_dma_alloc(dev: &fep->pdev->dev,
					size: txq->bd.ring_size * TSO_HEADER_SIZE,
					handle: &txq->tso_hdrs_dma, GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(size: sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
3372 | |
/* Populate one RX ring: create its page pool and attach a DMA-mapped
 * page to every buffer descriptor.
 *
 * Each descriptor's buffer address points FEC_ENET_XDP_HEADROOM bytes
 * into its page, leaving headroom for XDP.  The last descriptor gets the
 * WRAP bit so the hardware rings back to the base.  On page allocation
 * failure, buffers of ALL queues are freed and -ENOMEM is returned.
 */
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct fec_enet_priv_rx_q *rxq;
	dma_addr_t phys_addr;
	struct bufdesc *bdp;
	struct page *page;
	int i, err;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;

	err = fec_enet_create_page_pool(fep, rxq, size: rxq->bd.ring_size);
	if (err < 0) {
		netdev_err(dev: ndev, format: "%s failed queue %d (%d)\n" , __func__, queue, err);
		return err;
	}

	for (i = 0; i < rxq->bd.ring_size; i++) {
		page = page_pool_dev_alloc_pages(pool: rxq->page_pool);
		if (!page)
			goto err_alloc;

		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);

		rxq->rx_skb_info[i].page = page;
		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
		/* Hand the descriptor to the hardware (EMPTY = ready). */
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			/* Extended descriptors: enable the RX interrupt. */
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, bd: &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, bd: &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
3421 | |
/* Initialize one TX ring: allocate a bounce buffer for every descriptor
 * and clear the descriptors themselves.
 *
 * The last descriptor gets the WRAP bit.  On allocation failure, buffers
 * of ALL queues are freed and -ENOMEM is returned.
 */
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	unsigned int i;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		/* Descriptor starts clean: no status, no buffer. */
		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			/* Extended descriptors: enable the TX interrupt. */
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, bd: &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
3458 | |
3459 | static int fec_enet_alloc_buffers(struct net_device *ndev) |
3460 | { |
3461 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3462 | unsigned int i; |
3463 | |
3464 | for (i = 0; i < fep->num_rx_queues; i++) |
3465 | if (fec_enet_alloc_rxq_buffers(ndev, queue: i)) |
3466 | return -ENOMEM; |
3467 | |
3468 | for (i = 0; i < fep->num_tx_queues; i++) |
3469 | if (fec_enet_alloc_txq_buffers(ndev, queue: i)) |
3470 | return -ENOMEM; |
3471 | return 0; |
3472 | } |
3473 | |
/* ndo_open: bring the interface up.
 *
 * Order matters: runtime-resume + default pinctrl, clocks, buffer
 * allocation, MAC init (fec_restart), optional deferred PHY reset, PHY
 * probe/connect, then NAPI and the TX queues.  Error paths unwind in
 * reverse via the labels at the bottom.
 */
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	int ret;
	bool reset_again;

	ret = pm_runtime_resume_and_get(dev: &fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(dev: &fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, enable: true);
	if (ret)
		goto clk_enable;

	/* During the first fec_enet_open call the PHY isn't probed at this
	 * point. Therefore the phy_reset_after_clk_enable() call within
	 * fec_enet_clk_enable() fails. As we need this reset in order to be
	 * sure the PHY is working correctly we check if we need to reset again
	 * later when the PHY is probed
	 */
	if (ndev->phydev && ndev->phydev->drv)
		reset_again = false;
	else
		reset_again = true;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Call phy_reset_after_clk_enable() again if it failed during
	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
	 */
	if (reset_again)
		fec_enet_phy_reset_after_clk_enable(ndev);

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	/* Pin CPU latency while the interface is up on parts that need it. */
	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_add_request(req: &fep->pm_qos_req, value: 0);

	napi_enable(n: &fep->napi);
	phy_start(phydev: ndev->phydev);
	netif_tx_start_all_queues(dev: ndev);

	device_set_wakeup_enable(dev: &ndev->dev, enable: fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, enable: false);
clk_enable:
	pm_runtime_mark_last_busy(dev: &fep->pdev->dev);
	pm_runtime_put_autosuspend(dev: &fep->pdev->dev);
	pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev);
	return ret;
}
3548 | |
/* ndo_close: take the interface down.
 *
 * Reverse of fec_enet_open(): stop the PHY, quiesce NAPI/TX and the MAC,
 * disconnect the PHY, snapshot the MIB counters BEFORE the clocks are
 * gated (registers become inaccessible after), then drop clocks, QoS,
 * pinctrl and the runtime-PM reference, and finally free the buffers.
 */
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);

	phy_stop(phydev: ndev->phydev);

	if (netif_device_present(dev: ndev)) {
		napi_disable(n: &fep->napi);
		netif_tx_disable(dev: ndev);
		fec_stop(ndev);
	}

	phy_disconnect(phydev: ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	/* Cache a final stats snapshot while registers are still live. */
	fec_enet_update_ethtool_stats(dev: ndev);

	fec_enet_clk_enable(ndev, enable: false);
	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_remove_request(req: &fep->pm_qos_req);

	pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev);
	pm_runtime_mark_last_busy(dev: &fep->pdev->dev);
	pm_runtime_put_autosuspend(dev: &fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}
3581 | |
3582 | /* Set or clear the multicast filter for this adaptor. |
3583 | * Skeleton taken from sunlance driver. |
3584 | * The CPM Ethernet implementation allows Multicast as well as individual |
3585 | * MAC address filtering. Some of the drivers check to make sure it is |
3586 | * a group multicast address, and discard those that are not. I guess I |
3587 | * will do the same for now, but just remove the test if you want |
3588 | * individual filtering as well (do the upper net layers want or support |
3589 | * this kind of feature?). |
3590 | */ |
3591 | |
#define FEC_HASH_BITS 6 /* #bits in hash */

/* ndo_set_rx_mode: program promiscuous mode or the 64-bit multicast
 * group hash filter from the device's multicast list.
 */
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct netdev_hw_addr *ha;
	unsigned int crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		/* Set the promiscuous bit (0x8) in the receive control
		 * register and skip the hash filter entirely.
		 */
		tmp = readl(addr: fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(val: tmp, addr: fep->hwp + FEC_R_CNTRL);
		return;
	}

	/* Not promiscuous: make sure the bit is cleared. */
	tmp = readl(addr: fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(val: tmp, addr: fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(val: 0xffffffff, addr: fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(val: 0xffffffff, addr: fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = ether_crc_le(ndev->addr_len, ha->addr);

		/* only upper 6 bits (FEC_HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		/* Hash bits 32..63 live in the HIGH register. */
		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(val: hash_high, addr: fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(val: hash_low, addr: fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
3642 | |
3643 | /* Set a MAC change in hardware. */ |
3644 | static int |
3645 | fec_set_mac_address(struct net_device *ndev, void *p) |
3646 | { |
3647 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3648 | struct sockaddr *addr = p; |
3649 | |
3650 | if (addr) { |
3651 | if (!is_valid_ether_addr(addr: addr->sa_data)) |
3652 | return -EADDRNOTAVAIL; |
3653 | eth_hw_addr_set(dev: ndev, addr: addr->sa_data); |
3654 | } |
3655 | |
3656 | /* Add netif status check here to avoid system hang in below case: |
3657 | * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; |
3658 | * After ethx down, fec all clocks are gated off and then register |
3659 | * access causes system hang. |
3660 | */ |
3661 | if (!netif_running(dev: ndev)) |
3662 | return 0; |
3663 | |
3664 | writel(val: ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | |
3665 | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), |
3666 | addr: fep->hwp + FEC_ADDR_LOW); |
3667 | writel(val: (ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), |
3668 | addr: fep->hwp + FEC_ADDR_HIGH); |
3669 | return 0; |
3670 | } |
3671 | |
3672 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3673 | /** |
3674 | * fec_poll_controller - FEC Poll controller function |
3675 | * @dev: The FEC network adapter |
3676 | * |
3677 | * Polled functionality used by netconsole and others in non interrupt mode |
3678 | * |
3679 | */ |
3680 | static void fec_poll_controller(struct net_device *dev) |
3681 | { |
3682 | int i; |
3683 | struct fec_enet_private *fep = netdev_priv(dev); |
3684 | |
3685 | for (i = 0; i < FEC_IRQ_NUM; i++) { |
3686 | if (fep->irq[i] > 0) { |
3687 | disable_irq(irq: fep->irq[i]); |
3688 | fec_enet_interrupt(irq: fep->irq[i], dev_id: dev); |
3689 | enable_irq(irq: fep->irq[i]); |
3690 | } |
3691 | } |
3692 | } |
3693 | #endif |
3694 | |
3695 | static inline void fec_enet_set_netdev_features(struct net_device *netdev, |
3696 | netdev_features_t features) |
3697 | { |
3698 | struct fec_enet_private *fep = netdev_priv(dev: netdev); |
3699 | netdev_features_t changed = features ^ netdev->features; |
3700 | |
3701 | netdev->features = features; |
3702 | |
3703 | /* Receive checksum has been changed */ |
3704 | if (changed & NETIF_F_RXCSUM) { |
3705 | if (features & NETIF_F_RXCSUM) |
3706 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; |
3707 | else |
3708 | fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; |
3709 | } |
3710 | } |
3711 | |
3712 | static int fec_set_features(struct net_device *netdev, |
3713 | netdev_features_t features) |
3714 | { |
3715 | struct fec_enet_private *fep = netdev_priv(dev: netdev); |
3716 | netdev_features_t changed = features ^ netdev->features; |
3717 | |
3718 | if (netif_running(dev: netdev) && changed & NETIF_F_RXCSUM) { |
3719 | napi_disable(n: &fep->napi); |
3720 | netif_tx_lock_bh(dev: netdev); |
3721 | fec_stop(ndev: netdev); |
3722 | fec_enet_set_netdev_features(netdev, features); |
3723 | fec_restart(ndev: netdev); |
3724 | netif_tx_wake_all_queues(dev: netdev); |
3725 | netif_tx_unlock_bh(dev: netdev); |
3726 | napi_enable(n: &fep->napi); |
3727 | } else { |
3728 | fec_enet_set_netdev_features(netdev, features); |
3729 | } |
3730 | |
3731 | return 0; |
3732 | } |
3733 | |
3734 | static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) |
3735 | { |
3736 | struct vlan_ethhdr *vhdr; |
3737 | unsigned short vlan_TCI = 0; |
3738 | |
3739 | if (skb->protocol == htons(ETH_P_ALL)) { |
3740 | vhdr = (struct vlan_ethhdr *)(skb->data); |
3741 | vlan_TCI = ntohs(vhdr->h_vlan_TCI); |
3742 | } |
3743 | |
3744 | return vlan_TCI; |
3745 | } |
3746 | |
3747 | static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, |
3748 | struct net_device *sb_dev) |
3749 | { |
3750 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3751 | u16 vlan_tag; |
3752 | |
3753 | if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) |
3754 | return netdev_pick_tx(dev: ndev, skb, NULL); |
3755 | |
3756 | vlan_tag = fec_enet_get_raw_vlan_tci(skb); |
3757 | if (!vlan_tag) |
3758 | return vlan_tag; |
3759 | |
3760 | return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; |
3761 | } |
3762 | |
/* ndo_bpf callback: attach/detach an XDP program.
 *
 * The datapath is quiesced (NAPI + TX) around the program swap when the
 * interface is running, and the MAC is restarted so the RX path picks up
 * the XDP-appropriate buffer layout.  AF_XDP zero-copy pools are not
 * supported.  Returns 0 on success or -EOPNOTSUPP.
 */
static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	bool is_run = netif_running(dev);
	struct bpf_prog *old_prog;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* No need to support the SoCs that require to
		 * do the frame swap because the performance wouldn't be
		 * better than the skb mode.
		 */
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			return -EOPNOTSUPP;

		/* Detaching: drop the redirect-target capability before the
		 * old program stops running.
		 */
		if (!bpf->prog)
			xdp_features_clear_redirect_target(dev);

		if (is_run) {
			napi_disable(n: &fep->napi);
			netif_tx_disable(dev);
		}

		/* Atomically swap in the new program; release our reference
		 * on the old one.
		 */
		old_prog = xchg(&fep->xdp_prog, bpf->prog);
		if (old_prog)
			bpf_prog_put(prog: old_prog);

		/* NOTE(review): fec_restart() runs even when the interface is
		 * down (is_run == false) — confirm this is intended and safe
		 * with the clocks state at that point.
		 */
		fec_restart(ndev: dev);

		if (is_run) {
			napi_enable(n: &fep->napi);
			netif_tx_start_all_queues(dev);
		}

		if (bpf->prog)
			xdp_features_set_redirect_target(dev, support_sg: false);

		return 0;

	case XDP_SETUP_XSK_POOL:
		return -EOPNOTSUPP;

	default:
		return -EOPNOTSUPP;
	}
}
3809 | |
/* Map a CPU id onto a valid TX queue index.
 *
 * Returns queue 0 for negative ids, otherwise spreads CPUs across the
 * available TX queues with a simple modulo.
 */
static int
fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
{
	if (unlikely(index < 0))
		return 0;

	return (index % fep->num_tx_queues);
}
3818 | |
/* Queue one XDP frame on a TX ring.
 *
 * @frame is a struct xdp_frame * when @ndo_xmit is true (ndo_xdp_xmit
 * path, buffer is DMA-mapped here) or a struct xdp_buff * otherwise
 * (XDP_TX path, page-pool page already mapped; only @dma_sync_len bytes
 * are synced to the device).  Caller must hold the TX queue lock.
 *
 * Returns 0 on success, -EBUSY when the ring is too full, or -ENOMEM on
 * a DMA mapping failure.  On success the buffer's ownership is recorded
 * in txq->tx_buf[] and released at TX-completion time.
 */
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
				   struct fec_enet_priv_tx_q *txq,
				   void *frame, u32 dma_sync_len,
				   bool ndo_xmit)
{
	unsigned int index, status, estatus;
	struct bufdesc *bdp;
	dma_addr_t dma_addr;
	int entries_free;
	u16 frame_len;

	/* Keep the same headroom the skb path requires */
	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n" );
		return -EBUSY;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	index = fec_enet_get_bd_index(bdp, bd: &txq->bd);

	if (ndo_xmit) {
		struct xdp_frame *xdpf = frame;

		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev: &fep->pdev->dev, dma_addr))
			return -ENOMEM;

		frame_len = xdpf->len;
		txq->tx_buf[index].buf_p = xdpf;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
	} else {
		struct xdp_buff *xdpb = frame;
		struct page *page;

		/* Page-pool buffer: reuse its DMA mapping, offset to the
		 * start of the (possibly XDP-adjusted) packet data.
		 */
		page = virt_to_page(xdpb->data);
		dma_addr = page_pool_get_dma_addr(page) +
			   (xdpb->data - xdpb->data_hard_start);
		dma_sync_single_for_device(dev: &fep->pdev->dev, addr: dma_addr,
					   size: dma_sync_len, dir: DMA_BIDIRECTIONAL);
		frame_len = xdpb->data_end - xdpb->data;
		txq->tx_buf[index].buf_p = page;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
	}

	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
	/* estatus is only consumed below under the same bufdesc_ex guard */
	if (fep->bufdesc_ex)
		estatus = BD_ENET_TX_INT;

	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
	bdp->cbd_datlen = cpu_to_fec16(frame_len);

	if (fep->bufdesc_ex) {
		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	dma_wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(bdp, bd: &txq->bd);

	/* Make sure the update to bdp are performed before txq->bd.cur. */
	dma_wmb();

	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(val: 0, addr: txq->bd.reg_desc_active);

	return 0;
}
3909 | |
3910 | static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, |
3911 | int cpu, struct xdp_buff *xdp, |
3912 | u32 dma_sync_len) |
3913 | { |
3914 | struct fec_enet_priv_tx_q *txq; |
3915 | struct netdev_queue *nq; |
3916 | int queue, ret; |
3917 | |
3918 | queue = fec_enet_xdp_get_tx_queue(fep, index: cpu); |
3919 | txq = fep->tx_queue[queue]; |
3920 | nq = netdev_get_tx_queue(dev: fep->netdev, index: queue); |
3921 | |
3922 | __netif_tx_lock(txq: nq, cpu); |
3923 | |
3924 | /* Avoid tx timeout as XDP shares the queue with kernel stack */ |
3925 | txq_trans_cond_update(txq: nq); |
3926 | ret = fec_enet_txq_xmit_frame(fep, txq, frame: xdp, dma_sync_len, ndo_xmit: false); |
3927 | |
3928 | __netif_tx_unlock(txq: nq); |
3929 | |
3930 | return ret; |
3931 | } |
3932 | |
3933 | static int fec_enet_xdp_xmit(struct net_device *dev, |
3934 | int num_frames, |
3935 | struct xdp_frame **frames, |
3936 | u32 flags) |
3937 | { |
3938 | struct fec_enet_private *fep = netdev_priv(dev); |
3939 | struct fec_enet_priv_tx_q *txq; |
3940 | int cpu = smp_processor_id(); |
3941 | unsigned int sent_frames = 0; |
3942 | struct netdev_queue *nq; |
3943 | unsigned int queue; |
3944 | int i; |
3945 | |
3946 | queue = fec_enet_xdp_get_tx_queue(fep, index: cpu); |
3947 | txq = fep->tx_queue[queue]; |
3948 | nq = netdev_get_tx_queue(dev: fep->netdev, index: queue); |
3949 | |
3950 | __netif_tx_lock(txq: nq, cpu); |
3951 | |
3952 | /* Avoid tx timeout as XDP shares the queue with kernel stack */ |
3953 | txq_trans_cond_update(txq: nq); |
3954 | for (i = 0; i < num_frames; i++) { |
3955 | if (fec_enet_txq_xmit_frame(fep, txq, frame: frames[i], dma_sync_len: 0, ndo_xmit: true) < 0) |
3956 | break; |
3957 | sent_frames++; |
3958 | } |
3959 | |
3960 | __netif_tx_unlock(txq: nq); |
3961 | |
3962 | return sent_frames; |
3963 | } |
3964 | |
3965 | static int fec_hwtstamp_get(struct net_device *ndev, |
3966 | struct kernel_hwtstamp_config *config) |
3967 | { |
3968 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3969 | |
3970 | if (!netif_running(dev: ndev)) |
3971 | return -EINVAL; |
3972 | |
3973 | if (!fep->bufdesc_ex) |
3974 | return -EOPNOTSUPP; |
3975 | |
3976 | fec_ptp_get(ndev, config); |
3977 | |
3978 | return 0; |
3979 | } |
3980 | |
3981 | static int fec_hwtstamp_set(struct net_device *ndev, |
3982 | struct kernel_hwtstamp_config *config, |
3983 | struct netlink_ext_ack *extack) |
3984 | { |
3985 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
3986 | |
3987 | if (!netif_running(dev: ndev)) |
3988 | return -EINVAL; |
3989 | |
3990 | if (!fep->bufdesc_ex) |
3991 | return -EOPNOTSUPP; |
3992 | |
3993 | return fec_ptp_set(ndev, config, extack); |
3994 | } |
3995 | |
/* Netdev operations table wiring the FEC callbacks into the core */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_select_queue       = fec_enet_select_queue,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
	.ndo_bpf		= fec_enet_bpf,
	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
	.ndo_hwtstamp_get	= fec_hwtstamp_get,
	.ndo_hwtstamp_set	= fec_hwtstamp_set,
};
4015 | |
/* Per-queue register offsets of the RX "descriptor active" doorbells */
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

/* Per-queue register offsets of the TX "descriptor active" doorbells */
static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};
4023 | |
4024 | /* |
4025 | * XXX: We need to clean up on failure exits here. |
4026 | * |
4027 | */ |
/* One-time netdev initialization at probe time.
 *
 * Allocates the software queues and the single DMA-coherent region that
 * backs all RX and TX buffer descriptor rings, carves that region up
 * between the queues, reads the MAC address, installs the netdev ops /
 * feature flags, and performs the initial MAC restart.
 *
 * Returns 0 on success or a negative errno; on failure the software
 * queues are freed (the descriptor memory is device-managed).
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(dev: ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	/* Descriptor stride: extended descriptors are larger */
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(word: dsize);
	int ret;

	/* Index math below shifts by dsize_log2, so dsize must be a power
	 * of two.
	 */
	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif
	/* Default interrupt coalescing thresholds */
	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;

	/* Check mask of the streaming and coherent API */
	ret = dma_set_mask_and_coherent(dev: &fep->pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		dev_warn(&fep->pdev->dev, "No suitable DMA available\n" );
		return ret;
	}

	ret = fec_enet_alloc_queue(ndev);
	if (ret)
		return ret;

	/* One contiguous region holds every RX and TX descriptor ring */
	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = fec_dmam_alloc(dev: &fep->pdev->dev, size: bd_size, handle: &bd_dma,
				  GFP_KERNEL);
	if (!cbd_base) {
		ret = -ENOMEM;
		goto free_queue_mem;
	}

	/* Get the Ethernet address */
	ret = fec_get_mac(ndev);
	if (ret)
		goto free_queue_mem;

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		/* Advance past this ring; 'last' is its final descriptor */
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}


	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, addr: fep->hwp + FEC_IMASK);
	netif_napi_add(dev: ndev, napi: &fep->napi, poll: fec_enet_rx_napi);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		netif_set_tso_max_segs(dev: ndev, FEC_MAX_TSO_SEGS);

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	/* Multi-queue controllers have different alignment requirements */
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	/* Frame-swapping SoCs don't support XDP (see fec_enet_bpf) */
	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(dev: ndev);
	else
		fec_enet_update_ethtool_stats(dev: ndev);

	return 0;

free_queue_mem:
	fec_enet_free_queue(ndev);
	return ret;
}
4158 | |
4159 | #ifdef CONFIG_OF |
4160 | static int fec_reset_phy(struct platform_device *pdev) |
4161 | { |
4162 | struct gpio_desc *phy_reset; |
4163 | int msec = 1, phy_post_delay = 0; |
4164 | struct device_node *np = pdev->dev.of_node; |
4165 | int err; |
4166 | |
4167 | if (!np) |
4168 | return 0; |
4169 | |
4170 | err = of_property_read_u32(np, propname: "phy-reset-duration" , out_value: &msec); |
4171 | /* A sane reset duration should not be longer than 1s */ |
4172 | if (!err && msec > 1000) |
4173 | msec = 1; |
4174 | |
4175 | err = of_property_read_u32(np, propname: "phy-reset-post-delay" , out_value: &phy_post_delay); |
4176 | /* valid reset duration should be less than 1s */ |
4177 | if (!err && phy_post_delay > 1000) |
4178 | return -EINVAL; |
4179 | |
4180 | phy_reset = devm_gpiod_get_optional(dev: &pdev->dev, con_id: "phy-reset" , |
4181 | flags: GPIOD_OUT_HIGH); |
4182 | if (IS_ERR(ptr: phy_reset)) |
4183 | return dev_err_probe(dev: &pdev->dev, err: PTR_ERR(ptr: phy_reset), |
4184 | fmt: "failed to get phy-reset-gpios\n" ); |
4185 | |
4186 | if (!phy_reset) |
4187 | return 0; |
4188 | |
4189 | if (msec > 20) |
4190 | msleep(msecs: msec); |
4191 | else |
4192 | usleep_range(min: msec * 1000, max: msec * 1000 + 1000); |
4193 | |
4194 | gpiod_set_value_cansleep(desc: phy_reset, value: 0); |
4195 | |
4196 | if (!phy_post_delay) |
4197 | return 0; |
4198 | |
4199 | if (phy_post_delay > 20) |
4200 | msleep(msecs: phy_post_delay); |
4201 | else |
4202 | usleep_range(min: phy_post_delay * 1000, |
4203 | max: phy_post_delay * 1000 + 1000); |
4204 | |
4205 | return 0; |
4206 | } |
4207 | #else /* CONFIG_OF */ |
/* Non-OF stub: nothing to do here. */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
4216 | #endif /* CONFIG_OF */ |
4217 | |
4218 | static void |
4219 | fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) |
4220 | { |
4221 | struct device_node *np = pdev->dev.of_node; |
4222 | |
4223 | *num_tx = *num_rx = 1; |
4224 | |
4225 | if (!np || !of_device_is_available(device: np)) |
4226 | return; |
4227 | |
4228 | /* parse the num of tx and rx queues */ |
4229 | of_property_read_u32(np, propname: "fsl,num-tx-queues" , out_value: num_tx); |
4230 | |
4231 | of_property_read_u32(np, propname: "fsl,num-rx-queues" , out_value: num_rx); |
4232 | |
4233 | if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { |
4234 | dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n" , |
4235 | *num_tx); |
4236 | *num_tx = 1; |
4237 | return; |
4238 | } |
4239 | |
4240 | if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { |
4241 | dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n" , |
4242 | *num_rx); |
4243 | *num_rx = 1; |
4244 | return; |
4245 | } |
4246 | |
4247 | } |
4248 | |
4249 | static int fec_enet_get_irq_cnt(struct platform_device *pdev) |
4250 | { |
4251 | int irq_cnt = platform_irq_count(pdev); |
4252 | |
4253 | if (irq_cnt > FEC_IRQ_NUM) |
4254 | irq_cnt = FEC_IRQ_NUM; /* last for pps */ |
4255 | else if (irq_cnt == 2) |
4256 | irq_cnt = 1; /* last for pps */ |
4257 | else if (irq_cnt <= 0) |
4258 | irq_cnt = 1; /* At least 1 irq is needed */ |
4259 | return irq_cnt; |
4260 | } |
4261 | |
4262 | static void fec_enet_get_wakeup_irq(struct platform_device *pdev) |
4263 | { |
4264 | struct net_device *ndev = platform_get_drvdata(pdev); |
4265 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4266 | |
4267 | if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) |
4268 | fep->wake_irq = fep->irq[2]; |
4269 | else |
4270 | fep->wake_irq = fep->irq[0]; |
4271 | } |
4272 | |
4273 | static int fec_enet_init_stop_mode(struct fec_enet_private *fep, |
4274 | struct device_node *np) |
4275 | { |
4276 | struct device_node *gpr_np; |
4277 | u32 out_val[3]; |
4278 | int ret = 0; |
4279 | |
4280 | gpr_np = of_parse_phandle(np, phandle_name: "fsl,stop-mode" , index: 0); |
4281 | if (!gpr_np) |
4282 | return 0; |
4283 | |
4284 | ret = of_property_read_u32_array(np, propname: "fsl,stop-mode" , out_values: out_val, |
4285 | ARRAY_SIZE(out_val)); |
4286 | if (ret) { |
4287 | dev_dbg(&fep->pdev->dev, "no stop mode property\n" ); |
4288 | goto out; |
4289 | } |
4290 | |
4291 | fep->stop_gpr.gpr = syscon_node_to_regmap(np: gpr_np); |
4292 | if (IS_ERR(ptr: fep->stop_gpr.gpr)) { |
4293 | dev_err(&fep->pdev->dev, "could not find gpr regmap\n" ); |
4294 | ret = PTR_ERR(ptr: fep->stop_gpr.gpr); |
4295 | fep->stop_gpr.gpr = NULL; |
4296 | goto out; |
4297 | } |
4298 | |
4299 | fep->stop_gpr.reg = out_val[1]; |
4300 | fep->stop_gpr.bit = out_val[2]; |
4301 | |
4302 | out: |
4303 | of_node_put(node: gpr_np); |
4304 | |
4305 | return ret; |
4306 | } |
4307 | |
/* Platform-device probe: allocate the netdev, map registers, acquire
 * clocks/regulators, set up PTP, interrupts, the MDIO bus and runtime
 * PM, then register the network device.
 *
 * Error handling unwinds in strict reverse order through the labels at
 * the bottom; each label undoes exactly the steps completed before the
 * corresponding failure point.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	phy_interface_t interface;
	struct net_device *ndev;
	int i, irq, ret = 0;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;
	const struct fec_devinfo *dev_info;

	fec_enet_get_queue_num(pdev, num_tx: &num_tx_qs, num_rx: &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof_priv: sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, txqs: num_tx_qs, rxqs: num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(dev: ndev);

	/* SoC-specific quirks: OF match data first, platform id_entry as
	 * fallback.
	 */
	dev_info = device_get_match_data(dev: &pdev->dev);
	if (!dev_info)
		dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
	if (dev_info)
		fep->quirks = dev_info->quirks;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev: &pdev->dev);

	fep->hwp = devm_platform_ioremap_resource(pdev, index: 0);
	if (IS_ERR(ptr: fep->hwp)) {
		ret = PTR_ERR(ptr: fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, data: ndev);

	/* ERR006687 workaround applies unless the board declares it has
	 * the hardware fix present.
	 */
	if ((of_machine_is_compatible(compat: "fsl,imx6q" ) ||
	     of_machine_is_compatible(compat: "fsl,imx6dl" )) &&
	    !of_property_read_bool(np, propname: "fsl,err006687-workaround-present" ))
		fep->quirks |= FEC_QUIRK_ERR006687;

	ret = fec_enet_ipc_handle_init(fep);
	if (ret)
		goto failed_ipc_init;

	if (of_property_read_bool(np, propname: "fsl,magic-packet" ))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	ret = fec_enet_init_stop_mode(fep, np);
	if (ret)
		goto failed_stop_mode;

	/* PHY: explicit phy-handle, or register a DT fixed link */
	phy_node = of_parse_phandle(np, phandle_name: "phy-handle" , index: 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n" );
			goto failed_phy;
		}
		phy_node = of_node_get(node: np);
	}
	fep->phy_node = phy_node;

	/* PHY interface mode: DT first, then platform data, MII fallback */
	ret = of_get_phy_mode(np: pdev->dev.of_node, interface: &interface);
	if (ret) {
		pdata = dev_get_platdata(dev: &pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = interface;
	}

	ret = fec_enet_parse_rgmii_delay(fep, np);
	if (ret)
		goto failed_rgmii_delay;

	fep->clk_ipg = devm_clk_get(dev: &pdev->dev, id: "ipg" );
	if (IS_ERR(ptr: fep->clk_ipg)) {
		ret = PTR_ERR(ptr: fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(dev: &pdev->dev, id: "ahb" );
	if (IS_ERR(ptr: fep->clk_ahb)) {
		ret = PTR_ERR(ptr: fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(clk: fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get_optional(dev: &pdev->dev, id: "enet_out" );
	if (IS_ERR(ptr: fep->clk_enet_out)) {
		ret = PTR_ERR(ptr: fep->clk_enet_out);
		goto failed_clk;
	}

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get_optional(dev: &pdev->dev, id: "enet_clk_ref" );
	if (IS_ERR(ptr: fep->clk_ref)) {
		ret = PTR_ERR(ptr: fep->clk_ref);
		goto failed_clk;
	}
	fep->clk_ref_rate = clk_get_rate(clk: fep->clk_ref);

	/* clk_2x_txclk is optional, depends on board */
	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
		fep->clk_2x_txclk = devm_clk_get(dev: &pdev->dev, id: "enet_2x_txclk" );
		if (IS_ERR(ptr: fep->clk_2x_txclk))
			fep->clk_2x_txclk = NULL;
	}

	/* Extended descriptors (and PTP) need the ptp clock; without it
	 * fall back to plain descriptors.
	 */
	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(dev: &pdev->dev, id: "ptp" );
	if (IS_ERR(ptr: fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, enable: true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(clk: fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(clk: fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	/* Optional PHY regulator; only -EPROBE_DEFER is fatal */
	fep->reg_phy = devm_regulator_get_optional(dev: &pdev->dev, id: "phy" );
	if (!IS_ERR(ptr: fep->reg_phy)) {
		ret = regulator_enable(regulator: fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n" , ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(ptr: fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(dev: &pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(dev: &pdev->dev);
	pm_runtime_get_noresume(dev: &pdev->dev);
	pm_runtime_set_active(dev: &pdev->dev);
	pm_runtime_enable(dev: &pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_idx: irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* Request each interrupt line by name ("int0"...) with a plain
	 * index fallback.
	 */
	for (i = 0; i < irq_cnt; i++) {
		snprintf(buf: irq_name, size: sizeof(irq_name), fmt: "int%d" , i);
		irq = platform_get_irq_byname_optional(dev: pdev, name: irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(dev: &pdev->dev, irq, handler: fec_enet_interrupt,
				       irqflags: 0, devname: pdev->name, dev_id: ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev: ndev);
	fec_enet_clk_enable(ndev, enable: false);
	pinctrl_pm_select_sleep_state(dev: &pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(dev: ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(dev: &ndev->dev, enable: fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(dev: ndev, format: "registered PHC device %d\n" , fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	/* Hand the device over to runtime PM */
	pm_runtime_mark_last_busy(dev: &pdev->dev);
	pm_runtime_put_autosuspend(dev: &pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(dev: &pdev->dev);
	pm_runtime_disable(dev: &pdev->dev);
	if (fep->reg_phy)
		regulator_disable(regulator: fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(clk: fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(clk: fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, enable: false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(node: phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(dev: ndev);

	return ret;
}
4580 | |
4581 | static void |
4582 | fec_drv_remove(struct platform_device *pdev) |
4583 | { |
4584 | struct net_device *ndev = platform_get_drvdata(pdev); |
4585 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4586 | struct device_node *np = pdev->dev.of_node; |
4587 | int ret; |
4588 | |
4589 | ret = pm_runtime_get_sync(dev: &pdev->dev); |
4590 | if (ret < 0) |
4591 | dev_err(&pdev->dev, |
4592 | "Failed to resume device in remove callback (%pe)\n" , |
4593 | ERR_PTR(ret)); |
4594 | |
4595 | cancel_work_sync(work: &fep->tx_timeout_work); |
4596 | fec_ptp_stop(pdev); |
4597 | unregister_netdev(dev: ndev); |
4598 | fec_enet_mii_remove(fep); |
4599 | if (fep->reg_phy) |
4600 | regulator_disable(regulator: fep->reg_phy); |
4601 | |
4602 | if (of_phy_is_fixed_link(np)) |
4603 | of_phy_deregister_fixed_link(np); |
4604 | of_node_put(node: fep->phy_node); |
4605 | |
4606 | /* After pm_runtime_get_sync() failed, the clks are still off, so skip |
4607 | * disabling them again. |
4608 | */ |
4609 | if (ret >= 0) { |
4610 | clk_disable_unprepare(clk: fep->clk_ahb); |
4611 | clk_disable_unprepare(clk: fep->clk_ipg); |
4612 | } |
4613 | pm_runtime_put_noidle(dev: &pdev->dev); |
4614 | pm_runtime_disable(dev: &pdev->dev); |
4615 | |
4616 | free_netdev(dev: ndev); |
4617 | } |
4618 | |
4619 | static int __maybe_unused fec_suspend(struct device *dev) |
4620 | { |
4621 | struct net_device *ndev = dev_get_drvdata(dev); |
4622 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4623 | int ret; |
4624 | |
4625 | rtnl_lock(); |
4626 | if (netif_running(dev: ndev)) { |
4627 | if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) |
4628 | fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; |
4629 | phy_stop(phydev: ndev->phydev); |
4630 | napi_disable(n: &fep->napi); |
4631 | netif_tx_lock_bh(dev: ndev); |
4632 | netif_device_detach(dev: ndev); |
4633 | netif_tx_unlock_bh(dev: ndev); |
4634 | fec_stop(ndev); |
4635 | if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { |
4636 | fec_irqs_disable(ndev); |
4637 | pinctrl_pm_select_sleep_state(dev: &fep->pdev->dev); |
4638 | } else { |
4639 | fec_irqs_disable_except_wakeup(ndev); |
4640 | if (fep->wake_irq > 0) { |
4641 | disable_irq(irq: fep->wake_irq); |
4642 | enable_irq_wake(irq: fep->wake_irq); |
4643 | } |
4644 | fec_enet_stop_mode(fep, enabled: true); |
4645 | } |
4646 | /* It's safe to disable clocks since interrupts are masked */ |
4647 | fec_enet_clk_enable(ndev, enable: false); |
4648 | |
4649 | fep->rpm_active = !pm_runtime_status_suspended(dev); |
4650 | if (fep->rpm_active) { |
4651 | ret = pm_runtime_force_suspend(dev); |
4652 | if (ret < 0) { |
4653 | rtnl_unlock(); |
4654 | return ret; |
4655 | } |
4656 | } |
4657 | } |
4658 | rtnl_unlock(); |
4659 | |
4660 | if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) |
4661 | regulator_disable(regulator: fep->reg_phy); |
4662 | |
4663 | /* SOC supply clock to phy, when clock is disabled, phy link down |
4664 | * SOC control phy regulator, when regulator is disabled, phy link down |
4665 | */ |
4666 | if (fep->clk_enet_out || fep->reg_phy) |
4667 | fep->link = 0; |
4668 | |
4669 | return 0; |
4670 | } |
4671 | |
4672 | static int __maybe_unused fec_resume(struct device *dev) |
4673 | { |
4674 | struct net_device *ndev = dev_get_drvdata(dev); |
4675 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4676 | int ret; |
4677 | int val; |
4678 | |
4679 | if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { |
4680 | ret = regulator_enable(regulator: fep->reg_phy); |
4681 | if (ret) |
4682 | return ret; |
4683 | } |
4684 | |
4685 | rtnl_lock(); |
4686 | if (netif_running(dev: ndev)) { |
4687 | if (fep->rpm_active) |
4688 | pm_runtime_force_resume(dev); |
4689 | |
4690 | ret = fec_enet_clk_enable(ndev, enable: true); |
4691 | if (ret) { |
4692 | rtnl_unlock(); |
4693 | goto failed_clk; |
4694 | } |
4695 | if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { |
4696 | fec_enet_stop_mode(fep, enabled: false); |
4697 | if (fep->wake_irq) { |
4698 | disable_irq_wake(irq: fep->wake_irq); |
4699 | enable_irq(irq: fep->wake_irq); |
4700 | } |
4701 | |
4702 | val = readl(addr: fep->hwp + FEC_ECNTRL); |
4703 | val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); |
4704 | writel(val, addr: fep->hwp + FEC_ECNTRL); |
4705 | fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; |
4706 | } else { |
4707 | pinctrl_pm_select_default_state(dev: &fep->pdev->dev); |
4708 | } |
4709 | fec_restart(ndev); |
4710 | netif_tx_lock_bh(dev: ndev); |
4711 | netif_device_attach(dev: ndev); |
4712 | netif_tx_unlock_bh(dev: ndev); |
4713 | napi_enable(n: &fep->napi); |
4714 | phy_init_hw(phydev: ndev->phydev); |
4715 | phy_start(phydev: ndev->phydev); |
4716 | } |
4717 | rtnl_unlock(); |
4718 | |
4719 | return 0; |
4720 | |
4721 | failed_clk: |
4722 | if (fep->reg_phy) |
4723 | regulator_disable(regulator: fep->reg_phy); |
4724 | return ret; |
4725 | } |
4726 | |
4727 | static int __maybe_unused fec_runtime_suspend(struct device *dev) |
4728 | { |
4729 | struct net_device *ndev = dev_get_drvdata(dev); |
4730 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4731 | |
4732 | clk_disable_unprepare(clk: fep->clk_ahb); |
4733 | clk_disable_unprepare(clk: fep->clk_ipg); |
4734 | |
4735 | return 0; |
4736 | } |
4737 | |
4738 | static int __maybe_unused fec_runtime_resume(struct device *dev) |
4739 | { |
4740 | struct net_device *ndev = dev_get_drvdata(dev); |
4741 | struct fec_enet_private *fep = netdev_priv(dev: ndev); |
4742 | int ret; |
4743 | |
4744 | ret = clk_prepare_enable(clk: fep->clk_ahb); |
4745 | if (ret) |
4746 | return ret; |
4747 | ret = clk_prepare_enable(clk: fep->clk_ipg); |
4748 | if (ret) |
4749 | goto failed_clk_ipg; |
4750 | |
4751 | return 0; |
4752 | |
4753 | failed_clk_ipg: |
4754 | clk_disable_unprepare(clk: fep->clk_ahb); |
4755 | return ret; |
4756 | } |
4757 | |
/* Power-management callbacks: system sleep (suspend/resume) plus
 * runtime PM (clock gating while idle).
 */
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
4762 | |
/* Platform-driver registration: matched either via the OF table
 * (fec_dt_ids) or the legacy platform id_table (fec_devtype).
 */
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove_new = fec_drv_remove,
};
4774 | |
4775 | module_platform_driver(fec_driver); |
4776 | |
4777 | MODULE_LICENSE("GPL" ); |
4778 | |