1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Xilinx Axi Ethernet device driver |
4 | * |
5 | * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi |
6 | * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> |
7 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. |
8 | * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> |
9 | * Copyright (c) 2010 - 2011 PetaLogix |
10 | * Copyright (c) 2019 - 2022 Calian Advanced Technologies |
11 | * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. |
12 | * |
 * This is a driver for the Xilinx Axi Ethernet, which is used in the
 * Virtex-6 and Spartan-6 FPGAs.
15 | * |
16 | * TODO: |
17 | * - Add Axi Fifo support. |
18 | * - Factor out Axi DMA code into separate driver. |
19 | * - Test and fix basic multicast filtering. |
20 | * - Add support for extended multicast filtering. |
21 | * - Test basic VLAN support. |
22 | * - Add support for extended VLAN support. |
23 | */ |
24 | |
25 | #include <linux/clk.h> |
26 | #include <linux/delay.h> |
27 | #include <linux/etherdevice.h> |
28 | #include <linux/module.h> |
29 | #include <linux/netdevice.h> |
30 | #include <linux/of.h> |
31 | #include <linux/of_mdio.h> |
32 | #include <linux/of_net.h> |
33 | #include <linux/of_irq.h> |
34 | #include <linux/of_address.h> |
35 | #include <linux/platform_device.h> |
36 | #include <linux/skbuff.h> |
37 | #include <linux/math64.h> |
38 | #include <linux/phy.h> |
39 | #include <linux/mii.h> |
40 | #include <linux/ethtool.h> |
41 | |
42 | #include "xilinx_axienet.h" |
43 | |
44 | /* Descriptors defines for Tx and Rx DMA */ |
45 | #define TX_BD_NUM_DEFAULT 128 |
46 | #define RX_BD_NUM_DEFAULT 1024 |
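/* A frame needs one BD for the skb head plus one per possible page fragment */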
47 | #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) |
48 | #define TX_BD_NUM_MAX 4096 |
49 | #define RX_BD_NUM_MAX 4096 |
50 | |
51 | /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ |
52 | #define DRIVER_NAME "xaxienet" |
53 | #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" |
54 | #define DRIVER_VERSION "1.00a" |
55 | |
56 | #define AXIENET_REGS_N 40 |
57 | |
58 | /* Match table for of_platform binding */ |
59 | static const struct of_device_id axienet_of_match[] = { |
60 | { .compatible = "xlnx,axi-ethernet-1.00.a" , }, |
61 | { .compatible = "xlnx,axi-ethernet-1.01.a" , }, |
62 | { .compatible = "xlnx,axi-ethernet-2.01.a" , }, |
63 | {}, |
64 | }; |
65 | |
66 | MODULE_DEVICE_TABLE(of, axienet_of_match); |
67 | |
68 | /* Option table for setting up Axi Ethernet hardware options */ |
69 | static struct axienet_option axienet_options[] = { |
70 | /* Turn on jumbo packet support for both Rx and Tx */ |
71 | { |
72 | .opt = XAE_OPTION_JUMBO, |
73 | .reg = XAE_TC_OFFSET, |
74 | .m_or = XAE_TC_JUM_MASK, |
75 | }, { |
76 | .opt = XAE_OPTION_JUMBO, |
77 | .reg = XAE_RCW1_OFFSET, |
78 | .m_or = XAE_RCW1_JUM_MASK, |
79 | }, { /* Turn on VLAN packet support for both Rx and Tx */ |
80 | .opt = XAE_OPTION_VLAN, |
81 | .reg = XAE_TC_OFFSET, |
82 | .m_or = XAE_TC_VLAN_MASK, |
83 | }, { |
84 | .opt = XAE_OPTION_VLAN, |
85 | .reg = XAE_RCW1_OFFSET, |
86 | .m_or = XAE_RCW1_VLAN_MASK, |
87 | }, { /* Turn on FCS stripping on receive packets */ |
88 | .opt = XAE_OPTION_FCS_STRIP, |
89 | .reg = XAE_RCW1_OFFSET, |
90 | .m_or = XAE_RCW1_FCS_MASK, |
91 | }, { /* Turn on FCS insertion on transmit packets */ |
92 | .opt = XAE_OPTION_FCS_INSERT, |
93 | .reg = XAE_TC_OFFSET, |
94 | .m_or = XAE_TC_FCS_MASK, |
95 | }, { /* Turn off length/type field checking on receive packets */ |
96 | .opt = XAE_OPTION_LENTYPE_ERR, |
97 | .reg = XAE_RCW1_OFFSET, |
98 | .m_or = XAE_RCW1_LT_DIS_MASK, |
99 | }, { /* Turn on Rx flow control */ |
100 | .opt = XAE_OPTION_FLOW_CONTROL, |
101 | .reg = XAE_FCC_OFFSET, |
102 | .m_or = XAE_FCC_FCRX_MASK, |
103 | }, { /* Turn on Tx flow control */ |
104 | .opt = XAE_OPTION_FLOW_CONTROL, |
105 | .reg = XAE_FCC_OFFSET, |
106 | .m_or = XAE_FCC_FCTX_MASK, |
107 | }, { /* Turn on promiscuous frame filtering */ |
108 | .opt = XAE_OPTION_PROMISC, |
109 | .reg = XAE_FMI_OFFSET, |
110 | .m_or = XAE_FMI_PM_MASK, |
111 | }, { /* Enable transmitter */ |
112 | .opt = XAE_OPTION_TXEN, |
113 | .reg = XAE_TC_OFFSET, |
114 | .m_or = XAE_TC_TX_MASK, |
115 | }, { /* Enable receiver */ |
116 | .opt = XAE_OPTION_RXEN, |
117 | .reg = XAE_RCW1_OFFSET, |
118 | .m_or = XAE_RCW1_RX_MASK, |
119 | }, |
120 | {} |
121 | }; |
122 | |
123 | /** |
124 | * axienet_dma_in32 - Memory mapped Axi DMA register read |
125 | * @lp: Pointer to axienet local structure |
126 | * @reg: Address offset from the base address of the Axi DMA core |
127 | * |
128 | * Return: The contents of the Axi DMA register |
129 | * |
130 | * This function returns the contents of the corresponding Axi DMA register. |
131 | */ |
132 | static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) |
133 | { |
134 | return ioread32(lp->dma_regs + reg); |
135 | } |
136 | |
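/* Program a descriptor's buffer address, splitting the DMA address across
 * the 32-bit phys and phys_msb fields when the core is built for 64-bit
 * addressing.
 */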
137 | static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, |
138 | struct axidma_bd *desc) |
139 | { |
140 | desc->phys = lower_32_bits(addr); |
141 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
142 | desc->phys_msb = upper_32_bits(addr); |
143 | } |
144 | |
145 | static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, |
146 | struct axidma_bd *desc) |
147 | { |
148 | dma_addr_t ret = desc->phys; |
149 | |
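	/* Shifting by 16 twice, rather than by 32 once, keeps the shift
	 * well-defined when dma_addr_t is only 32 bits wide.
	 */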
150 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
151 | ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; |
152 | |
153 | return ret; |
154 | } |
155 | |
156 | /** |
157 | * axienet_dma_bd_release - Release buffer descriptor rings |
158 | * @ndev: Pointer to the net_device structure |
159 | * |
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver's stop routine is called.
163 | */ |
164 | static void axienet_dma_bd_release(struct net_device *ndev) |
165 | { |
166 | int i; |
	struct axienet_local *lp = netdev_priv(ndev);
168 | |
169 | /* If we end up here, tx_bd_v must have been DMA allocated. */ |
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);
174 | |
175 | if (!lp->rx_bd_v) |
176 | return; |
177 | |
178 | for (i = 0; i < lp->rx_bd_num; i++) { |
179 | dma_addr_t phys; |
180 | |
181 | /* A NULL skb means this descriptor has not been initialised |
182 | * at all. |
183 | */ |
184 | if (!lp->rx_bd_v[i].skb) |
185 | break; |
186 | |
187 | dev_kfree_skb(lp->rx_bd_v[i].skb); |
188 | |
189 | /* For each descriptor, we programmed cntrl with the (non-zero) |
190 | * descriptor size, after it had been successfully allocated. |
191 | * So a non-zero value in there means we need to unmap it. |
192 | */ |
193 | if (lp->rx_bd_v[i].cntrl) { |
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
195 | dma_unmap_single(lp->dev, phys, |
196 | lp->max_frm_size, DMA_FROM_DEVICE); |
197 | } |
198 | } |
199 | |
	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
204 | } |
205 | |
206 | /** |
207 | * axienet_usec_to_timer - Calculate IRQ delay timer value |
208 | * @lp: Pointer to the axienet_local structure |
 * @coalesce_usec: Microseconds to convert into timer value
 *
 * Return: Timer value to be programmed, clamped to the 8-bit maximum of 255.
 */
211 | static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) |
212 | { |
213 | u32 result; |
214 | u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ |
215 | |
216 | if (lp->axi_clk) |
		clk_rate = clk_get_rate(lp->axi_clk);
218 | |
219 | /* 1 Timeout Interval = 125 * (clock period of SG clock) */ |
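	/* For example, a 125 MHz SG clock gives a 1 us tick, so
	 * coalesce_usec maps 1:1 onto the timer value (capped at 255 below).
	 */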
220 | result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, |
221 | (u64)125000000); |
222 | if (result > 255) |
223 | result = 255; |
224 | |
225 | return result; |
226 | } |
227 | |
228 | /** |
229 | * axienet_dma_start - Set up DMA registers and start DMA operation |
230 | * @lp: Pointer to the axienet_local structure |
231 | */ |
232 | static void axienet_dma_start(struct axienet_local *lp) |
233 | { |
234 | /* Start updating the Rx channel control register */ |
235 | lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | |
236 | XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; |
237 | /* Only set interrupt delay timer if not generating an interrupt on |
238 | * the first RX packet. Otherwise leave at 0 to disable delay interrupt. |
239 | */ |
240 | if (lp->coalesce_count_rx > 1) |
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
				  << XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
245 | |
246 | /* Start updating the Tx channel control register */ |
247 | lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | |
248 | XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; |
249 | /* Only set interrupt delay timer if not generating an interrupt on |
250 | * the first TX packet. Otherwise leave at 0 to disable delay interrupt. |
251 | */ |
252 | if (lp->coalesce_count_tx > 1) |
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
				  << XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
257 | |
258 | /* Populate the tail pointer and bring the Rx Axi DMA engine out of |
259 | * halted state. This will make the Rx side ready for reception. |
260 | */ |
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
266 | |
267 | /* Write to the RS (Run-stop) bit in the Tx channel control register. |
268 | * Tx channel is now ready to run. But only after we write to the |
269 | * tail pointer register that the Tx channel will start transmitting. |
270 | */ |
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
274 | } |
275 | |
276 | /** |
277 | * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA |
278 | * @ndev: Pointer to the net_device structure |
279 | * |
 * Return: 0 on success, -ENOMEM on failure
281 | * |
282 | * This function is called to initialize the Rx and Tx DMA descriptor |
283 | * rings. This initializes the descriptors with required default values |
284 | * and is called when Axi Ethernet driver reset is called. |
285 | */ |
286 | static int axienet_dma_bd_init(struct net_device *ndev) |
287 | { |
288 | int i; |
289 | struct sk_buff *skb; |
	struct axienet_local *lp = netdev_priv(ndev);
291 | |
292 | /* Reset the indexes which are used for accessing the BDs */ |
293 | lp->tx_bd_ci = 0; |
294 | lp->tx_bd_tail = 0; |
295 | lp->rx_bd_ci = 0; |
296 | |
297 | /* Allocate the Tx and Rx buffer descriptors. */ |
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
301 | if (!lp->tx_bd_v) |
302 | return -ENOMEM; |
303 | |
	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
307 | if (!lp->rx_bd_v) |
308 | goto out; |
309 | |
310 | for (i = 0; i < lp->tx_bd_num; i++) { |
311 | dma_addr_t addr = lp->tx_bd_p + |
312 | sizeof(*lp->tx_bd_v) * |
313 | ((i + 1) % lp->tx_bd_num); |
314 | |
315 | lp->tx_bd_v[i].next = lower_32_bits(addr); |
316 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
317 | lp->tx_bd_v[i].next_msb = upper_32_bits(addr); |
318 | } |
319 | |
320 | for (i = 0; i < lp->rx_bd_num; i++) { |
321 | dma_addr_t addr; |
322 | |
323 | addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * |
324 | ((i + 1) % lp->rx_bd_num); |
325 | lp->rx_bd_v[i].next = lower_32_bits(addr); |
326 | if (lp->features & XAE_FEATURE_DMA_64BIT) |
327 | lp->rx_bd_v[i].next_msb = upper_32_bits(addr); |
328 | |
		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
330 | if (!skb) |
331 | goto out; |
332 | |
333 | lp->rx_bd_v[i].skb = skb; |
334 | addr = dma_map_single(lp->dev, skb->data, |
335 | lp->max_frm_size, DMA_FROM_DEVICE); |
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
341 | |
342 | lp->rx_bd_v[i].cntrl = lp->max_frm_size; |
343 | } |
344 | |
345 | axienet_dma_start(lp); |
346 | |
347 | return 0; |
348 | out: |
349 | axienet_dma_bd_release(ndev); |
350 | return -ENOMEM; |
351 | } |
352 | |
353 | /** |
354 | * axienet_set_mac_address - Write the MAC address |
355 | * @ndev: Pointer to the net_device structure |
356 | * @address: 6 byte Address to be written as MAC address |
357 | * |
358 | * This function is called to initialize the MAC address of the Axi Ethernet |
359 | * core. It writes to the UAW0 and UAW1 registers of the core. |
360 | */ |
361 | static void axienet_set_mac_address(struct net_device *ndev, |
362 | const void *address) |
363 | { |
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
370 | |
	/* Set up the unicast MAC address filter with the given MAC address */
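	/* For example, MAC 00:0a:35:01:02:03 writes 0x01350a00 to UAW0 and
	 * 0x0302 to the low 16 bits of UAW1.
	 */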
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
382 | } |
383 | |
384 | /** |
385 | * netdev_set_mac_address - Write the MAC address (from outside the driver) |
386 | * @ndev: Pointer to the net_device structure |
387 | * @p: 6 byte Address to be written as MAC address |
388 | * |
389 | * Return: 0 for all conditions. Presently, there is no failure case. |
390 | * |
391 | * This function is called to initialize the MAC address of the Axi Ethernet |
392 | * core. It calls the core specific axienet_set_mac_address. This is the |
393 | * function that goes into net_device_ops structure entry ndo_set_mac_address. |
394 | */ |
395 | static int netdev_set_mac_address(struct net_device *ndev, void *p) |
396 | { |
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
399 | return 0; |
400 | } |
401 | |
402 | /** |
403 | * axienet_set_multicast_list - Prepare the multicast table |
404 | * @ndev: Pointer to the net_device structure |
405 | * |
406 | * This function is called to initialize the multicast table during |
407 | * initialization. The Axi Ethernet basic multicast support has a four-entry |
408 | * multicast table which is initialized here. Additionally this function |
409 | * goes into the net_device_ops structure entry ndo_set_multicast_list. This |
410 | * means whenever the multicast table entries need to be updated this |
411 | * function gets called. |
412 | */ |
413 | static void axienet_set_multicast_list(struct net_device *ndev) |
414 | { |
415 | int i; |
416 | u32 reg, af0reg, af1reg; |
	struct axienet_local *lp = netdev_priv(ndev);
418 | |
419 | if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || |
420 | netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { |
421 | /* We must make the kernel realize we had to move into |
422 | * promiscuous mode. If it was a promiscuous mode request |
423 | * the flag is already set. If not we set it. |
424 | */ |
425 | ndev->flags |= IFF_PROMISC; |
426 | reg = axienet_ior(lp, XAE_FMI_OFFSET); |
427 | reg |= XAE_FMI_PM_MASK; |
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
430 | } else if (!netdev_mc_empty(ndev)) { |
431 | struct netdev_hw_addr *ha; |
432 | |
433 | i = 0; |
434 | netdev_for_each_mc_addr(ha, ndev) { |
435 | if (i >= XAE_MULTICAST_CAM_TABLE_NUM) |
436 | break; |
437 | |
438 | af0reg = (ha->addr[0]); |
439 | af0reg |= (ha->addr[1] << 8); |
440 | af0reg |= (ha->addr[2] << 16); |
441 | af0reg |= (ha->addr[3] << 24); |
442 | |
443 | af1reg = (ha->addr[4]); |
444 | af1reg |= (ha->addr[5] << 8); |
445 | |
446 | reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; |
447 | reg |= i; |
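			/* The low byte of FMI selects which address filter
			 * table entry the AF0/AF1 writes below will access.
			 */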
448 | |
			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
452 | i++; |
453 | } |
454 | } else { |
455 | reg = axienet_ior(lp, XAE_FMI_OFFSET); |
456 | reg &= ~XAE_FMI_PM_MASK; |
457 | |
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
459 | |
460 | for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { |
461 | reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; |
462 | reg |= i; |
463 | |
			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
467 | } |
468 | |
469 | dev_info(&ndev->dev, "Promiscuous mode disabled.\n" ); |
470 | } |
471 | } |
472 | |
473 | /** |
474 | * axienet_setoptions - Set an Axi Ethernet option |
475 | * @ndev: Pointer to the net_device structure |
476 | * @options: Option to be enabled/disabled |
477 | * |
478 | * The Axi Ethernet core has multiple features which can be selectively turned |
479 | * on or off. The typical options could be jumbo frame option, basic VLAN |
480 | * option, promiscuous mode option etc. This function is used to set or clear |
481 | * these options in the Axi Ethernet hardware. This is done through |
 * the axienet_option structure.
483 | */ |
484 | static void axienet_setoptions(struct net_device *ndev, u32 options) |
485 | { |
486 | int reg; |
	struct axienet_local *lp = netdev_priv(ndev);
488 | struct axienet_option *tp = &axienet_options[0]; |
489 | |
490 | while (tp->opt) { |
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
492 | if (options & tp->opt) |
493 | reg |= tp->m_or; |
		axienet_iow(lp, tp->reg, reg);
495 | tp++; |
496 | } |
497 | |
498 | lp->options |= options; |
499 | } |
500 | |
501 | static int __axienet_device_reset(struct axienet_local *lp) |
502 | { |
503 | u32 value; |
504 | int ret; |
505 | |
506 | /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset |
507 | * process of Axi DMA takes a while to complete as all pending |
508 | * commands/transfers will be flushed or completed during this |
509 | * reset process. |
510 | * Note that even though both TX and RX have their own reset register, |
511 | * they both reset the entire DMA core, so only one needs to be used. |
512 | */ |
513 | axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); |
514 | ret = read_poll_timeout(axienet_dma_in32, value, |
515 | !(value & XAXIDMA_CR_RESET_MASK), |
516 | DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
517 | XAXIDMA_TX_CR_OFFSET); |
518 | if (ret) { |
519 | dev_err(lp->dev, "%s: DMA reset timeout!\n" , __func__); |
520 | return ret; |
521 | } |
522 | |
523 | /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ |
524 | ret = read_poll_timeout(axienet_ior, value, |
525 | value & XAE_INT_PHYRSTCMPLT_MASK, |
526 | DELAY_OF_ONE_MILLISEC, 50000, false, lp, |
527 | XAE_IS_OFFSET); |
528 | if (ret) { |
529 | dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n" , __func__); |
530 | return ret; |
531 | } |
532 | |
533 | return 0; |
534 | } |
535 | |
536 | /** |
537 | * axienet_dma_stop - Stop DMA operation |
538 | * @lp: Pointer to the axienet_local structure |
539 | */ |
540 | static void axienet_dma_stop(struct axienet_local *lp) |
541 | { |
542 | int count; |
543 | u32 cr, sr; |
544 | |
545 | cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
546 | cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);
549 | |
550 | cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
551 | cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); |
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);
554 | |
555 | /* Give DMAs a chance to halt gracefully */ |
556 | sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
557 | for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
		msleep(20);
559 | sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
560 | } |
561 | |
562 | sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
563 | for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { |
		msleep(20);
565 | sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
566 | } |
567 | |
568 | /* Do a reset to ensure DMA is really stopped */ |
569 | axienet_lock_mii(lp); |
570 | __axienet_device_reset(lp); |
571 | axienet_unlock_mii(lp); |
572 | } |
573 | |
574 | /** |
575 | * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. |
576 | * @ndev: Pointer to the net_device structure |
577 | * |
578 | * This function is called to reset and initialize the Axi Ethernet core. This |
579 | * is typically called during initialization. It does a reset of the Axi DMA |
580 | * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines |
581 | * are connected to Axi Ethernet reset lines, this in turn resets the Axi |
582 | * Ethernet core. No separate hardware reset is done for the Axi Ethernet |
583 | * core. |
 * Return: 0 on success or a negative error number otherwise.
585 | */ |
586 | static int axienet_device_reset(struct net_device *ndev) |
587 | { |
588 | u32 axienet_status; |
	struct axienet_local *lp = netdev_priv(ndev);
590 | int ret; |
591 | |
592 | ret = __axienet_device_reset(lp); |
593 | if (ret) |
594 | return ret; |
595 | |
596 | lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; |
597 | lp->options |= XAE_OPTION_VLAN; |
598 | lp->options &= (~XAE_OPTION_JUMBO); |
599 | |
600 | if ((ndev->mtu > XAE_MTU) && |
601 | (ndev->mtu <= XAE_JUMBO_MTU)) { |
602 | lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + |
603 | XAE_TRL_SIZE; |
604 | |
605 | if (lp->max_frm_size <= lp->rxmem) |
606 | lp->options |= XAE_OPTION_JUMBO; |
607 | } |
608 | |
609 | ret = axienet_dma_bd_init(ndev); |
610 | if (ret) { |
611 | netdev_err(dev: ndev, format: "%s: descriptor allocation failed\n" , |
612 | __func__); |
613 | return ret; |
614 | } |
615 | |
616 | axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); |
617 | axienet_status &= ~XAE_RCW1_RX_MASK; |
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
619 | |
620 | axienet_status = axienet_ior(lp, XAE_IP_OFFSET); |
621 | if (axienet_status & XAE_INT_RXRJECT_MASK) |
622 | axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); |
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
625 | |
626 | axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); |
627 | |
628 | /* Sync default options with HW but leave receiver and |
629 | * transmitter disabled. |
630 | */ |
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
633 | axienet_set_mac_address(ndev, NULL); |
634 | axienet_set_multicast_list(ndev); |
	axienet_setoptions(ndev, lp->options);
636 | |
	netif_trans_update(ndev);
638 | |
639 | return 0; |
640 | } |
641 | |
642 | /** |
643 | * axienet_free_tx_chain - Clean up a series of linked TX descriptors. |
644 | * @lp: Pointer to the axienet_local structure |
645 | * @first_bd: Index of first descriptor to clean up |
646 | * @nr_bds: Max number of descriptors to clean up |
647 | * @force: Whether to clean descriptors even if not complete |
648 | * @sizep: Pointer to a u32 filled with the total sum of all bytes |
649 | * in all cleaned-up descriptors. Ignored if NULL. |
650 | * @budget: NAPI budget (use 0 when not called from NAPI poll) |
651 | * |
652 | * Would either be called after a successful transmit operation, or after |
653 | * there was an error when setting up the chain. |
654 | * Returns the number of descriptors handled. |
655 | */ |
656 | static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, |
657 | int nr_bds, bool force, u32 *sizep, int budget) |
658 | { |
659 | struct axidma_bd *cur_p; |
660 | unsigned int status; |
661 | dma_addr_t phys; |
662 | int i; |
663 | |
664 | for (i = 0; i < nr_bds; i++) { |
665 | cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; |
666 | status = cur_p->status; |
667 | |
668 | /* If force is not specified, clean up only descriptors |
669 | * that have been completed by the MAC. |
670 | */ |
671 | if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) |
672 | break; |
673 | |
674 | /* Ensure we see complete descriptor update */ |
675 | dma_rmb(); |
		phys = desc_get_phys_addr(lp, cur_p);
677 | dma_unmap_single(lp->dev, phys, |
678 | (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), |
679 | DMA_TO_DEVICE); |
680 | |
681 | if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) |
			napi_consume_skb(cur_p->skb, budget);
683 | |
684 | cur_p->app0 = 0; |
685 | cur_p->app1 = 0; |
686 | cur_p->app2 = 0; |
687 | cur_p->app4 = 0; |
688 | cur_p->skb = NULL; |
689 | /* ensure our transmit path and device don't prematurely see status cleared */ |
690 | wmb(); |
691 | cur_p->cntrl = 0; |
692 | cur_p->status = 0; |
693 | |
694 | if (sizep) |
695 | *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; |
696 | } |
697 | |
698 | return i; |
699 | } |
700 | |
701 | /** |
702 | * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy |
703 | * @lp: Pointer to the axienet_local structure |
704 | * @num_frag: The number of BDs to check for |
705 | * |
706 | * Return: 0, on success |
707 | * NETDEV_TX_BUSY, if any of the descriptors are not free |
708 | * |
709 | * This function is invoked before BDs are allocated and transmission starts. |
710 | * This function returns 0 if a BD or group of BDs can be allocated for |
711 | * transmission. If the BD or any of the BDs are not free the function |
712 | * returns a busy status. |
713 | */ |
714 | static inline int axienet_check_tx_bd_space(struct axienet_local *lp, |
715 | int num_frag) |
716 | { |
717 | struct axidma_bd *cur_p; |
718 | |
719 | /* Ensure we see all descriptor updates from device or TX polling */ |
720 | rmb(); |
721 | cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % |
722 | lp->tx_bd_num]; |
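	/* Descriptors complete in ring order, so if the slot num_frag
	 * entries ahead of the tail is free, every slot in between is too.
	 */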
723 | if (cur_p->cntrl) |
724 | return NETDEV_TX_BUSY; |
725 | return 0; |
726 | } |
727 | |
728 | /** |
729 | * axienet_tx_poll - Invoked once a transmit is completed by the |
730 | * Axi DMA Tx channel. |
731 | * @napi: Pointer to NAPI structure. |
732 | * @budget: Max number of TX packets to process. |
733 | * |
734 | * Return: Number of TX packets processed. |
735 | * |
736 | * This function is invoked from the NAPI processing to notify the completion |
737 | * of transmit operation. It clears fields in the corresponding Tx BDs and |
738 | * unmaps the corresponding buffer so that CPU can regain ownership of the |
739 | * buffer. It finally invokes "netif_wake_queue" to restart transmission if |
740 | * required. |
741 | */ |
742 | static int axienet_tx_poll(struct napi_struct *napi, int budget) |
743 | { |
744 | struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); |
745 | struct net_device *ndev = lp->ndev; |
746 | u32 size = 0; |
747 | int packets; |
748 | |
	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
750 | |
751 | if (packets) { |
752 | lp->tx_bd_ci += packets; |
753 | if (lp->tx_bd_ci >= lp->tx_bd_num) |
754 | lp->tx_bd_ci %= lp->tx_bd_num; |
755 | |
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);
760 | |
761 | /* Matches barrier in axienet_start_xmit */ |
762 | smp_mb(); |
763 | |
764 | if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
			netif_wake_queue(ndev);
766 | } |
767 | |
	if (packets < budget && napi_complete_done(napi, packets)) {
769 | /* Re-enable TX completion interrupts. This should |
770 | * cause an immediate interrupt if any TX packets are |
771 | * already pending. |
772 | */ |
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
774 | } |
775 | return packets; |
776 | } |
777 | |
778 | /** |
779 | * axienet_start_xmit - Starts the transmission. |
780 | * @skb: sk_buff pointer that contains data to be Txed. |
781 | * @ndev: Pointer to net_device structure. |
782 | * |
783 | * Return: NETDEV_TX_OK, on success |
784 | * NETDEV_TX_BUSY, if any of the descriptors are not free |
785 | * |
786 | * This function is invoked from upper layers to initiate transmission. The |
787 | * function uses the next available free BDs and populates their fields to |
788 | * start the transmission. Additionally if checksum offloading is supported, |
789 | * it populates AXI Stream Control fields with appropriate values. |
790 | */ |
791 | static netdev_tx_t |
792 | axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
793 | { |
794 | u32 ii; |
795 | u32 num_frag; |
796 | u32 csum_start_off; |
797 | u32 csum_index_off; |
798 | skb_frag_t *frag; |
799 | dma_addr_t tail_p, phys; |
800 | u32 orig_tail_ptr, new_tail_ptr; |
	struct axienet_local *lp = netdev_priv(ndev);
802 | struct axidma_bd *cur_p; |
803 | |
804 | orig_tail_ptr = lp->tx_bd_tail; |
805 | new_tail_ptr = orig_tail_ptr; |
806 | |
807 | num_frag = skb_shinfo(skb)->nr_frags; |
808 | cur_p = &lp->tx_bd_v[orig_tail_ptr]; |
809 | |
	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
811 | /* Should not happen as last start_xmit call should have |
812 | * checked for sufficient space and queue should only be |
813 | * woken when sufficient space is available. |
814 | */ |
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
818 | return NETDEV_TX_BUSY; |
819 | } |
820 | |
821 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
822 | if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { |
823 | /* Tx Full Checksum Offload Enabled */ |
824 | cur_p->app0 |= 2; |
825 | } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { |
826 | csum_start_off = skb_transport_offset(skb); |
827 | csum_index_off = csum_start_off + skb->csum_offset; |
828 | /* Tx Partial Checksum Offload Enabled */ |
829 | cur_p->app0 |= 1; |
830 | cur_p->app1 = (csum_start_off << 16) | csum_index_off; |
831 | } |
832 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
833 | cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ |
834 | } |
835 | |
836 | phys = dma_map_single(lp->dev, skb->data, |
837 | skb_headlen(skb), DMA_TO_DEVICE); |
838 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
839 | if (net_ratelimit()) |
840 | netdev_err(dev: ndev, format: "TX DMA mapping error\n" ); |
841 | ndev->stats.tx_dropped++; |
842 | return NETDEV_TX_OK; |
843 | } |
	desc_set_phys_addr(lp, phys, cur_p);
845 | cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; |
846 | |
847 | for (ii = 0; ii < num_frag; ii++) { |
848 | if (++new_tail_ptr >= lp->tx_bd_num) |
849 | new_tail_ptr = 0; |
850 | cur_p = &lp->tx_bd_v[new_tail_ptr]; |
851 | frag = &skb_shinfo(skb)->frags[ii]; |
852 | phys = dma_map_single(lp->dev, |
853 | skb_frag_address(frag), |
854 | skb_frag_size(frag), |
855 | DMA_TO_DEVICE); |
856 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
857 | if (net_ratelimit()) |
858 | netdev_err(dev: ndev, format: "TX DMA mapping error\n" ); |
859 | ndev->stats.tx_dropped++; |
860 | axienet_free_tx_chain(lp, first_bd: orig_tail_ptr, nr_bds: ii + 1, |
861 | force: true, NULL, budget: 0); |
862 | return NETDEV_TX_OK; |
863 | } |
		desc_set_phys_addr(lp, phys, cur_p);
865 | cur_p->cntrl = skb_frag_size(frag); |
866 | } |
867 | |
868 | cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; |
869 | cur_p->skb = skb; |
870 | |
871 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; |
872 | if (++new_tail_ptr >= lp->tx_bd_num) |
873 | new_tail_ptr = 0; |
874 | WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); |
875 | |
876 | /* Start the transfer */ |
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
878 | |
879 | /* Stop queue if next transmit may not have space */ |
880 | if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { |
		netif_stop_queue(ndev);
882 | |
883 | /* Matches barrier in axienet_tx_poll */ |
884 | smp_mb(); |
885 | |
886 | /* Space might have just been freed - check again */ |
887 | if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
			netif_wake_queue(ndev);
889 | } |
890 | |
891 | return NETDEV_TX_OK; |
892 | } |
893 | |
894 | /** |
895 | * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. |
896 | * @napi: Pointer to NAPI structure. |
897 | * @budget: Max number of RX packets to process. |
898 | * |
899 | * Return: Number of RX packets processed. |
900 | */ |
901 | static int axienet_rx_poll(struct napi_struct *napi, int budget) |
902 | { |
903 | u32 length; |
904 | u32 csumstatus; |
905 | u32 size = 0; |
906 | int packets = 0; |
907 | dma_addr_t tail_p = 0; |
908 | struct axidma_bd *cur_p; |
909 | struct sk_buff *skb, *new_skb; |
910 | struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); |
911 | |
912 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
913 | |
914 | while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { |
915 | dma_addr_t phys; |
916 | |
917 | /* Ensure we see complete descriptor update */ |
918 | dma_rmb(); |
919 | |
920 | skb = cur_p->skb; |
921 | cur_p->skb = NULL; |
922 | |
923 | /* skb could be NULL if a previous pass already received the |
924 | * packet for this slot in the ring, but failed to refill it |
925 | * with a newly allocated buffer. In this case, don't try to |
926 | * receive it again. |
927 | */ |
928 | if (likely(skb)) { |
929 | length = cur_p->app4 & 0x0000FFFF; |
930 | |
			phys = desc_get_phys_addr(lp, cur_p);
932 | dma_unmap_single(lp->dev, phys, lp->max_frm_size, |
933 | DMA_FROM_DEVICE); |
934 | |
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
937 | /*skb_checksum_none_assert(skb);*/ |
938 | skb->ip_summed = CHECKSUM_NONE; |
939 | |
940 | /* if we're doing Rx csum offload, set it up */ |
941 | if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { |
942 | csumstatus = (cur_p->app2 & |
943 | XAE_FULL_CSUM_STATUS_MASK) >> 3; |
944 | if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || |
945 | csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { |
946 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
947 | } |
948 | } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && |
949 | skb->protocol == htons(ETH_P_IP) && |
950 | skb->len > 64) { |
951 | skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); |
952 | skb->ip_summed = CHECKSUM_COMPLETE; |
953 | } |
954 | |
955 | napi_gro_receive(napi, skb); |
956 | |
957 | size += length; |
958 | packets++; |
959 | } |
960 | |
		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
962 | if (!new_skb) |
963 | break; |
964 | |
965 | phys = dma_map_single(lp->dev, new_skb->data, |
966 | lp->max_frm_size, |
967 | DMA_FROM_DEVICE); |
968 | if (unlikely(dma_mapping_error(lp->dev, phys))) { |
969 | if (net_ratelimit()) |
				netdev_err(lp->ndev, "RX DMA mapping error\n");
971 | dev_kfree_skb(new_skb); |
972 | break; |
973 | } |
		desc_set_phys_addr(lp, phys, cur_p);
975 | |
976 | cur_p->cntrl = lp->max_frm_size; |
977 | cur_p->status = 0; |
978 | cur_p->skb = new_skb; |
979 | |
980 | /* Only update tail_p to mark this slot as usable after it has |
981 | * been successfully refilled. |
982 | */ |
983 | tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; |
984 | |
985 | if (++lp->rx_bd_ci >= lp->rx_bd_num) |
986 | lp->rx_bd_ci = 0; |
987 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
988 | } |
989 | |
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);
994 | |
995 | if (tail_p) |
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
997 | |
	if (packets < budget && napi_complete_done(napi, packets)) {
999 | /* Re-enable RX completion interrupts. This should |
1000 | * cause an immediate interrupt if any RX packets are |
1001 | * already pending. |
1002 | */ |
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1004 | } |
1005 | return packets; |
1006 | } |
1007 | |
1008 | /** |
1009 | * axienet_tx_irq - Tx Done Isr. |
1010 | * @irq: irq number |
1011 | * @_ndev: net_device pointer |
1012 | * |
1013 | * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. |
1014 | * |
1015 | * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the |
1016 | * TX BD processing. |
1017 | */ |
1018 | static irqreturn_t axienet_tx_irq(int irq, void *_ndev) |
1019 | { |
1020 | unsigned int status; |
1021 | struct net_device *ndev = _ndev; |
	struct axienet_local *lp = netdev_priv(ndev);
1023 | |
1024 | status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
1025 | |
1026 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
1027 | return IRQ_NONE; |
1028 | |
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1030 | |
1031 | if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { |
1032 | netdev_err(dev: ndev, format: "DMA Tx error 0x%x\n" , status); |
1033 | netdev_err(dev: ndev, format: "Current BD is at: 0x%x%08x\n" , |
1034 | (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, |
1035 | (lp->tx_bd_v[lp->tx_bd_ci]).phys); |
		schedule_work(&lp->dma_err_task);
1037 | } else { |
1038 | /* Disable further TX completion interrupts and schedule |
1039 | * NAPI to handle the completions. |
1040 | */ |
1041 | u32 cr = lp->tx_dma_cr; |
1042 | |
1043 | cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); |
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1045 | |
		napi_schedule(&lp->napi_tx);
1047 | } |
1048 | |
1049 | return IRQ_HANDLED; |
1050 | } |
1051 | |
1052 | /** |
1053 | * axienet_rx_irq - Rx Isr. |
1054 | * @irq: irq number |
1055 | * @_ndev: net_device pointer |
1056 | * |
1057 | * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. |
1058 | * |
1059 | * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD |
1060 | * processing. |
1061 | */ |
1062 | static irqreturn_t axienet_rx_irq(int irq, void *_ndev) |
1063 | { |
1064 | unsigned int status; |
1065 | struct net_device *ndev = _ndev; |
	struct axienet_local *lp = netdev_priv(ndev);
1067 | |
1068 | status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
1069 | |
1070 | if (!(status & XAXIDMA_IRQ_ALL_MASK)) |
1071 | return IRQ_NONE; |
1072 | |
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1074 | |
1075 | if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { |
1076 | netdev_err(dev: ndev, format: "DMA Rx error 0x%x\n" , status); |
1077 | netdev_err(dev: ndev, format: "Current BD is at: 0x%x%08x\n" , |
1078 | (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, |
1079 | (lp->rx_bd_v[lp->rx_bd_ci]).phys); |
		schedule_work(&lp->dma_err_task);
1081 | } else { |
1082 | /* Disable further RX completion interrupts and schedule |
1083 | * NAPI receive. |
1084 | */ |
1085 | u32 cr = lp->rx_dma_cr; |
1086 | |
1087 | cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); |
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1089 | |
		napi_schedule(&lp->napi_rx);
1091 | } |
1092 | |
1093 | return IRQ_HANDLED; |
1094 | } |
1095 | |
1096 | /** |
1097 | * axienet_eth_irq - Ethernet core Isr. |
1098 | * @irq: irq number |
1099 | * @_ndev: net_device pointer |
1100 | * |
1101 | * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. |
1102 | * |
1103 | * Handle miscellaneous conditions indicated by Ethernet core IRQ. |
1104 | */ |
1105 | static irqreturn_t axienet_eth_irq(int irq, void *_ndev) |
1106 | { |
1107 | struct net_device *ndev = _ndev; |
	struct axienet_local *lp = netdev_priv(ndev);
1109 | unsigned int pending; |
1110 | |
1111 | pending = axienet_ior(lp, XAE_IP_OFFSET); |
1112 | if (!pending) |
1113 | return IRQ_NONE; |
1114 | |
1115 | if (pending & XAE_INT_RXFIFOOVR_MASK) |
1116 | ndev->stats.rx_missed_errors++; |
1117 | |
1118 | if (pending & XAE_INT_RXRJECT_MASK) |
1119 | ndev->stats.rx_frame_errors++; |
1120 | |
	axienet_iow(lp, XAE_IS_OFFSET, pending);
1122 | return IRQ_HANDLED; |
1123 | } |
1124 | |
1125 | static void axienet_dma_err_handler(struct work_struct *work); |
1126 | |
1127 | /** |
1128 | * axienet_open - Driver open routine. |
1129 | * @ndev: Pointer to net_device structure |
1130 | * |
1131 | * Return: 0, on success. |
1132 | * non-zero error value on failure |
1133 | * |
1134 | * This is the driver open routine. It calls phylink_start to start the |
1135 | * PHY device. |
1136 | * It also allocates interrupt service routines, enables the interrupt lines |
1137 | * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer |
1138 | * descriptors are initialized. |
1139 | */ |
1140 | static int axienet_open(struct net_device *ndev) |
1141 | { |
1142 | int ret; |
	struct axienet_local *lp = netdev_priv(ndev);
1144 | |
1145 | dev_dbg(&ndev->dev, "axienet_open()\n" ); |
1146 | |
1147 | /* When we do an Axi Ethernet reset, it resets the complete core |
1148 | * including the MDIO. MDIO must be disabled before resetting. |
1149 | * Hold MDIO bus lock to avoid MDIO accesses during the reset. |
1150 | */ |
1151 | axienet_lock_mii(lp); |
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1156 | if (ret) { |
1157 | dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n" , ret); |
1158 | return ret; |
1159 | } |
1160 | |
1161 | phylink_start(lp->phylink); |
1162 | |
1163 | /* Enable worker thread for Axi DMA error handling */ |
1164 | INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); |
1165 | |
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
1168 | |
1169 | /* Enable interrupts for Axi DMA Tx */ |
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
1172 | if (ret) |
1173 | goto err_tx_irq; |
1174 | /* Enable interrupts for Axi DMA Rx */ |
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
1177 | if (ret) |
1178 | goto err_rx_irq; |
1179 | /* Enable interrupts for Axi Ethernet core (if defined) */ |
1180 | if (lp->eth_irq > 0) { |
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
1183 | if (ret) |
1184 | goto err_eth_irq; |
1185 | } |
1186 | |
1187 | return 0; |
1188 | |
1189 | err_eth_irq: |
1190 | free_irq(lp->rx_irq, ndev); |
1191 | err_rx_irq: |
1192 | free_irq(lp->tx_irq, ndev); |
1193 | err_tx_irq: |
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
1200 | return ret; |
1201 | } |
1202 | |
1203 | /** |
1204 | * axienet_stop - Driver stop routine. |
1205 | * @ndev: Pointer to net_device structure |
1206 | * |
1207 | * Return: 0, on success. |
1208 | * |
1209 | * This is the driver stop routine. It calls phylink_disconnect to stop the PHY |
1210 | * device. It also removes the interrupt handlers and disables the interrupts. |
1211 | * The Axi DMA Tx/Rx BDs are released. |
1212 | */ |
1213 | static int axienet_stop(struct net_device *ndev) |
1214 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1216 | |
1217 | dev_dbg(&ndev->dev, "axienet_close()\n" ); |
1218 | |
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
1221 | |
1222 | phylink_stop(lp->phylink); |
1223 | phylink_disconnect_phy(lp->phylink); |
1224 | |
	axienet_setoptions(ndev, lp->options &
1226 | ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); |
1227 | |
1228 | axienet_dma_stop(lp); |
1229 | |
	axienet_iow(lp, XAE_IE_OFFSET, 0);
1231 | |
	cancel_work_sync(&lp->dma_err_task);
1233 | |
1234 | if (lp->eth_irq > 0) |
1235 | free_irq(lp->eth_irq, ndev); |
1236 | free_irq(lp->tx_irq, ndev); |
1237 | free_irq(lp->rx_irq, ndev); |
1238 | |
1239 | axienet_dma_bd_release(ndev); |
1240 | return 0; |
1241 | } |
1242 | |
1243 | /** |
1244 | * axienet_change_mtu - Driver change mtu routine. |
1245 | * @ndev: Pointer to net_device structure |
1246 | * @new_mtu: New mtu value to be applied |
1247 | * |
 * Return: 0 on success, or a negative error value on failure.
1249 | * |
1250 | * This is the change mtu driver routine. It checks if the Axi Ethernet |
1251 | * hardware supports jumbo frames before changing the mtu. This can be |
1252 | * called only when the device is not up. |
1253 | */ |
1254 | static int axienet_change_mtu(struct net_device *ndev, int new_mtu) |
1255 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1257 | |
	if (netif_running(ndev))
1259 | return -EBUSY; |
1260 | |
1261 | if ((new_mtu + VLAN_ETH_HLEN + |
1262 | XAE_TRL_SIZE) > lp->rxmem) |
1263 | return -EINVAL; |
1264 | |
1265 | ndev->mtu = new_mtu; |
1266 | |
1267 | return 0; |
1268 | } |
1269 | |
1270 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1271 | /** |
1272 | * axienet_poll_controller - Axi Ethernet poll mechanism. |
1273 | * @ndev: Pointer to net_device structure |
1274 | * |
1275 | * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior |
1276 | * to polling the ISRs and are enabled back after the polling is done. |
1277 | */ |
1278 | static void axienet_poll_controller(struct net_device *ndev) |
1279 | { |
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
1287 | } |
1288 | #endif |
1289 | |
1290 | static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1291 | { |
1292 | struct axienet_local *lp = netdev_priv(dev); |
1293 | |
1294 | if (!netif_running(dev)) |
1295 | return -EINVAL; |
1296 | |
1297 | return phylink_mii_ioctl(lp->phylink, rq, cmd); |
1298 | } |
1299 | |
1300 | static void |
1301 | axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
1302 | { |
1303 | struct axienet_local *lp = netdev_priv(dev); |
1304 | unsigned int start; |
1305 | |
	netdev_stats_to_stats64(stats, &dev->stats);
1307 | |
1308 | do { |
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1313 | |
1314 | do { |
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1319 | } |
1320 | |
1321 | static const struct net_device_ops axienet_netdev_ops = { |
1322 | .ndo_open = axienet_open, |
1323 | .ndo_stop = axienet_stop, |
1324 | .ndo_start_xmit = axienet_start_xmit, |
1325 | .ndo_get_stats64 = axienet_get_stats64, |
1326 | .ndo_change_mtu = axienet_change_mtu, |
1327 | .ndo_set_mac_address = netdev_set_mac_address, |
1328 | .ndo_validate_addr = eth_validate_addr, |
1329 | .ndo_eth_ioctl = axienet_ioctl, |
1330 | .ndo_set_rx_mode = axienet_set_multicast_list, |
1331 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1332 | .ndo_poll_controller = axienet_poll_controller, |
1333 | #endif |
1334 | }; |
1335 | |
1336 | /** |
1337 | * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. |
1338 | * @ndev: Pointer to net_device structure |
1339 | * @ed: Pointer to ethtool_drvinfo structure |
1340 | * |
1341 | * This implements ethtool command for getting the driver information. |
1342 | * Issue "ethtool -i ethX" under linux prompt to execute this function. |
1343 | */ |
1344 | static void axienet_ethtools_get_drvinfo(struct net_device *ndev, |
1345 | struct ethtool_drvinfo *ed) |
1346 | { |
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1349 | } |
1350 | |
1351 | /** |
1352 | * axienet_ethtools_get_regs_len - Get the total regs length present in the |
1353 | * AxiEthernet core. |
1354 | * @ndev: Pointer to net_device structure |
1355 | * |
1356 | * This implements ethtool command for getting the total register length |
1357 | * information. |
1358 | * |
1359 | * Return: the total regs length |
1360 | */ |
1361 | static int axienet_ethtools_get_regs_len(struct net_device *ndev) |
1362 | { |
1363 | return sizeof(u32) * AXIENET_REGS_N; |
1364 | } |
1365 | |
1366 | /** |
1367 | * axienet_ethtools_get_regs - Dump the contents of all registers present |
1368 | * in AxiEthernet core. |
1369 | * @ndev: Pointer to net_device structure |
1370 | * @regs: Pointer to ethtool_regs structure |
1371 | * @ret: Void pointer used to return the contents of the registers. |
1372 | * |
1373 | * This implements ethtool command for getting the Axi Ethernet register dump. |
1374 | * Issue "ethtool -d ethX" to execute this function. |
1375 | */ |
1376 | static void axienet_ethtools_get_regs(struct net_device *ndev, |
1377 | struct ethtool_regs *regs, void *ret) |
1378 | { |
1379 | u32 *data = (u32 *)ret; |
1380 | size_t len = sizeof(u32) * AXIENET_REGS_N; |
	struct axienet_local *lp = netdev_priv(ndev);
1382 | |
1383 | regs->version = 0; |
1384 | regs->len = len; |
1385 | |
1386 | memset(data, 0, len); |
1387 | data[0] = axienet_ior(lp, XAE_RAF_OFFSET); |
1388 | data[1] = axienet_ior(lp, XAE_TPF_OFFSET); |
1389 | data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); |
1390 | data[3] = axienet_ior(lp, XAE_IS_OFFSET); |
1391 | data[4] = axienet_ior(lp, XAE_IP_OFFSET); |
1392 | data[5] = axienet_ior(lp, XAE_IE_OFFSET); |
1393 | data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); |
1394 | data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); |
1395 | data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); |
1396 | data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); |
1397 | data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); |
1398 | data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); |
1399 | data[12] = axienet_ior(lp, XAE_PPST_OFFSET); |
1400 | data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); |
1401 | data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); |
1402 | data[15] = axienet_ior(lp, XAE_TC_OFFSET); |
1403 | data[16] = axienet_ior(lp, XAE_FCC_OFFSET); |
1404 | data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); |
1405 | data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); |
1406 | data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); |
1407 | data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); |
1408 | data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); |
1409 | data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); |
1410 | data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); |
1411 | data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); |
1412 | data[29] = axienet_ior(lp, XAE_FMI_OFFSET); |
1413 | data[30] = axienet_ior(lp, XAE_AF0_OFFSET); |
1414 | data[31] = axienet_ior(lp, XAE_AF1_OFFSET); |
1415 | data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); |
1416 | data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); |
1417 | data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); |
1418 | data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); |
1419 | data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); |
1420 | data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); |
1421 | data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); |
1422 | data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); |
1423 | } |
1424 | |
1425 | static void |
1426 | axienet_ethtools_get_ringparam(struct net_device *ndev, |
1427 | struct ethtool_ringparam *ering, |
1428 | struct kernel_ethtool_ringparam *kernel_ering, |
1429 | struct netlink_ext_ack *extack) |
1430 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1432 | |
1433 | ering->rx_max_pending = RX_BD_NUM_MAX; |
1434 | ering->rx_mini_max_pending = 0; |
1435 | ering->rx_jumbo_max_pending = 0; |
1436 | ering->tx_max_pending = TX_BD_NUM_MAX; |
1437 | ering->rx_pending = lp->rx_bd_num; |
1438 | ering->rx_mini_pending = 0; |
1439 | ering->rx_jumbo_pending = 0; |
1440 | ering->tx_pending = lp->tx_bd_num; |
1441 | } |
1442 | |
1443 | static int |
1444 | axienet_ethtools_set_ringparam(struct net_device *ndev, |
1445 | struct ethtool_ringparam *ering, |
1446 | struct kernel_ethtool_ringparam *kernel_ering, |
1447 | struct netlink_ext_ack *extack) |
1448 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1450 | |
1451 | if (ering->rx_pending > RX_BD_NUM_MAX || |
1452 | ering->rx_mini_pending || |
1453 | ering->rx_jumbo_pending || |
1454 | ering->tx_pending < TX_BD_NUM_MIN || |
1455 | ering->tx_pending > TX_BD_NUM_MAX) |
1456 | return -EINVAL; |
1457 | |
	if (netif_running(ndev))
1459 | return -EBUSY; |
1460 | |
1461 | lp->rx_bd_num = ering->rx_pending; |
1462 | lp->tx_bd_num = ering->tx_pending; |
1463 | return 0; |
1464 | } |
1465 | |
1466 | /** |
1467 | * axienet_ethtools_get_pauseparam - Get the pause parameter setting for |
1468 | * Tx and Rx paths. |
1469 | * @ndev: Pointer to net_device structure |
1470 | * @epauseparm: Pointer to ethtool_pauseparam structure. |
1471 | * |
1472 | * This implements ethtool command for getting axi ethernet pause frame |
1473 | * setting. Issue "ethtool -a ethX" to execute this function. |
1474 | */ |
1475 | static void |
1476 | axienet_ethtools_get_pauseparam(struct net_device *ndev, |
1477 | struct ethtool_pauseparam *epauseparm) |
1478 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1480 | |
1481 | phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); |
1482 | } |
1483 | |
1484 | /** |
1485 | * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) |
1486 | * settings. |
1487 | * @ndev: Pointer to net_device structure |
 * @epauseparm: Pointer to ethtool_pauseparam structure
1489 | * |
1490 | * This implements ethtool command for enabling flow control on Rx and Tx |
1491 | * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this |
1492 | * function. |
1493 | * |
 * Return: 0 on success, or a negative error value on failure.
1495 | */ |
1496 | static int |
1497 | axienet_ethtools_set_pauseparam(struct net_device *ndev, |
1498 | struct ethtool_pauseparam *epauseparm) |
1499 | { |
	struct axienet_local *lp = netdev_priv(ndev);
1501 | |
1502 | return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); |
1503 | } |
1504 | |
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0 on success, -EFAULT if the interface is running.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

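/* The link ksettings and autonegotiation-restart operations below are
 * delegated entirely to phylink, which manages the attached PHY/PCS on
 * behalf of the MAC, so these handlers are thin wrappers.
 */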
static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

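/* lp->pcs is embedded in struct axienet_local, so container_of() recovers
 * the driver-private structure from the phylink_pcs pointer that phylink
 * hands back to the PCS callbacks below.
 */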
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
				    XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0 on success,
 *	    non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}
	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.neg_mode = true;
		lp->pcs.poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

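/**
 * axienet_remove - Axi Ethernet remove function.
 * @pdev:	Pointer to platform device structure.
 *
 * This function is called if a device is physically removed from the system
 * or if the driver module is being unloaded. It undoes what axienet_probe()
 * set up and frees any resources allocated to the device.
 */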
static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);
}

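/**
 * axienet_shutdown - Axi Ethernet shutdown hook.
 * @pdev:	Pointer to platform device structure.
 *
 * Detaches and closes the interface under the RTNL lock so that DMA is
 * quiesced cleanly before reboot or power-off.
 */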
static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

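/**
 * axienet_suspend - Suspend hook for system sleep.
 * @dev:	Pointer to device structure
 *
 * If the interface is running, detach it and take it down so that the MAC
 * and DMA are quiesced before the system sleeps.
 *
 * Return: 0 always
 */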
static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

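/**
 * axienet_resume - Resume hook for system sleep.
 * @dev:	Pointer to device structure
 *
 * If the interface was running at suspend time, bring it back up and
 * reattach it.
 *
 * Return: 0 always
 */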
static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove_new = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .pm = &axienet_pm_ops,
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");