1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Driver for Xilinx TEMAC Ethernet device |
4 | * |
5 | * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi |
6 | * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> |
7 | * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. |
8 | * |
9 | * This is a driver for the Xilinx ll_temac ipcore which is often used |
10 | * in the Virtex and Spartan series of chips. |
11 | * |
12 | * Notes: |
13 | * - The ll_temac hardware uses indirect access for many of the TEMAC |
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
16 | * MDIO accesses are long, so threads doing them should probably sleep |
17 | * rather than busywait. However, since only one indirect access can be |
18 | * in progress at any given time, that means that *all* indirect accesses |
19 | * could end up sleeping (to wait for an MDIO access to complete). |
20 | * Fortunately none of the indirect accesses are on the 'hot' path for tx |
21 | * or rx, so this should be okay. |
22 | * |
23 | * TODO: |
24 | * - Factor out locallink DMA code into separate driver |
25 | * - Fix support for hardware checksumming. |
26 | * - Testing. Lots and lots of testing. |
27 | * |
28 | */ |
29 | |
30 | #include <linux/delay.h> |
31 | #include <linux/etherdevice.h> |
32 | #include <linux/mii.h> |
33 | #include <linux/module.h> |
34 | #include <linux/mutex.h> |
35 | #include <linux/netdevice.h> |
36 | #include <linux/if_ether.h> |
37 | #include <linux/of.h> |
38 | #include <linux/of_irq.h> |
39 | #include <linux/of_mdio.h> |
40 | #include <linux/of_net.h> |
41 | #include <linux/platform_device.h> |
42 | #include <linux/skbuff.h> |
43 | #include <linux/spinlock.h> |
44 | #include <linux/tcp.h> /* needed for sizeof(tcphdr) */ |
45 | #include <linux/udp.h> /* needed for sizeof(udphdr) */ |
46 | #include <linux/phy.h> |
47 | #include <linux/in.h> |
48 | #include <linux/io.h> |
49 | #include <linux/ip.h> |
50 | #include <linux/slab.h> |
51 | #include <linux/interrupt.h> |
52 | #include <linux/workqueue.h> |
53 | #include <linux/dma-mapping.h> |
54 | #include <linux/processor.h> |
55 | #include <linux/platform_data/xilinx-ll-temac.h> |
56 | |
57 | #include "ll_temac.h" |
58 | |
/* Descriptor defines for Tx and Rx DMA */
60 | #define TX_BD_NUM_DEFAULT 64 |
61 | #define RX_BD_NUM_DEFAULT 1024 |
62 | #define TX_BD_NUM_MAX 4096 |
63 | #define RX_BD_NUM_MAX 4096 |
64 | |
65 | /* --------------------------------------------------------------------- |
66 | * Low level register access functions |
67 | */ |
68 | |
69 | static u32 _temac_ior_be(struct temac_local *lp, int offset) |
70 | { |
71 | return ioread32be(lp->regs + offset); |
72 | } |
73 | |
74 | static void _temac_iow_be(struct temac_local *lp, int offset, u32 value) |
75 | { |
76 | return iowrite32be(value, lp->regs + offset); |
77 | } |
78 | |
79 | static u32 _temac_ior_le(struct temac_local *lp, int offset) |
80 | { |
81 | return ioread32(lp->regs + offset); |
82 | } |
83 | |
84 | static void _temac_iow_le(struct temac_local *lp, int offset, u32 value) |
85 | { |
86 | return iowrite32(value, lp->regs + offset); |
87 | } |
88 | |
89 | static bool hard_acs_rdy(struct temac_local *lp) |
90 | { |
91 | return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK; |
92 | } |
93 | |
94 | static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout) |
95 | { |
96 | ktime_t cur = ktime_get(); |
97 | |
	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
99 | } |
100 | |
101 | /* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz |
102 | * that was used before, and should cover MDIO bus speed down to 3200 |
103 | * Hz. |
104 | */ |
105 | #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC) |
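
/* A quick sanity check of that bound (assuming a standard MDIO frame
 * of 64 MDC cycles, including the 32-bit preamble):
 * 64 cycles / 20 ms = 3200 Hz minimum bus clock.
 */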
106 | |
107 | /* |
108 | * temac_indirect_busywait - Wait for current indirect register access |
109 | * to complete. |
110 | */ |
111 | int temac_indirect_busywait(struct temac_local *lp) |
112 | { |
113 | ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS); |
114 | |
115 | spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout)); |
116 | if (WARN_ON(!hard_acs_rdy(lp))) |
117 | return -ETIMEDOUT; |
118 | |
119 | return 0; |
120 | } |
121 | |
122 | /* |
123 | * temac_indirect_in32 - Indirect register read access. This function |
124 | * must be called without lp->indirect_lock being held. |
125 | */ |
126 | u32 temac_indirect_in32(struct temac_local *lp, int reg) |
127 | { |
128 | unsigned long flags; |
129 | int val; |
130 | |
131 | spin_lock_irqsave(lp->indirect_lock, flags); |
132 | val = temac_indirect_in32_locked(lp, reg); |
	spin_unlock_irqrestore(lp->indirect_lock, flags);
134 | return val; |
135 | } |
136 | |
137 | /* |
138 | * temac_indirect_in32_locked - Indirect register read access. This |
139 | * function must be called with lp->indirect_lock being held. Use |
140 | * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid |
141 | * repeated lock/unlock and to ensure uninterrupted access to indirect |
142 | * registers. |
143 | */ |
144 | u32 temac_indirect_in32_locked(struct temac_local *lp, int reg) |
145 | { |
146 | /* This initial wait should normally not spin, as we always |
147 | * try to wait for indirect access to complete before |
148 | * releasing the indirect_lock. |
149 | */ |
150 | if (WARN_ON(temac_indirect_busywait(lp))) |
151 | return -ETIMEDOUT; |
152 | /* Initiate read from indirect register */ |
153 | temac_iow(lp, XTE_CTL0_OFFSET, reg); |
154 | /* Wait for indirect register access to complete. We really |
155 | * should not see timeouts, and could even end up causing |
	 * problems for the following indirect access, so let's make a bit
157 | * of WARN noise. |
158 | */ |
159 | if (WARN_ON(temac_indirect_busywait(lp))) |
160 | return -ETIMEDOUT; |
161 | /* Value is ready now */ |
162 | return temac_ior(lp, XTE_LSW0_OFFSET); |
163 | } |
164 | |
165 | /* |
166 | * temac_indirect_out32 - Indirect register write access. This function |
167 | * must be called without lp->indirect_lock being held. |
168 | */ |
169 | void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) |
170 | { |
171 | unsigned long flags; |
172 | |
173 | spin_lock_irqsave(lp->indirect_lock, flags); |
174 | temac_indirect_out32_locked(lp, reg, value); |
	spin_unlock_irqrestore(lp->indirect_lock, flags);
176 | } |
177 | |
178 | /* |
179 | * temac_indirect_out32_locked - Indirect register write access. This |
180 | * function must be called with lp->indirect_lock being held. Use |
181 | * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid |
182 | * repeated lock/unlock and to ensure uninterrupted access to indirect |
183 | * registers. |
184 | */ |
185 | void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value) |
186 | { |
187 | /* As in temac_indirect_in32_locked(), we should normally not |
188 | * spin here. And if it happens, we actually end up silently |
189 | * ignoring the write request. Ouch. |
190 | */ |
191 | if (WARN_ON(temac_indirect_busywait(lp))) |
192 | return; |
193 | /* Initiate write to indirect register */ |
194 | temac_iow(lp, XTE_LSW0_OFFSET, value); |
195 | temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); |
196 | /* As in temac_indirect_in32_locked(), we should not see timeouts |
197 | * here. And if it happens, we continue before the write has |
198 | * completed. Not good. |
199 | */ |
200 | WARN_ON(temac_indirect_busywait(lp)); |
201 | } |
202 | |
203 | /* |
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
208 | */ |
209 | static u32 temac_dma_in32_be(struct temac_local *lp, int reg) |
210 | { |
211 | return ioread32be(lp->sdma_regs + (reg << 2)); |
212 | } |
213 | |
214 | static u32 temac_dma_in32_le(struct temac_local *lp, int reg) |
215 | { |
216 | return ioread32(lp->sdma_regs + (reg << 2)); |
217 | } |
218 | |
219 | /* |
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
224 | */ |
225 | static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value) |
226 | { |
227 | iowrite32be(value, lp->sdma_regs + (reg << 2)); |
228 | } |
229 | |
230 | static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value) |
231 | { |
232 | iowrite32(value, lp->sdma_regs + (reg << 2)); |
233 | } |
234 | |
235 | /* DMA register access functions can be DCR based or memory mapped. |
236 | * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both |
237 | * memory mapped. |
238 | */ |
239 | #ifdef CONFIG_PPC_DCR |
240 | |
241 | /* |
242 | * temac_dma_dcr_in32 - DCR based DMA read |
243 | */ |
244 | static u32 temac_dma_dcr_in(struct temac_local *lp, int reg) |
245 | { |
246 | return dcr_read(lp->sdma_dcrs, reg); |
247 | } |
248 | |
249 | /* |
250 | * temac_dma_dcr_out32 - DCR based DMA write |
251 | */ |
252 | static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value) |
253 | { |
254 | dcr_write(lp->sdma_dcrs, reg, value); |
255 | } |
256 | |
257 | /* |
258 | * temac_dcr_setup - If the DMA is DCR based, then setup the address and |
259 | * I/O functions |
260 | */ |
261 | static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, |
262 | struct device_node *np) |
263 | { |
264 | unsigned int dcrs; |
265 | |
266 | /* setup the dcr address mapping if it's in the device tree */ |
267 | |
268 | dcrs = dcr_resource_start(np, 0); |
269 | if (dcrs != 0) { |
270 | lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); |
271 | lp->dma_in = temac_dma_dcr_in; |
272 | lp->dma_out = temac_dma_dcr_out; |
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
274 | return 0; |
275 | } |
276 | /* no DCR in the device tree, indicate a failure */ |
277 | return -1; |
278 | } |
279 | |
280 | #else |
281 | |
282 | /* |
283 | * temac_dcr_setup - This is a stub for when DCR is not supported, |
284 | * such as with MicroBlaze and x86 |
285 | */ |
286 | static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, |
287 | struct device_node *np) |
288 | { |
289 | return -1; |
290 | } |
291 | |
292 | #endif |
293 | |
294 | /* |
295 | * temac_dma_bd_release - Release buffer descriptor rings |
296 | */ |
297 | static void temac_dma_bd_release(struct net_device *ndev) |
298 | { |
	struct temac_local *lp = netdev_priv(ndev);
300 | int i; |
301 | |
302 | /* Reset Local Link (DMA) */ |
303 | lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); |
304 | |
305 | for (i = 0; i < lp->rx_bd_num; i++) { |
306 | if (!lp->rx_skb[i]) |
307 | break; |
		dma_unmap_single(ndev->dev.parent,
				 be32_to_cpu(lp->rx_bd_v[i].phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
310 | dev_kfree_skb(lp->rx_skb[i]); |
311 | } |
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
320 | } |
321 | |
322 | /* |
323 | * temac_dma_bd_init - Setup buffer descriptor rings |
324 | */ |
325 | static int temac_dma_bd_init(struct net_device *ndev) |
326 | { |
	struct temac_local *lp = netdev_priv(ndev);
328 | struct sk_buff *skb; |
329 | dma_addr_t skb_dma_addr; |
330 | int i; |
331 | |
	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
334 | if (!lp->rx_skb) |
335 | goto out; |
336 | |
337 | /* allocate the tx and rx ring buffer descriptors. */ |
338 | /* returns a virtual address and a physical address. */ |
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
342 | if (!lp->tx_bd_v) |
343 | goto out; |
344 | |
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
348 | if (!lp->rx_bd_v) |
349 | goto out; |
350 | |
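	/* Link the descriptors into circular rings: each "next" field
	 * holds the (device-endian) physical address of the following
	 * descriptor, and the last one wraps back to the first.
	 */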
351 | for (i = 0; i < lp->tx_bd_num; i++) { |
352 | lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p |
353 | + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num)); |
354 | } |
355 | |
356 | for (i = 0; i < lp->rx_bd_num; i++) { |
357 | lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p |
358 | + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num)); |
359 | |
		skb = __netdev_alloc_skb_ip_align(ndev,
						  XTE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
363 | if (!skb) |
364 | goto out; |
365 | |
366 | lp->rx_skb[i] = skb; |
367 | /* returns physical address of skb->data */ |
368 | skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, |
369 | XTE_MAX_JUMBO_FRAME_SIZE, |
370 | DMA_FROM_DEVICE); |
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
372 | goto out; |
373 | lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); |
374 | lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); |
375 | lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); |
376 | } |
377 | |
378 | /* Configure DMA channel (irq setup) */ |
379 | lp->dma_out(lp, TX_CHNL_CTRL, |
380 | lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 | |
381 | 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used! |
382 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | |
383 | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); |
384 | lp->dma_out(lp, RX_CHNL_CTRL, |
385 | lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 | |
386 | CHNL_CTRL_IRQ_IOE | |
387 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | |
388 | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); |
389 | |
390 | /* Init descriptor indexes */ |
391 | lp->tx_bd_ci = 0; |
392 | lp->tx_bd_tail = 0; |
393 | lp->rx_bd_ci = 0; |
394 | lp->rx_bd_tail = lp->rx_bd_num - 1; |
395 | |
396 | /* Enable RX DMA transfers */ |
397 | wmb(); |
398 | lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); |
399 | lp->dma_out(lp, RX_TAILDESC_PTR, |
400 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail)); |
401 | |
402 | /* Prepare for TX DMA transfer */ |
403 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); |
404 | |
405 | return 0; |
406 | |
407 | out: |
408 | temac_dma_bd_release(ndev); |
409 | return -ENOMEM; |
410 | } |
411 | |
412 | /* --------------------------------------------------------------------- |
413 | * net_device_ops |
414 | */ |
415 | |
416 | static void temac_do_set_mac_address(struct net_device *ndev) |
417 | { |
	struct temac_local *lp = netdev_priv(ndev);
419 | unsigned long flags; |
420 | |
	/* Set up the unicast MAC address filter with the given MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1 so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
435 | } |
436 | |
437 | static int temac_init_mac_address(struct net_device *ndev, const void *address) |
438 | { |
	eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
442 | temac_do_set_mac_address(ndev); |
443 | return 0; |
444 | } |
445 | |
446 | static int temac_set_mac_address(struct net_device *ndev, void *p) |
447 | { |
448 | struct sockaddr *addr = p; |
449 | |
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);
453 | temac_do_set_mac_address(ndev); |
454 | return 0; |
455 | } |
456 | |
457 | static void temac_set_multicast_list(struct net_device *ndev) |
458 | { |
	struct temac_local *lp = netdev_priv(ndev);
460 | u32 multi_addr_msw, multi_addr_lsw; |
461 | int i = 0; |
462 | unsigned long flags; |
463 | bool promisc_mode_disabled = false; |
464 | |
465 | if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) || |
466 | (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) { |
467 | temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); |
468 | dev_info(&ndev->dev, "Promiscuous mode enabled.\n" ); |
469 | return; |
470 | } |
471 | |
472 | spin_lock_irqsave(lp->indirect_lock, flags); |
473 | |
474 | if (!netdev_mc_empty(ndev)) { |
475 | struct netdev_hw_addr *ha; |
476 | |
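		/* Program one CAM entry per multicast address: MAC bits
		 * [31:0] go into MAW0, and MAC bits [47:32] plus the
		 * entry index (shifted into bits 16 and up) go into
		 * MAW1.
		 */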
477 | netdev_for_each_mc_addr(ha, ndev) { |
478 | if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM)) |
479 | break; |
480 | multi_addr_msw = ((ha->addr[3] << 24) | |
481 | (ha->addr[2] << 16) | |
482 | (ha->addr[1] << 8) | |
483 | (ha->addr[0])); |
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
490 | i++; |
491 | } |
492 | } |
493 | |
494 | /* Clear all or remaining/unused address table entries */ |
495 | while (i < MULTICAST_CAM_TABLE_NUM) { |
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
498 | i++; |
499 | } |
500 | |
501 | /* Enable address filter block if currently disabled */ |
502 | if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET) |
503 | & XTE_AFM_EPPRM_MASK) { |
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
505 | promisc_mode_disabled = true; |
506 | } |
507 | |
	spin_unlock_irqrestore(lp->indirect_lock, flags);
509 | |
510 | if (promisc_mode_disabled) |
511 | dev_info(&ndev->dev, "Promiscuous mode disabled.\n" ); |
512 | } |
513 | |
514 | static struct temac_option { |
515 | int flg; |
516 | u32 opt; |
517 | u32 reg; |
518 | u32 m_or; |
519 | u32 m_and; |
520 | } temac_options[] = { |
521 | /* Turn on jumbo packet support for both Rx and Tx */ |
522 | { |
523 | .opt = XTE_OPTION_JUMBO, |
524 | .reg = XTE_TXC_OFFSET, |
525 | .m_or = XTE_TXC_TXJMBO_MASK, |
526 | }, |
527 | { |
528 | .opt = XTE_OPTION_JUMBO, |
529 | .reg = XTE_RXC1_OFFSET, |
530 | .m_or = XTE_RXC1_RXJMBO_MASK, |
531 | }, |
532 | /* Turn on VLAN packet support for both Rx and Tx */ |
533 | { |
534 | .opt = XTE_OPTION_VLAN, |
535 | .reg = XTE_TXC_OFFSET, |
536 | .m_or = XTE_TXC_TXVLAN_MASK, |
537 | }, |
538 | { |
539 | .opt = XTE_OPTION_VLAN, |
540 | .reg = XTE_RXC1_OFFSET, |
541 | .m_or = XTE_RXC1_RXVLAN_MASK, |
542 | }, |
543 | /* Turn on FCS stripping on receive packets */ |
544 | { |
545 | .opt = XTE_OPTION_FCS_STRIP, |
546 | .reg = XTE_RXC1_OFFSET, |
547 | .m_or = XTE_RXC1_RXFCS_MASK, |
548 | }, |
549 | /* Turn on FCS insertion on transmit packets */ |
550 | { |
551 | .opt = XTE_OPTION_FCS_INSERT, |
552 | .reg = XTE_TXC_OFFSET, |
553 | .m_or = XTE_TXC_TXFCS_MASK, |
554 | }, |
555 | /* Turn on length/type field checking on receive packets */ |
556 | { |
557 | .opt = XTE_OPTION_LENTYPE_ERR, |
558 | .reg = XTE_RXC1_OFFSET, |
559 | .m_or = XTE_RXC1_RXLT_MASK, |
560 | }, |
561 | /* Turn on flow control */ |
562 | { |
563 | .opt = XTE_OPTION_FLOW_CONTROL, |
564 | .reg = XTE_FCC_OFFSET, |
565 | .m_or = XTE_FCC_RXFLO_MASK, |
566 | }, |
567 | /* Turn on flow control */ |
568 | { |
569 | .opt = XTE_OPTION_FLOW_CONTROL, |
570 | .reg = XTE_FCC_OFFSET, |
571 | .m_or = XTE_FCC_TXFLO_MASK, |
572 | }, |
	/* Turn on promiscuous frame filtering (all frames are received) */
574 | { |
575 | .opt = XTE_OPTION_PROMISC, |
576 | .reg = XTE_AFM_OFFSET, |
577 | .m_or = XTE_AFM_EPPRM_MASK, |
578 | }, |
579 | /* Enable transmitter if not already enabled */ |
580 | { |
581 | .opt = XTE_OPTION_TXEN, |
582 | .reg = XTE_TXC_OFFSET, |
583 | .m_or = XTE_TXC_TXEN_MASK, |
584 | }, |
	/* Enable receiver if not already enabled */
586 | { |
587 | .opt = XTE_OPTION_RXEN, |
588 | .reg = XTE_RXC1_OFFSET, |
589 | .m_or = XTE_RXC1_RXEN_MASK, |
590 | }, |
591 | {} |
592 | }; |
593 | |
594 | /* |
 * temac_setoptions - Apply the given option flags to the hardware registers
596 | */ |
597 | static u32 temac_setoptions(struct net_device *ndev, u32 options) |
598 | { |
	struct temac_local *lp = netdev_priv(ndev);
600 | struct temac_option *tp = &temac_options[0]; |
601 | int reg; |
602 | unsigned long flags; |
603 | |
604 | spin_lock_irqsave(lp->indirect_lock, flags); |
605 | while (tp->opt) { |
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
607 | if (options & tp->opt) { |
608 | reg |= tp->m_or; |
			temac_indirect_out32_locked(lp, tp->reg, reg);
610 | } |
611 | tp++; |
612 | } |
	spin_unlock_irqrestore(lp->indirect_lock, flags);
614 | lp->options |= options; |
615 | |
616 | return 0; |
617 | } |
618 | |
619 | /* Initialize temac */ |
620 | static void temac_device_reset(struct net_device *ndev) |
621 | { |
	struct temac_local *lp = netdev_priv(ndev);
623 | u32 timeout; |
624 | u32 val; |
625 | unsigned long flags; |
626 | |
627 | /* Perform a software reset */ |
628 | |
629 | /* 0x300 host enable bit ? */ |
630 | /* reset PHY through control register ?:1 */ |
631 | |
632 | dev_dbg(&ndev->dev, "%s()\n" , __func__); |
633 | |
634 | /* Reset the receiver and wait for it to finish reset */ |
635 | temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK); |
636 | timeout = 1000; |
637 | while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) { |
638 | udelay(1); |
639 | if (--timeout == 0) { |
640 | dev_err(&ndev->dev, |
641 | "%s RX reset timeout!!\n" , __func__); |
642 | break; |
643 | } |
644 | } |
645 | |
646 | /* Reset the transmitter and wait for it to finish reset */ |
647 | temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK); |
648 | timeout = 1000; |
649 | while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) { |
650 | udelay(1); |
651 | if (--timeout == 0) { |
652 | dev_err(&ndev->dev, |
653 | "%s TX reset timeout!!\n" , __func__); |
654 | break; |
655 | } |
656 | } |
657 | |
658 | /* Disable the receiver */ |
659 | spin_lock_irqsave(lp->indirect_lock, flags); |
660 | val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET); |
661 | temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, |
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
664 | |
665 | /* Reset Local Link (DMA) */ |
666 | lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); |
667 | timeout = 1000; |
668 | while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { |
669 | udelay(1); |
670 | if (--timeout == 0) { |
671 | dev_err(&ndev->dev, |
672 | "%s DMA reset timeout!!\n" , __func__); |
673 | break; |
674 | } |
675 | } |
676 | lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); |
677 | |
678 | if (temac_dma_bd_init(ndev)) { |
679 | dev_err(&ndev->dev, |
680 | "%s descriptor allocation failed\n" , __func__); |
681 | } |
682 | |
683 | spin_lock_irqsave(lp->indirect_lock, flags); |
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
689 | |
690 | /* Sync default options with HW |
691 | * but leave receiver and transmitter disabled. |
692 | */ |
693 | temac_setoptions(ndev, |
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
695 | |
696 | temac_do_set_mac_address(ndev); |
697 | |
698 | /* Set address filter table */ |
699 | temac_set_multicast_list(ndev); |
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");
702 | |
703 | /* Init Driver variable */ |
	netif_trans_update(ndev); /* prevent tx timeout */
705 | } |
706 | |
707 | static void temac_adjust_link(struct net_device *ndev) |
708 | { |
	struct temac_local *lp = netdev_priv(ndev);
710 | struct phy_device *phy = ndev->phydev; |
711 | u32 mii_speed; |
712 | int link_state; |
713 | unsigned long flags; |
714 | |
715 | /* hash together the state values to decide if something has changed */ |
716 | link_state = phy->speed | (phy->duplex << 1) | phy->link; |
717 | |
718 | if (lp->last_link != link_state) { |
719 | spin_lock_irqsave(lp->indirect_lock, flags); |
720 | mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET); |
721 | mii_speed &= ~XTE_EMCFG_LINKSPD_MASK; |
722 | |
723 | switch (phy->speed) { |
724 | case SPEED_1000: |
725 | mii_speed |= XTE_EMCFG_LINKSPD_1000; |
726 | break; |
727 | case SPEED_100: |
728 | mii_speed |= XTE_EMCFG_LINKSPD_100; |
729 | break; |
730 | case SPEED_10: |
731 | mii_speed |= XTE_EMCFG_LINKSPD_10; |
732 | break; |
733 | } |
734 | |
735 | /* Write new speed setting out to TEMAC */ |
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);
738 | |
739 | lp->last_link = link_state; |
		phy_print_status(phy);
741 | } |
742 | } |
743 | |
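/* The skb pointer for an in-flight buffer is stashed in the otherwise
 * unused app3/app4 fields of its last descriptor, split across both
 * fields on 64-bit systems.
 */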
744 | #ifdef CONFIG_64BIT |
745 | |
746 | static void ptr_to_txbd(void *p, struct cdmac_bd *bd) |
747 | { |
748 | bd->app3 = (u32)(((u64)p) >> 32); |
749 | bd->app4 = (u32)((u64)p & 0xFFFFFFFF); |
750 | } |
751 | |
752 | static void *ptr_from_txbd(struct cdmac_bd *bd) |
753 | { |
754 | return (void *)(((u64)(bd->app3) << 32) | bd->app4); |
755 | } |
756 | |
757 | #else |
758 | |
759 | static void ptr_to_txbd(void *p, struct cdmac_bd *bd) |
760 | { |
761 | bd->app4 = (u32)p; |
762 | } |
763 | |
764 | static void *ptr_from_txbd(struct cdmac_bd *bd) |
765 | { |
766 | return (void *)(bd->app4); |
767 | } |
768 | |
769 | #endif |
770 | |
771 | static void temac_start_xmit_done(struct net_device *ndev) |
772 | { |
	struct temac_local *lp = netdev_priv(ndev);
774 | struct cdmac_bd *cur_p; |
775 | unsigned int stat = 0; |
776 | struct sk_buff *skb; |
777 | |
778 | cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; |
779 | stat = be32_to_cpu(cur_p->app0); |
780 | |
781 | while (stat & STS_CTRL_APP0_CMPLT) { |
782 | /* Make sure that the other fields are read after bd is |
783 | * released by dma |
784 | */ |
785 | rmb(); |
786 | dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), |
787 | be32_to_cpu(cur_p->len), DMA_TO_DEVICE); |
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
789 | if (skb) |
790 | dev_consume_skb_irq(skb); |
791 | cur_p->app1 = 0; |
792 | cur_p->app2 = 0; |
793 | cur_p->app3 = 0; |
794 | cur_p->app4 = 0; |
795 | |
796 | ndev->stats.tx_packets++; |
797 | ndev->stats.tx_bytes += be32_to_cpu(cur_p->len); |
798 | |
799 | /* app0 must be visible last, as it is used to flag |
800 | * availability of the bd |
801 | */ |
802 | smp_mb(); |
803 | cur_p->app0 = 0; |
804 | |
805 | lp->tx_bd_ci++; |
806 | if (lp->tx_bd_ci >= lp->tx_bd_num) |
807 | lp->tx_bd_ci = 0; |
808 | |
809 | cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; |
810 | stat = be32_to_cpu(cur_p->app0); |
811 | } |
812 | |
813 | /* Matches barrier in temac_start_xmit */ |
814 | smp_mb(); |
815 | |
	netif_wake_queue(ndev);
817 | } |
818 | |
819 | static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) |
820 | { |
821 | struct cdmac_bd *cur_p; |
822 | int tail; |
823 | |
824 | tail = lp->tx_bd_tail; |
825 | cur_p = &lp->tx_bd_v[tail]; |
826 | |
827 | do { |
828 | if (cur_p->app0) |
829 | return NETDEV_TX_BUSY; |
830 | |
831 | /* Make sure to read next bd app0 after this one */ |
832 | rmb(); |
833 | |
834 | tail++; |
835 | if (tail >= lp->tx_bd_num) |
836 | tail = 0; |
837 | |
838 | cur_p = &lp->tx_bd_v[tail]; |
839 | num_frag--; |
840 | } while (num_frag >= 0); |
841 | |
842 | return 0; |
843 | } |
844 | |
845 | static netdev_tx_t |
846 | temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
847 | { |
	struct temac_local *lp = netdev_priv(ndev);
849 | struct cdmac_bd *cur_p; |
850 | dma_addr_t tail_p, skb_dma_addr; |
851 | int ii; |
852 | unsigned long num_frag; |
853 | skb_frag_t *frag; |
854 | |
855 | num_frag = skb_shinfo(skb)->nr_frags; |
856 | frag = &skb_shinfo(skb)->frags[0]; |
857 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
858 | |
	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);
864 | |
865 | /* Matches barrier in temac_start_xmit_done */ |
866 | smp_mb(); |
867 | |
868 | /* Space might have just been freed - check again */ |
		if (temac_check_tx_bd_space(lp, num_frag + 1))
870 | return NETDEV_TX_BUSY; |
871 | |
		netif_wake_queue(ndev);
873 | } |
874 | |
875 | cur_p->app0 = 0; |
876 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
877 | unsigned int csum_start_off = skb_checksum_start_offset(skb); |
878 | unsigned int csum_index_off = csum_start_off + skb->csum_offset; |
879 | |
880 | cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */ |
881 | cur_p->app1 = cpu_to_be32((csum_start_off << 16) |
882 | | csum_index_off); |
883 | cur_p->app2 = 0; /* initial checksum seed */ |
884 | } |
885 | |
886 | cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP); |
887 | skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, |
888 | skb_headlen(skb), DMA_TO_DEVICE); |
889 | cur_p->len = cpu_to_be32(skb_headlen(skb)); |
890 | if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) { |
891 | dev_kfree_skb_any(skb); |
892 | ndev->stats.tx_dropped++; |
893 | return NETDEV_TX_OK; |
894 | } |
895 | cur_p->phys = cpu_to_be32(skb_dma_addr); |
896 | |
897 | for (ii = 0; ii < num_frag; ii++) { |
898 | if (++lp->tx_bd_tail >= lp->tx_bd_num) |
899 | lp->tx_bd_tail = 0; |
900 | |
901 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
902 | skb_dma_addr = dma_map_single(ndev->dev.parent, |
903 | skb_frag_address(frag), |
904 | skb_frag_size(frag), |
905 | DMA_TO_DEVICE); |
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
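			/* Mapping failed: walk the ring backwards,
			 * unmapping the fragments mapped so far, then
			 * unmap the skb head and drop the packet.
			 */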
907 | if (--lp->tx_bd_tail < 0) |
908 | lp->tx_bd_tail = lp->tx_bd_num - 1; |
909 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
910 | while (--ii >= 0) { |
911 | --frag; |
912 | dma_unmap_single(ndev->dev.parent, |
913 | be32_to_cpu(cur_p->phys), |
914 | skb_frag_size(frag), |
915 | DMA_TO_DEVICE); |
916 | if (--lp->tx_bd_tail < 0) |
917 | lp->tx_bd_tail = lp->tx_bd_num - 1; |
918 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
919 | } |
920 | dma_unmap_single(ndev->dev.parent, |
921 | be32_to_cpu(cur_p->phys), |
922 | skb_headlen(skb), DMA_TO_DEVICE); |
923 | dev_kfree_skb_any(skb); |
924 | ndev->stats.tx_dropped++; |
925 | return NETDEV_TX_OK; |
926 | } |
927 | cur_p->phys = cpu_to_be32(skb_dma_addr); |
928 | cur_p->len = cpu_to_be32(skb_frag_size(frag)); |
929 | cur_p->app0 = 0; |
930 | frag++; |
931 | } |
932 | cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP); |
933 | |
934 | /* Mark last fragment with skb address, so it can be consumed |
935 | * in temac_start_xmit_done() |
936 | */ |
	ptr_to_txbd((void *)skb, cur_p);
938 | |
939 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; |
940 | lp->tx_bd_tail++; |
941 | if (lp->tx_bd_tail >= lp->tx_bd_num) |
942 | lp->tx_bd_tail = 0; |
943 | |
944 | skb_tx_timestamp(skb); |
945 | |
946 | /* Kick off the transfer */ |
947 | wmb(); |
948 | lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ |
949 | |
950 | if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) |
		netif_stop_queue(ndev);
952 | |
953 | return NETDEV_TX_OK; |
954 | } |
955 | |
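/* Number of RX buffer descriptors, from rx_bd_ci through rx_bd_tail,
 * that still have an skb attached and are thus available to the DMA
 * engine.
 */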
956 | static int ll_temac_recv_buffers_available(struct temac_local *lp) |
957 | { |
958 | int available; |
959 | |
960 | if (!lp->rx_skb[lp->rx_bd_ci]) |
961 | return 0; |
962 | available = 1 + lp->rx_bd_tail - lp->rx_bd_ci; |
963 | if (available <= 0) |
964 | available += lp->rx_bd_num; |
965 | return available; |
966 | } |
967 | |
968 | static void ll_temac_recv(struct net_device *ndev) |
969 | { |
	struct temac_local *lp = netdev_priv(ndev);
971 | unsigned long flags; |
972 | int rx_bd; |
973 | bool update_tail = false; |
974 | |
975 | spin_lock_irqsave(&lp->rx_lock, flags); |
976 | |
	/* Process all received buffers, passing them on to the network
	 * stack. After this, the buffer descriptors will be in an
	 * un-allocated state, where no skb is allocated for them, and
	 * they are therefore not available for TEMAC/DMA.
981 | */ |
982 | do { |
983 | struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci]; |
984 | struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci]; |
985 | unsigned int bdstat = be32_to_cpu(bd->app0); |
986 | int length; |
987 | |
		/* While this should not normally happen, we can end up
989 | * here when GFP_ATOMIC allocations fail, and we |
990 | * therefore have un-allocated buffers. |
991 | */ |
992 | if (!skb) |
993 | break; |
994 | |
995 | /* Loop over all completed buffer descriptors */ |
996 | if (!(bdstat & STS_CTRL_APP0_CMPLT)) |
997 | break; |
998 | |
999 | dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys), |
1000 | XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); |
1001 | /* The buffer is not valid for DMA anymore */ |
1002 | bd->phys = 0; |
1003 | bd->len = 0; |
1004 | |
1005 | length = be32_to_cpu(bd->app4) & 0x3FFF; |
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
1008 | skb_checksum_none_assert(skb); |
1009 | |
1010 | /* if we're doing rx csum offload, set it up */ |
1011 | if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && |
1012 | (skb->protocol == htons(ETH_P_IP)) && |
1013 | (skb->len > 64)) { |
1014 | /* Convert from device endianness (be32) to cpu |
1015 | * endianness, and if necessary swap the bytes |
1016 | * (back) for proper IP checksum byte order |
1017 | * (be16). |
1018 | */ |
1019 | skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF); |
1020 | skb->ip_summed = CHECKSUM_COMPLETE; |
1021 | } |
1022 | |
1023 | if (!skb_defer_rx_timestamp(skb)) |
1024 | netif_rx(skb); |
1025 | /* The skb buffer is now owned by network stack above */ |
1026 | lp->rx_skb[lp->rx_bd_ci] = NULL; |
1027 | |
1028 | ndev->stats.rx_packets++; |
1029 | ndev->stats.rx_bytes += length; |
1030 | |
1031 | rx_bd = lp->rx_bd_ci; |
1032 | if (++lp->rx_bd_ci >= lp->rx_bd_num) |
1033 | lp->rx_bd_ci = 0; |
1034 | } while (rx_bd != lp->rx_bd_tail); |
1035 | |
1036 | /* DMA operations will halt when the last buffer descriptor is |
	 * processed (i.e. the one pointed to by RX_TAILDESC_PTR).
1038 | * When that happens, no more interrupt events will be |
1039 | * generated. No IRQ_COAL or IRQ_DLY, and not even an |
1040 | * IRQ_ERR. To avoid stalling, we schedule a delayed work |
1041 | * when there is a potential risk of that happening. The work |
1042 | * will call this function, and thus re-schedule itself until |
1043 | * enough buffers are available again. |
1044 | */ |
1045 | if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx) |
		schedule_delayed_work(&lp->restart_work, HZ / 1000);
1047 | |
1048 | /* Allocate new buffers for those buffer descriptors that were |
1049 | * passed to network stack. Note that GFP_ATOMIC allocations |
1050 | * can fail (e.g. when a larger burst of GFP_ATOMIC |
1051 | * allocations occurs), so while we try to allocate all |
1052 | * buffers in the same interrupt where they were processed, we |
1053 | * continue with what we could get in case of allocation |
1054 | * failure. Allocation of remaining buffers will be retried |
1055 | * in following calls. |
1056 | */ |
1057 | while (1) { |
1058 | struct sk_buff *skb; |
1059 | struct cdmac_bd *bd; |
1060 | dma_addr_t skb_dma_addr; |
1061 | |
1062 | rx_bd = lp->rx_bd_tail + 1; |
1063 | if (rx_bd >= lp->rx_bd_num) |
1064 | rx_bd = 0; |
1065 | bd = &lp->rx_bd_v[rx_bd]; |
1066 | |
1067 | if (bd->phys) |
1068 | break; /* All skb's allocated */ |
1069 | |
		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
1071 | if (!skb) { |
1072 | dev_warn(&ndev->dev, "skb alloc failed\n" ); |
1073 | break; |
1074 | } |
1075 | |
1076 | skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, |
1077 | XTE_MAX_JUMBO_FRAME_SIZE, |
1078 | DMA_FROM_DEVICE); |
1079 | if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, |
1080 | skb_dma_addr))) { |
1081 | dev_kfree_skb_any(skb); |
1082 | break; |
1083 | } |
1084 | |
1085 | bd->phys = cpu_to_be32(skb_dma_addr); |
1086 | bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); |
1087 | bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); |
1088 | lp->rx_skb[rx_bd] = skb; |
1089 | |
1090 | lp->rx_bd_tail = rx_bd; |
1091 | update_tail = true; |
1092 | } |
1093 | |
1094 | /* Move tail pointer when buffers have been allocated */ |
1095 | if (update_tail) { |
1096 | lp->dma_out(lp, RX_TAILDESC_PTR, |
1097 | lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail); |
1098 | } |
1099 | |
	spin_unlock_irqrestore(&lp->rx_lock, flags);
1101 | } |
1102 | |
1103 | /* Function scheduled to ensure a restart in case of DMA halt |
1104 | * condition caused by running out of buffer descriptors. |
1105 | */ |
1106 | static void ll_temac_restart_work_func(struct work_struct *work) |
1107 | { |
1108 | struct temac_local *lp = container_of(work, struct temac_local, |
1109 | restart_work.work); |
1110 | struct net_device *ndev = lp->ndev; |
1111 | |
1112 | ll_temac_recv(ndev); |
1113 | } |
1114 | |
1115 | static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) |
1116 | { |
1117 | struct net_device *ndev = _ndev; |
	struct temac_local *lp = netdev_priv(ndev);
1119 | unsigned int status; |
1120 | |
1121 | status = lp->dma_in(lp, TX_IRQ_REG); |
1122 | lp->dma_out(lp, TX_IRQ_REG, status); |
1123 | |
1124 | if (status & (IRQ_COAL | IRQ_DLY)) |
		temac_start_xmit_done(lp->ndev);
1126 | if (status & (IRQ_ERR | IRQ_DMAERR)) |
1127 | dev_err_ratelimited(&ndev->dev, |
1128 | "TX error 0x%x TX_CHNL_STS=0x%08x\n" , |
1129 | status, lp->dma_in(lp, TX_CHNL_STS)); |
1130 | |
1131 | return IRQ_HANDLED; |
1132 | } |
1133 | |
1134 | static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) |
1135 | { |
1136 | struct net_device *ndev = _ndev; |
	struct temac_local *lp = netdev_priv(ndev);
1138 | unsigned int status; |
1139 | |
1140 | /* Read and clear the status registers */ |
1141 | status = lp->dma_in(lp, RX_IRQ_REG); |
1142 | lp->dma_out(lp, RX_IRQ_REG, status); |
1143 | |
1144 | if (status & (IRQ_COAL | IRQ_DLY)) |
		ll_temac_recv(lp->ndev);
1146 | if (status & (IRQ_ERR | IRQ_DMAERR)) |
1147 | dev_err_ratelimited(&ndev->dev, |
1148 | "RX error 0x%x RX_CHNL_STS=0x%08x\n" , |
1149 | status, lp->dma_in(lp, RX_CHNL_STS)); |
1150 | |
1151 | return IRQ_HANDLED; |
1152 | } |
1153 | |
1154 | static int temac_open(struct net_device *ndev) |
1155 | { |
	struct temac_local *lp = netdev_priv(ndev);
1157 | struct phy_device *phydev = NULL; |
1158 | int rc; |
1159 | |
1160 | dev_dbg(&ndev->dev, "temac_open()\n" ); |
1161 | |
1162 | if (lp->phy_node) { |
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
1165 | if (!phydev) { |
1166 | dev_err(lp->dev, "of_phy_connect() failed\n" ); |
1167 | return -ENODEV; |
1168 | } |
1169 | phy_start(phydev); |
1170 | } else if (strlen(lp->phy_name) > 0) { |
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
1176 | } |
1177 | phy_start(phydev); |
1178 | } |
1179 | |
1180 | temac_device_reset(ndev); |
1181 | |
	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
1183 | if (rc) |
1184 | goto err_tx_irq; |
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
1186 | if (rc) |
1187 | goto err_rx_irq; |
1188 | |
1189 | return 0; |
1190 | |
1191 | err_rx_irq: |
1192 | free_irq(lp->tx_irq, ndev); |
1193 | err_tx_irq: |
1194 | if (phydev) |
1195 | phy_disconnect(phydev); |
1196 | dev_err(lp->dev, "request_irq() failed\n" ); |
1197 | return rc; |
1198 | } |
1199 | |
1200 | static int temac_stop(struct net_device *ndev) |
1201 | { |
	struct temac_local *lp = netdev_priv(ndev);
1203 | struct phy_device *phydev = ndev->phydev; |
1204 | |
1205 | dev_dbg(&ndev->dev, "temac_close()\n" ); |
1206 | |
	cancel_delayed_work_sync(&lp->restart_work);
1208 | |
1209 | free_irq(lp->tx_irq, ndev); |
1210 | free_irq(lp->rx_irq, ndev); |
1211 | |
1212 | if (phydev) |
1213 | phy_disconnect(phydev); |
1214 | |
1215 | temac_dma_bd_release(ndev); |
1216 | |
1217 | return 0; |
1218 | } |
1219 | |
1220 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1221 | static void |
1222 | temac_poll_controller(struct net_device *ndev) |
1223 | { |
	struct temac_local *lp = netdev_priv(ndev);
1225 | |
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->tx_irq, ndev);
	ll_temac_tx_irq(lp->rx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
1234 | } |
1235 | #endif |
1236 | |
1237 | static const struct net_device_ops temac_netdev_ops = { |
1238 | .ndo_open = temac_open, |
1239 | .ndo_stop = temac_stop, |
1240 | .ndo_start_xmit = temac_start_xmit, |
1241 | .ndo_set_rx_mode = temac_set_multicast_list, |
1242 | .ndo_set_mac_address = temac_set_mac_address, |
1243 | .ndo_validate_addr = eth_validate_addr, |
1244 | .ndo_eth_ioctl = phy_do_ioctl_running, |
1245 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1246 | .ndo_poll_controller = temac_poll_controller, |
1247 | #endif |
1248 | }; |
1249 | |
1250 | /* --------------------------------------------------------------------- |
1251 | * SYSFS device attributes |
1252 | */ |
1253 | static ssize_t temac_show_llink_regs(struct device *dev, |
1254 | struct device_attribute *attr, char *buf) |
1255 | { |
1256 | struct net_device *ndev = dev_get_drvdata(dev); |
	struct temac_local *lp = netdev_priv(ndev);
1258 | int i, len = 0; |
1259 | |
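	/* Dump the first 0x11 LocalLink DMA registers, eight per line */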
1260 | for (i = 0; i < 0x11; i++) |
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");
1264 | |
1265 | return len; |
1266 | } |
1267 | |
1268 | static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL); |
1269 | |
1270 | static struct attribute *temac_device_attrs[] = { |
1271 | &dev_attr_llink_regs.attr, |
1272 | NULL, |
1273 | }; |
1274 | |
1275 | static const struct attribute_group temac_attr_group = { |
1276 | .attrs = temac_device_attrs, |
1277 | }; |
1278 | |
1279 | /* --------------------------------------------------------------------- |
1280 | * ethtool support |
1281 | */ |
1282 | |
1283 | static void |
1284 | ll_temac_ethtools_get_ringparam(struct net_device *ndev, |
1285 | struct ethtool_ringparam *ering, |
1286 | struct kernel_ethtool_ringparam *kernel_ering, |
1287 | struct netlink_ext_ack *extack) |
1288 | { |
	struct temac_local *lp = netdev_priv(ndev);
1290 | |
1291 | ering->rx_max_pending = RX_BD_NUM_MAX; |
1292 | ering->rx_mini_max_pending = 0; |
1293 | ering->rx_jumbo_max_pending = 0; |
1294 | ering->tx_max_pending = TX_BD_NUM_MAX; |
1295 | ering->rx_pending = lp->rx_bd_num; |
1296 | ering->rx_mini_pending = 0; |
1297 | ering->rx_jumbo_pending = 0; |
1298 | ering->tx_pending = lp->tx_bd_num; |
1299 | } |
1300 | |
1301 | static int |
1302 | ll_temac_ethtools_set_ringparam(struct net_device *ndev, |
1303 | struct ethtool_ringparam *ering, |
1304 | struct kernel_ethtool_ringparam *kernel_ering, |
1305 | struct netlink_ext_ack *extack) |
1306 | { |
	struct temac_local *lp = netdev_priv(ndev);
1308 | |
1309 | if (ering->rx_pending > RX_BD_NUM_MAX || |
1310 | ering->rx_mini_pending || |
1311 | ering->rx_jumbo_pending || |
	    ering->tx_pending > TX_BD_NUM_MAX)
1313 | return -EINVAL; |
1314 | |
	if (netif_running(ndev))
1316 | return -EBUSY; |
1317 | |
1318 | lp->rx_bd_num = ering->rx_pending; |
1319 | lp->tx_bd_num = ering->tx_pending; |
1320 | return 0; |
1321 | } |
1322 | |
1323 | static int |
1324 | ll_temac_ethtools_get_coalesce(struct net_device *ndev, |
1325 | struct ethtool_coalesce *ec, |
1326 | struct kernel_ethtool_coalesce *kernel_coal, |
1327 | struct netlink_ext_ack *extack) |
1328 | { |
	struct temac_local *lp = netdev_priv(ndev);
1330 | |
1331 | ec->rx_max_coalesced_frames = lp->coalesce_count_rx; |
1332 | ec->tx_max_coalesced_frames = lp->coalesce_count_tx; |
1333 | ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100; |
1334 | ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100; |
1335 | return 0; |
1336 | } |
1337 | |
1338 | static int |
1339 | ll_temac_ethtools_set_coalesce(struct net_device *ndev, |
1340 | struct ethtool_coalesce *ec, |
1341 | struct kernel_ethtool_coalesce *kernel_coal, |
1342 | struct netlink_ext_ack *extack) |
1343 | { |
	struct temac_local *lp = netdev_priv(ndev);
1345 | |
	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
1349 | return -EFAULT; |
1350 | } |
1351 | |
1352 | if (ec->rx_max_coalesced_frames) |
1353 | lp->coalesce_count_rx = ec->rx_max_coalesced_frames; |
1354 | if (ec->tx_max_coalesced_frames) |
1355 | lp->coalesce_count_tx = ec->tx_max_coalesced_frames; |
1356 | /* With typical LocalLink clock speed of 200 MHz and |
1357 | * C_PRESCALAR=1023, each delay count corresponds to 5.12 us. |
1358 | */ |
1359 | if (ec->rx_coalesce_usecs) |
1360 | lp->coalesce_delay_rx = |
1361 | min(255U, (ec->rx_coalesce_usecs * 100) / 512); |
1362 | if (ec->tx_coalesce_usecs) |
1363 | lp->coalesce_delay_tx = |
1364 | min(255U, (ec->tx_coalesce_usecs * 100) / 512); |
1365 | |
1366 | return 0; |
1367 | } |
1368 | |
1369 | static const struct ethtool_ops temac_ethtool_ops = { |
1370 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
1371 | ETHTOOL_COALESCE_MAX_FRAMES, |
1372 | .nway_reset = phy_ethtool_nway_reset, |
1373 | .get_link = ethtool_op_get_link, |
1374 | .get_ts_info = ethtool_op_get_ts_info, |
1375 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
1376 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
1377 | .get_ringparam = ll_temac_ethtools_get_ringparam, |
1378 | .set_ringparam = ll_temac_ethtools_set_ringparam, |
1379 | .get_coalesce = ll_temac_ethtools_get_coalesce, |
1380 | .set_coalesce = ll_temac_ethtools_set_coalesce, |
1381 | }; |
1382 | |
1383 | static int temac_probe(struct platform_device *pdev) |
1384 | { |
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
1387 | struct temac_local *lp; |
1388 | struct net_device *ndev; |
1389 | u8 addr[ETH_ALEN]; |
1390 | __be32 *p; |
1391 | bool little_endian; |
1392 | int rc = 0; |
1393 | |
1394 | /* Init network device structure */ |
1395 | ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp)); |
1396 | if (!ndev) |
1397 | return -ENOMEM; |
1398 | |
	platform_set_drvdata(pdev, ndev);
1400 | SET_NETDEV_DEV(ndev, &pdev->dev); |
1401 | ndev->features = NETIF_F_SG; |
1402 | ndev->netdev_ops = &temac_netdev_ops; |
1403 | ndev->ethtool_ops = &temac_ethtool_ops; |
1404 | #if 0 |
1405 | ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */ |
1406 | ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ |
1407 | ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ |
1408 | ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ |
1409 | ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */ |
1410 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */ |
1411 | ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */ |
1412 | ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ |
1413 | ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ |
1414 | ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ |
1415 | ndev->features |= NETIF_F_LRO; /* large receive offload */ |
1416 | #endif |
1417 | |
1418 | /* setup temac private info structure */ |
	lp = netdev_priv(ndev);
1420 | lp->ndev = ndev; |
1421 | lp->dev = &pdev->dev; |
1422 | lp->options = XTE_OPTION_DEFAULTS; |
1423 | lp->rx_bd_num = RX_BD_NUM_DEFAULT; |
1424 | lp->tx_bd_num = TX_BD_NUM_DEFAULT; |
1425 | spin_lock_init(&lp->rx_lock); |
1426 | INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func); |
1427 | |
1428 | /* Setup mutex for synchronization of indirect register access */ |
1429 | if (pdata) { |
1430 | if (!pdata->indirect_lock) { |
1431 | dev_err(&pdev->dev, |
1432 | "indirect_lock missing in platform_data\n" ); |
1433 | return -EINVAL; |
1434 | } |
1435 | lp->indirect_lock = pdata->indirect_lock; |
1436 | } else { |
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
1440 | if (!lp->indirect_lock) |
1441 | return -ENOMEM; |
1442 | spin_lock_init(lp->indirect_lock); |
1443 | } |
1444 | |
1445 | /* map device registers */ |
	lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
1449 | return -ENOMEM; |
1450 | } |
1451 | |
1452 | /* Select register access functions with the specified |
1453 | * endianness mode. Default for OF devices is big-endian. |
1454 | */ |
1455 | little_endian = false; |
1456 | if (temac_np) |
1457 | little_endian = of_property_read_bool(np: temac_np, propname: "little-endian" ); |
1458 | else if (pdata) |
1459 | little_endian = pdata->reg_little_endian; |
1460 | |
1461 | if (little_endian) { |
1462 | lp->temac_ior = _temac_ior_le; |
1463 | lp->temac_iow = _temac_iow_le; |
1464 | } else { |
1465 | lp->temac_ior = _temac_ior_be; |
1466 | lp->temac_iow = _temac_iow_be; |
1467 | } |
1468 | |
1469 | /* Setup checksum offload, but default to off if not specified */ |
1470 | lp->temac_features = 0; |
1471 | if (temac_np) { |
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
1473 | if (p && be32_to_cpu(*p)) |
1474 | lp->temac_features |= TEMAC_FEATURE_TX_CSUM; |
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
1476 | if (p && be32_to_cpu(*p)) |
1477 | lp->temac_features |= TEMAC_FEATURE_RX_CSUM; |
1478 | } else if (pdata) { |
1479 | if (pdata->txcsum) |
1480 | lp->temac_features |= TEMAC_FEATURE_TX_CSUM; |
1481 | if (pdata->rxcsum) |
1482 | lp->temac_features |= TEMAC_FEATURE_RX_CSUM; |
1483 | } |
1484 | if (lp->temac_features & TEMAC_FEATURE_TX_CSUM) |
1485 | /* Can checksum TCP/UDP over IPv4. */ |
1486 | ndev->features |= NETIF_F_IP_CSUM; |
1487 | |
1488 | /* Defaults for IRQ delay/coalescing setup. These are |
	 * configuration values, so they do not belong in the device tree.
1490 | */ |
1491 | lp->coalesce_delay_tx = 0x10; |
1492 | lp->coalesce_count_tx = 0x22; |
1493 | lp->coalesce_delay_rx = 0xff; |
1494 | lp->coalesce_count_rx = 0x07; |
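	/* At the typical 5.12 us per delay count (see
	 * ll_temac_ethtools_set_coalesce()), these defaults are roughly
	 * 82 us / 34 frames for TX and ~1.3 ms / 7 frames for RX.
	 */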
1495 | |
1496 | /* Setup LocalLink DMA */ |
1497 | if (temac_np) { |
1498 | /* Find the DMA node, map the DMA registers, and |
1499 | * decode the DMA IRQs. |
1500 | */ |
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
1504 | return -ENODEV; |
1505 | } |
1506 | |
1507 | /* Setup the DMA register accesses, could be DCR or |
1508 | * memory mapped. |
1509 | */ |
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
1519 | } |
			if (of_property_read_bool(dma_np, "little-endian")) {
1521 | lp->dma_in = temac_dma_in32_le; |
1522 | lp->dma_out = temac_dma_out32_le; |
1523 | } else { |
1524 | lp->dma_in = temac_dma_in32_be; |
1525 | lp->dma_out = temac_dma_out32_be; |
1526 | } |
1527 | dev_dbg(&pdev->dev, "MEM base: %p\n" , lp->sdma_regs); |
1528 | } |
1529 | |
1530 | /* Get DMA RX and TX interrupts */ |
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
1533 | |
1534 | /* Finished with the DMA node; drop the reference */ |
		of_node_put(dma_np);
1536 | } else if (pdata) { |
1537 | /* 2nd memory resource specifies DMA registers */ |
		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
1543 | } |
1544 | if (pdata->dma_little_endian) { |
1545 | lp->dma_in = temac_dma_in32_le; |
1546 | lp->dma_out = temac_dma_out32_le; |
1547 | } else { |
1548 | lp->dma_in = temac_dma_in32_be; |
1549 | lp->dma_out = temac_dma_out32_be; |
1550 | } |
1551 | |
1552 | /* Get DMA RX and TX interrupts */ |
1553 | lp->rx_irq = platform_get_irq(pdev, 0); |
1554 | lp->tx_irq = platform_get_irq(pdev, 1); |
1555 | |
1556 | /* IRQ delay/coalescing setup */ |
1557 | if (pdata->tx_irq_timeout || pdata->tx_irq_count) { |
1558 | lp->coalesce_delay_tx = pdata->tx_irq_timeout; |
1559 | lp->coalesce_count_tx = pdata->tx_irq_count; |
1560 | } |
1561 | if (pdata->rx_irq_timeout || pdata->rx_irq_count) { |
1562 | lp->coalesce_delay_rx = pdata->rx_irq_timeout; |
1563 | lp->coalesce_count_rx = pdata->rx_irq_count; |
1564 | } |
1565 | } |
1566 | |
	/* Error-check the DMA RX and TX interrupts obtained above */
1568 | if (lp->rx_irq <= 0) { |
1569 | rc = lp->rx_irq ?: -EINVAL; |
		return dev_err_probe(&pdev->dev, rc,
				     "could not get DMA RX irq\n");
1572 | } |
1573 | if (lp->tx_irq <= 0) { |
1574 | rc = lp->tx_irq ?: -EINVAL; |
		return dev_err_probe(&pdev->dev, rc,
				     "could not get DMA TX irq\n");
1577 | } |
1578 | |
1579 | if (temac_np) { |
1580 | /* Retrieve the MAC address */ |
		rc = of_get_mac_address(temac_np, addr);
		if (rc) {
			dev_err(&pdev->dev, "could not find MAC address\n");
1584 | return -ENODEV; |
1585 | } |
		temac_init_mac_address(ndev, addr);
1587 | } else if (pdata) { |
		temac_init_mac_address(ndev, pdata->mac_addr);
1589 | } |
1590 | |
1591 | rc = temac_mdio_setup(lp, pdev); |
1592 | if (rc) |
1593 | dev_warn(&pdev->dev, "error registering MDIO bus\n" ); |
1594 | |
1595 | if (temac_np) { |
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
1599 | } else if (pdata) { |
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
1602 | lp->phy_interface = pdata->phy_interface; |
1603 | } |
1604 | |
1605 | /* Add the device attributes */ |
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
1609 | goto err_sysfs_create; |
1610 | } |
1611 | |
	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
1615 | goto err_register_ndev; |
1616 | } |
1617 | |
1618 | return 0; |
1619 | |
1620 | err_register_ndev: |
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1622 | err_sysfs_create: |
1623 | if (lp->phy_node) |
		of_node_put(lp->phy_node);
1625 | temac_mdio_teardown(lp); |
1626 | return rc; |
1627 | } |
1628 | |
1629 | static void temac_remove(struct platform_device *pdev) |
1630 | { |
1631 | struct net_device *ndev = platform_get_drvdata(pdev); |
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
1638 | temac_mdio_teardown(lp); |
1639 | } |
1640 | |
1641 | static const struct of_device_id temac_of_match[] = { |
1642 | { .compatible = "xlnx,xps-ll-temac-1.01.b" , }, |
1643 | { .compatible = "xlnx,xps-ll-temac-2.00.a" , }, |
1644 | { .compatible = "xlnx,xps-ll-temac-2.02.a" , }, |
1645 | { .compatible = "xlnx,xps-ll-temac-2.03.a" , }, |
1646 | {}, |
1647 | }; |
1648 | MODULE_DEVICE_TABLE(of, temac_of_match); |
1649 | |
1650 | static struct platform_driver temac_driver = { |
1651 | .probe = temac_probe, |
1652 | .remove_new = temac_remove, |
1653 | .driver = { |
1654 | .name = "xilinx_temac" , |
1655 | .of_match_table = temac_of_match, |
1656 | }, |
1657 | }; |
1658 | |
1659 | module_platform_driver(temac_driver); |
1660 | |
1661 | MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver" ); |
1662 | MODULE_AUTHOR("Yoshio Kashiwagi" ); |
1663 | MODULE_LICENSE("GPL" ); |
1664 | |