// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

static int greth_debug = -1;	/* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
					 struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

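/*
 * GRETH registers and buffer descriptors are big-endian on the AMBA bus,
 * so accesses go through the non-swapping __raw_readl()/__raw_writel()
 * combined with explicit be32_to_cpu()/cpu_to_be32() conversions.
 */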
#define GRETH_REGLOAD(a)	(be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)	(__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

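/*
 * Ring index arithmetic: wrapping by masking assumes the descriptor counts
 * are powers of two, i.e. GRETH_TXBD_NUM_MASK == GRETH_TXBD_NUM - 1
 * (and likewise for RX).
 */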
#define NEXT_TX(N)	(((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)	(((N) + (C)) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)	(((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
	print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
		       addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
	int i;
	int length;

	if (skb_shinfo(skb)->nr_frags == 0)
		length = skb->len;
	else
		length = skb_headlen(skb);

	print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, length, true);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

		print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
			       skb_frag_address(&skb_shinfo(skb)->frags[i]),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
	}
}

static inline void greth_enable_tx(struct greth_private *greth)
{
	wmb(); /* BDs must be written to memory before enabling TX */
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_enable_tx_and_irq(struct greth_private *greth)
{
	wmb(); /* BDs must be written to memory before enabling TX */
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
	wmb(); /* BDs must be written to memory before enabling RX */
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
	GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI | GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
	__raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
	return be32_to_cpu(__raw_readl(bd));
}

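/*
 * Free and unmap all RX/TX buffers. The gigabit MAC uses SKBs mapped with
 * streaming DMA; the 10/100 MAC uses fixed kmalloc()ed bounce buffers that
 * frames are copied to and from.
 */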
static void greth_clean_rings(struct greth_private *greth)
{
	int i;
	struct greth_bd *rx_bdp = greth->rx_bd_base;
	struct greth_bd *tx_bdp = greth->tx_bd_base;

	if (greth->gbit_mac) {

		/* Free and unmap RX buffers */
		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			if (greth->rx_skbuff[i] != NULL) {
				dev_kfree_skb(greth->rx_skbuff[i]);
				dma_unmap_single(greth->dev,
						 greth_read_bd(&rx_bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);
			}
		}

		/* TX buffers */
		while (greth->tx_free < GRETH_TXBD_NUM) {

			struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
			int nr_frags = skb_shinfo(skb)->nr_frags;

			tx_bdp = greth->tx_bd_base + greth->tx_last;
			greth->tx_last = NEXT_TX(greth->tx_last);

			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);

			for (i = 0; i < nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				tx_bdp = greth->tx_bd_base + greth->tx_last;

				dma_unmap_page(greth->dev,
					       greth_read_bd(&tx_bdp->addr),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);

				greth->tx_last = NEXT_TX(greth->tx_last);
			}
			greth->tx_free += nr_frags + 1;
			dev_kfree_skb(skb);
		}

	} else { /* 10/100 Mbps MAC */

		for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
			kfree(greth->rx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&rx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_FROM_DEVICE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
			kfree(greth->tx_bufs[i]);
			dma_unmap_single(greth->dev,
					 greth_read_bd(&tx_bdp->addr),
					 MAX_FRAME_SIZE,
					 DMA_TO_DEVICE);
		}
	}
}

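/*
 * Allocate and DMA-map the ring buffers, then point the controller at the
 * descriptor tables (allocated DMA-coherent at probe time).
 */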
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE + NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				dev_kfree_skb(skb);
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copies to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	/* Mark the last RX descriptor with the wrap bit */
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	int err;

	err = greth_init_rings(greth);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
		return err;
	}

	err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
		greth_clean_rings(greth);
		return err;
	}

	if (netif_msg_ifup(greth))
		dev_dbg(&dev->dev, " starting queue\n");
	netif_start_queue(dev);

	GRETH_REGSAVE(greth->regs->status, 0xFF);

	napi_enable(&greth->napi);

	greth_enable_irqs(greth);
	greth_enable_tx(greth);
	greth_enable_rx(greth);
	return 0;
}

static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_irqs(greth);
	greth_disable_tx(greth);
	greth_disable_rx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}

static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);
	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	dev_kfree_skb(skb);
	return err;
}

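/*
 * Free TX descriptors between tx_next (producer) and tx_last (consumer).
 * One slot is always left unused so a full ring can be distinguished
 * from an empty one.
 */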
static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	else
		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;
	u16 tx_last;

	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_last = greth->tx_last;
	rmb(); /* tx_last is updated by the poll task */

	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;
	else
		status = GRETH_BD_IE;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_BD_EN;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			status |= GRETH_TXBD_CSALL;
		status |= skb_frag_size(frag) & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
					    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat,
		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
	greth->tx_next = curr_tx;
	greth_enable_tx_and_irq(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor.
	 * Use SKIP_TX() so the walk wraps correctly at the end of the ring.
	 */
	for (i = 0; SKIP_TX(greth->tx_next, i) != curr_tx; i++) {
		bdp = greth->tx_bd_base + SKIP_TX(greth->tx_next, i);
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct greth_private *greth;
	u32 status, ctrl;
	irqreturn_t retval = IRQ_NONE;

	greth = netdev_priv(dev);

	spin_lock(&greth->devlock);

	/* Get the interrupt events that caused us to be here. */
	status = GRETH_REGLOAD(greth->regs->status);

	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
	 * set regardless of whether IRQ is enabled or not. Especially
	 * important when shared IRQ.
	 */
	ctrl = GRETH_REGLOAD(greth->regs->control);

	/* Handle rx and tx interrupts through poll */
	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
		retval = IRQ_HANDLED;

		/* Disable interrupts and schedule poll() */
		greth_disable_irqs(greth);
		napi_schedule(&greth->napi);
	}

	spin_unlock(&greth->devlock);

	return retval;
}

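/*
 * Reclaim completed TX descriptors for the 10/100 MAC. A descriptor is
 * owned by the hardware until it clears GRETH_BD_EN.
 */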
static void greth_clean_tx(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	u32 stat;

	greth = netdev_priv(dev);

	while (1) {
		bdp = greth->tx_bd_base + greth->tx_last;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp->stat);

		if (unlikely(stat & GRETH_BD_EN))
			break;

		if (greth->tx_free == GRETH_TXBD_NUM)
			break;

		/* Check status for errors */
		if (unlikely(stat & GRETH_TXBD_STATUS)) {
			dev->stats.tx_errors++;
			if (stat & GRETH_TXBD_ERR_AL)
				dev->stats.tx_aborted_errors++;
			if (stat & GRETH_TXBD_ERR_UE)
				dev->stats.tx_fifo_errors++;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
		greth->tx_last = NEXT_TX(greth->tx_last);
		greth->tx_free++;
	}

	if (greth->tx_free > 0)
		netif_wake_queue(dev);
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
	/* Check status for errors */
	if (unlikely(stat & GRETH_TXBD_STATUS)) {
		dev->stats.tx_errors++;
		if (stat & GRETH_TXBD_ERR_AL)
			dev->stats.tx_aborted_errors++;
		if (stat & GRETH_TXBD_ERR_UE)
			dev->stats.tx_fifo_errors++;
		if (stat & GRETH_TXBD_ERR_LC)
			dev->stats.tx_aborted_errors++;
	}
	dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb = NULL;
	u32 stat;
	int nr_frags, i;
	u16 tx_last;

	greth = netdev_priv(dev);
	tx_last = greth->tx_last;

	while (tx_last != greth->tx_next) {

		skb = greth->tx_skbuff[tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp_last_frag->stat);

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + tx_last;

		tx_last = NEXT_TX(tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			bdp = greth->tx_bd_base + tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			tx_last = NEXT_TX(tx_last);
		}
		dev_kfree_skb(skb);
	}
	if (skb) { /* skb is set only if the above while loop was entered */
		wmb();
		greth->tx_last = tx_last;

		if (netif_queue_stopped(dev) &&
		    (greth_num_free_bds(tx_last, greth->tx_next) >
		     (MAX_SKB_FRAGS + 1)))
			netif_wake_queue(dev);
	}
}

static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN))
			break;

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;
		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				skb_put_data(skb, phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

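/*
 * Return 1 if the hardware verified the IP/TCP/UDP checksums of a received
 * frame, so the stack can skip its own check. IP fragments are never
 * reported as checksummed.
 */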
static inline int hw_checksummed(u32 status)
{
	if (status & GRETH_RXBD_IP_FRAG)
		return 0;

	if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
		return 0;

	if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
		return 0;

	if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
		return 0;

	return 1;
}

static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}

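/*
 * NAPI poll. greth_interrupt() disabled the interrupts; before completing
 * we re-enable them and re-check the status register under the lock, so
 * an event racing with the re-enable is not lost.
 */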
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	unsigned long flags;
	u32 mask, ctrl;

	greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		if (netif_queue_stopped(greth->netdev))
			greth_clean_tx(greth->netdev);
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		spin_lock_irqsave(&greth->devlock, flags);

		ctrl = GRETH_REGLOAD(greth->regs->control);
		if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
		    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
			GRETH_REGSAVE(greth->regs->control,
				      ctrl | GRETH_TXI | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE |
			       GRETH_INT_TX | GRETH_INT_TE;
		} else {
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE;
		}

		if (GRETH_REGLOAD(greth->regs->status) & mask) {
			GRETH_REGSAVE(greth->regs->control, ctrl);
			spin_unlock_irqrestore(&greth->devlock, flags);
			goto restart_txrx_poll;
		} else {
			napi_complete_done(napi, work_done);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
	}

	return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct greth_private *greth;
	struct greth_regs *regs;

	greth = netdev_priv(dev);
	regs = greth->regs;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);
	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	return 0;
}

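/* Map a multicast address to one of the 64 bits of the hardware hash filter */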
static u32 greth_hash_get_index(__u8 *addr)
{
	return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = greth->regs;
	u32 mc_filter[2];
	unsigned int bitnr;

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = greth_hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
	GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
	int cfg;
	struct greth_private *greth = netdev_priv(dev);
	struct greth_regs *regs = greth->regs;

	cfg = GRETH_REGLOAD(regs->control);
	if (dev->flags & IFF_PROMISC)
		cfg |= GRETH_CTRL_PR;
	else
		cfg &= ~GRETH_CTRL_PR;

	if (greth->multicast) {
		if (dev->flags & IFF_ALLMULTI) {
			GRETH_REGSAVE(regs->hash_msb, -1);
			GRETH_REGSAVE(regs->hash_lsb, -1);
			cfg |= GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		if (netdev_mc_empty(dev)) {
			cfg &= ~GRETH_CTRL_MCEN;
			GRETH_REGSAVE(regs->control, cfg);
			return;
		}

		/* Setup multicast filter */
		greth_set_hash_filter(dev);
		cfg |= GRETH_CTRL_MCEN;
	}
	GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
	struct greth_private *greth = netdev_priv(dev);

	greth->msg_enable = value;
}

static int greth_get_regs_len(struct net_device *dev)
{
	return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct greth_private *greth = netdev_priv(dev);

	strscpy(info->driver, dev_driver_string(greth->dev),
		sizeof(info->driver));
	strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	int i;
	struct greth_private *greth = netdev_priv(dev);
	u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
	u32 *buff = p;

	for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
		buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
	.get_msglevel		= greth_get_msglevel,
	.set_msglevel		= greth_set_msglevel,
	.get_drvinfo		= greth_get_drvinfo,
	.get_regs_len		= greth_get_regs_len,
	.get_regs		= greth_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* Not const: greth_of_probe() patches in gigabit and multicast callbacks */
static struct net_device_ops greth_netdev_ops = {
	.ndo_open		= greth_open,
	.ndo_stop		= greth_close,
	.ndo_start_xmit		= greth_start_xmit,
	.ndo_set_mac_address	= greth_set_mac_add,
	.ndo_validate_addr	= eth_validate_addr,
};

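/* Busy-wait until the MDIO interface is idle, at most 4*HZ/100 jiffies (~40 ms) */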
static inline int wait_for_mdio(struct greth_private *greth)
{
	unsigned long timeout = jiffies + 4*HZ/100;

	while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
		if (time_after(jiffies, timeout))
			return 0;
	}
	return 1;
}

static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct greth_private *greth = bus->priv;
	int data;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
		data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
		return data;

	} else {
		return -1;
	}
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct greth_private *greth = bus->priv;

	if (!wait_for_mdio(greth))
		return -EBUSY;

	GRETH_REGSAVE(greth->regs->mdio,
		      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

	if (!wait_for_mdio(greth))
		return -EBUSY;

	return 0;
}

static void greth_link_change(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;
	u32 ctrl;

	spin_lock_irqsave(&greth->devlock, flags);

	if (phydev->link) {

		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
			ctrl = GRETH_REGLOAD(greth->regs->control) &
			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

			if (phydev->duplex)
				ctrl |= GRETH_CTRL_FD;

			if (phydev->speed == SPEED_100)
				ctrl |= GRETH_CTRL_SP;
			else if (phydev->speed == SPEED_1000)
				ctrl |= GRETH_CTRL_GB;

			GRETH_REGSAVE(greth->regs->control, ctrl);
			greth->speed = phydev->speed;
			greth->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != greth->link) {
		if (!phydev->link) {
			greth->speed = 0;
			greth->duplex = -1;
		}
		greth->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&greth->devlock, flags);

	if (status_change) {
		if (phydev->link)
			pr_debug("%s: link up (%d/%s)\n",
				 dev->name, phydev->speed,
				 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			pr_debug("%s: link down\n", dev->name);
	}
}

static int greth_mdio_probe(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct phy_device *phy = NULL;
	int ret;

	/* Find the first PHY */
	phy = phy_find_first(greth->mdio);

	if (!phy) {
		if (netif_msg_probe(greth))
			dev_err(&dev->dev, "no PHY found\n");
		return -ENXIO;
	}

	ret = phy_connect_direct(dev, phy, &greth_link_change,
				 greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
	if (ret) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "could not attach to PHY\n");
		return ret;
	}

	if (greth->gbit_mac)
		phy_set_max_speed(phy, SPEED_1000);
	else
		phy_set_max_speed(phy, SPEED_100);

	linkmode_copy(phy->advertising, phy->supported);

	greth->link = 0;
	greth->speed = 0;
	greth->duplex = -1;

	return 0;
}

static int greth_mdio_init(struct greth_private *greth)
{
	int ret;
	unsigned long timeout;
	struct net_device *ndev = greth->netdev;

	greth->mdio = mdiobus_alloc();
	if (!greth->mdio)
		return -ENOMEM;

	greth->mdio->name = "greth-mdio";
	snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
	greth->mdio->read = greth_mdio_read;
	greth->mdio->write = greth_mdio_write;
	greth->mdio->priv = greth;

	ret = mdiobus_register(greth->mdio);
	if (ret)
		goto error;

	ret = greth_mdio_probe(greth->netdev);
	if (ret) {
		if (netif_msg_probe(greth))
			dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
		goto unreg_mdio;
	}

	phy_start(ndev->phydev);

	/* If Ethernet debug link is used make autoneg happen right away */
	if (greth->edcl && greth_edcl == 1) {
		phy_start_aneg(ndev->phydev);
		timeout = jiffies + 6*HZ;
		while (!phy_aneg_done(ndev->phydev) &&
		       time_before(jiffies, timeout)) {
			/* busy-wait for autonegotiation to complete */
		}
		phy_read_status(ndev->phydev);
		greth_link_change(greth->netdev);
	}

	return 0;

unreg_mdio:
	mdiobus_unregister(greth->mdio);
error:
	mdiobus_free(greth->mdio);
	return ret;
}

/* Initialize the GRETH MAC */
static int greth_of_probe(struct platform_device *ofdev)
{
	struct net_device *dev;
	struct greth_private *greth;
	struct greth_regs *regs;

	int i;
	int err;
	int tmp;
	u8 addr[ETH_ALEN];
	unsigned long timeout;

	dev = alloc_etherdev(sizeof(struct greth_private));

	if (dev == NULL)
		return -ENOMEM;

	greth = netdev_priv(dev);
	greth->netdev = dev;
	greth->dev = &ofdev->dev;

	if (greth_debug > 0)
		greth->msg_enable = greth_debug;
	else
		greth->msg_enable = GRETH_DEF_MSG_ENABLE;

	spin_lock_init(&greth->devlock);

	greth->regs = of_ioremap(&ofdev->resource[0], 0,
				 resource_size(&ofdev->resource[0]),
				 "grlib-greth regs");

	if (greth->regs == NULL) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "ioremap failure.\n");
		err = -EIO;
		goto error1;
	}

	regs = greth->regs;
	greth->irq = ofdev->archdata.irqs[0];

	dev_set_drvdata(greth->dev, dev);
	SET_NETDEV_DEV(dev, greth->dev);

	if (netif_msg_probe(greth))
		dev_dbg(greth->dev, "resetting controller.\n");

	/* Reset the controller. */
	GRETH_REGSAVE(regs->control, GRETH_RESET);

	/* Wait for MAC to reset itself */
	timeout = jiffies + HZ/100;
	while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
		if (time_after(jiffies, timeout)) {
			err = -EIO;
			if (netif_msg_probe(greth))
				dev_err(greth->dev, "timeout when waiting for reset.\n");
			goto error2;
		}
	}

	/* Get default PHY address */
	greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

	/* Check if we have GBIT capable MAC */
	tmp = GRETH_REGLOAD(regs->control);
	greth->gbit_mac = (tmp >> 27) & 1;

	/* Check for multicast capability */
	greth->multicast = (tmp >> 25) & 1;

	greth->edcl = (tmp >> 31) & 1;

	/* If we have EDCL we disable the EDCL speed-duplex FSM so
	 * it doesn't interfere with the software */
	if (greth->edcl != 0)
		GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

	/* Check if MAC can handle MDIO interrupts */
	greth->mdio_int_en = (tmp >> 26) & 1;

	err = greth_mdio_init(greth);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "failed to register MDIO bus\n");
		goto error2;
	}

	/* Allocate TX descriptor ring in coherent memory */
	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
					       &greth->tx_bd_base_phys,
					       GFP_KERNEL);
	if (!greth->tx_bd_base) {
		err = -ENOMEM;
		goto error3;
	}

	/* Allocate RX descriptor ring in coherent memory */
	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
					       &greth->rx_bd_base_phys,
					       GFP_KERNEL);
	if (!greth->rx_bd_base) {
		err = -ENOMEM;
		goto error4;
	}

	/* Get MAC address from: module param, OF property or ID prom */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i == 6) {
		err = of_get_mac_address(ofdev->dev.of_node, addr);
		if (!err) {
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) addr[i];
		} else {
#ifdef CONFIG_SPARC
			for (i = 0; i < 6; i++)
				macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
		}
	}

	for (i = 0; i < 6; i++)
		addr[i] = macaddr[i];
	eth_hw_addr_set(dev, addr);

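	/* Bump the last octet so a subsequently probed GRETH core gets a
	 * distinct default address (assumed intent of this increment).
	 */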
	macaddr[5]++;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "no valid ethernet address, aborting.\n");
		err = -EINVAL;
		goto error5;
	}

	GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
		      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

	/* Clear all pending interrupts except PHY irq */
	GRETH_REGSAVE(regs->status, 0xFF);

	if (greth->gbit_mac) {
		dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				   NETIF_F_RXCSUM;
		dev->features = dev->hw_features | NETIF_F_HIGHDMA;
		greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
	}

	if (greth->multicast) {
		greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
		dev->flags |= IFF_MULTICAST;
	} else {
		dev->flags &= ~IFF_MULTICAST;
	}

	dev->netdev_ops = &greth_netdev_ops;
	dev->ethtool_ops = &greth_ethtool_ops;

	err = register_netdev(dev);
	if (err) {
		if (netif_msg_probe(greth))
			dev_err(greth->dev, "netdevice registration failed.\n");
		goto error5;
	}

	/* setup NAPI */
	netif_napi_add(dev, &greth->napi, greth_poll);

	return 0;

error5:
	dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
	dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
	mdiobus_unregister(greth->mdio);
error2:
	of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
	free_netdev(dev);
	return err;
}

static void greth_of_remove(struct platform_device *of_dev)
{
	struct net_device *ndev = platform_get_drvdata(of_dev);
	struct greth_private *greth = netdev_priv(ndev);

	/* Free descriptor areas */
	dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

	dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	mdiobus_unregister(greth->mdio);

	unregister_netdev(ndev);

	of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

	free_netdev(ndev);
}

static const struct of_device_id greth_of_match[] = {
	{
		.name = "GAISLER_ETHMAC",
	},
	{
		.name = "01_01d",
	},
	{},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
	.driver = {
		.name = "grlib-greth",
		.of_match_table = greth_of_match,
	},
	.probe = greth_of_probe,
	.remove_new = greth_of_remove,
};

module_platform_driver(greth_of_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");