// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
 *
 * Driver for the ARC EMAC 10100 (hardware revision 5)
 *
 * Contributors:
 *		Amit Bhor
 *		Sameer Dhavale
 *		Vineet Gupta
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "emac.h"

static void arc_emac_restart(struct net_device *ndev);

/**
 * arc_emac_tx_avail - Return the number of available slots in the tx ring.
 * @priv: Pointer to ARC EMAC private data structure.
 *
 * returns: the number of slots available for transmission in the Tx ring.
 */
static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
{
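	/* Free slots are the dirty-to-current distance modulo the ring
	 * size, minus one so the ring never fills completely and
	 * txbd_curr == txbd_dirty always means "empty".  For example, in
	 * an 8-entry ring with txbd_curr = 5 and txbd_dirty = 3 this is
	 * (3 + 8 - 5 - 1) % 8 = 5 free slots.
	 */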
	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
}

/**
 * arc_emac_adjust_link - Adjust the PHY link duplex.
 * @ndev: Pointer to the net_device structure.
 *
 * This function is called to change the duplex setting after auto negotiation
 * is done by the PHY.
 */
static void arc_emac_adjust_link(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	unsigned int reg, state_changed = 0;

	if (priv->link != phy_dev->link) {
		priv->link = phy_dev->link;
		state_changed = 1;
	}

	if (priv->speed != phy_dev->speed) {
		priv->speed = phy_dev->speed;
		state_changed = 1;
		if (priv->set_mac_speed)
			priv->set_mac_speed(priv, priv->speed);
	}

	if (priv->duplex != phy_dev->duplex) {
		reg = arc_reg_get(priv, R_CTRL);

		if (phy_dev->duplex == DUPLEX_FULL)
			reg |= ENFL_MASK;
		else
			reg &= ~ENFL_MASK;

		arc_reg_set(priv, R_CTRL, reg);
		priv->duplex = phy_dev->duplex;
		state_changed = 1;
	}

	if (state_changed)
		phy_print_status(phy_dev);
}

/**
 * arc_emac_get_drvinfo - Get EMAC driver information.
 * @ndev: Pointer to net_device structure.
 * @info: Pointer to ethtool_drvinfo structure.
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" from the Linux prompt to execute this function.
 */
static void arc_emac_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}

static const struct ethtool_ops arc_emac_ethtool_ops = {
	.get_drvinfo	= arc_emac_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

#define FIRST_OR_LAST_MASK	(FIRST_MASK | LAST_MASK)
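
/* A frame occupies a single BD in this driver (buffers are sized for a
 * full MTU frame), so a completed descriptor is expected to carry both
 * the FIRST and LAST flags.
 */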

/**
 * arc_emac_tx_clean - reclaim Tx BDs that have been processed by the EMAC.
 * @ndev: Pointer to the network device.
 */
static void arc_emac_tx_clean(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		unsigned int *txbd_dirty = &priv->txbd_dirty;
		struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
		struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
		struct sk_buff *skb = tx_buff->skb;
		unsigned int info = le32_to_cpu(txbd->info);

		if ((info & FOR_EMAC) || !txbd->data || !skb)
			break;

		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
			stats->tx_errors++;
			stats->tx_dropped++;

			if (info & DEFR)
				stats->tx_carrier_errors++;

			if (info & LTCL)
				stats->collisions++;

			if (info & UFLO)
				stats->tx_fifo_errors++;
		} else if (likely(info & FIRST_OR_LAST_MASK)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
		}

		dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
				 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);

		/* return the sk_buff to system */
		dev_consume_skb_irq(skb);

		txbd->data = 0;
		txbd->info = 0;
		tx_buff->skb = NULL;

		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
	}

	/* Ensure that txbd_dirty is visible to tx() before checking
	 * for queue stopped.
	 */
	smp_mb();

	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);
}

/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, the driver starts from the next BD.
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}

/**
 * arc_emac_rx_miss_handle - handle R_MISS register
 * @ndev: Pointer to the net_device structure.
 */
static void arc_emac_rx_miss_handle(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int miss;

	miss = arc_reg_get(priv, R_MISS);
	if (miss) {
		stats->rx_errors += miss;
		stats->rx_missed_errors += miss;
		priv->rx_missed_errors += miss;
	}
}

/**
 * arc_emac_rx_stall_check - check RX stall
 * @ndev: Pointer to the net_device structure.
 * @budget: How many BDs requested to process on 1 call.
 * @work_done: How many BDs processed
 *
 * Under certain conditions the EMAC stops receiving incoming packets and
 * continuously increments the R_MISS register instead of saving data into
 * the provided buffers. This function detects that condition and restarts
 * the EMAC.
 */
static void arc_emac_rx_stall_check(struct net_device *ndev,
				    int budget, unsigned int work_done)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct arc_emac_bd *rxbd;

	if (work_done)
		priv->rx_missed_errors = 0;

	if (priv->rx_missed_errors && budget) {
		rxbd = &priv->rxbd[priv->last_rx_bd];
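		/* The BD the EMAC should fill next is still owned by
		 * hardware, yet R_MISS keeps counting: reception has
		 * stalled, so reset the MAC to recover.
		 */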
		if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
			arc_emac_restart(ndev);
			priv->rx_missed_errors = 0;
		}
	}
}

/**
 * arc_emac_poll - NAPI poll handler.
 * @napi: Pointer to napi_struct structure.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 */
static int arc_emac_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

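	/* Tx completions and the R_MISS counter are handled from the same
	 * NAPI context as Rx, since the EMAC signals all of these events
	 * through its single interrupt line.
	 */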
	arc_emac_tx_clean(ndev);
	arc_emac_rx_miss_handle(ndev);

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
	}

	arc_emac_rx_stall_check(ndev, budget, work_done);

	return work_done;
}

/**
 * arc_emac_intr - Global interrupt handler for EMAC.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * The ARC EMAC has a single interrupt line; the bits raised in the STATUS
 * register tell which event caused the interrupt to fire.
 */
static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int status;

	status = arc_reg_get(priv, R_STATUS);
	status &= ~MDIO_MASK;

	/* Reset all flags except "MDIO complete" */
	arc_reg_set(priv, R_STATUS, status);

	if (status & (RXINT_MASK | TXINT_MASK)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	if (status & ERR_MASK) {
		/* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
		 * 8-bit error counter overrun.
		 */

		if (status & MSER_MASK) {
			stats->rx_missed_errors += 0x100;
			stats->rx_errors += 0x100;
			priv->rx_missed_errors += 0x100;
			napi_schedule(&priv->napi);
		}

		if (status & RXCR_MASK) {
			stats->rx_crc_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFR_MASK) {
			stats->rx_frame_errors += 0x100;
			stats->rx_errors += 0x100;
		}

		if (status & RXFL_MASK) {
			stats->rx_over_errors += 0x100;
			stats->rx_errors += 0x100;
		}
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void arc_emac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	arc_emac_intr(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function allocates and maps the Rx buffers, programs the BD ring
 * pointers, enables interrupts and the EMAC, and starts the PHY and the
 * Tx queue.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * arc_emac_set_rx_mode - Change the receive filtering mode.
 * @ndev: Pointer to the network device.
 *
 * This function enables/disables promiscuous or all-multicast mode
 * and updates the multicast filtering list of the network device.
 */
static void arc_emac_set_rx_mode(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		arc_reg_or(priv, R_CTRL, PROM_MASK);
	} else {
		arc_reg_clr(priv, R_CTRL, PROM_MASK);

		if (ndev->flags & IFF_ALLMULTI) {
			arc_reg_set(priv, R_LAFL, ~0);
			arc_reg_set(priv, R_LAFH, ~0);
		} else if (ndev->flags & IFF_MULTICAST) {
			struct netdev_hw_addr *ha;
			unsigned int filter[2] = { 0, 0 };
			int bit;

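			/* The EMAC hashes multicast addresses into a 64-bit
			 * logical address filter: the top six bits of the
			 * little-endian CRC32 of each address pick one of
			 * 64 bits, split across the LAFL (bits 0-31) and
			 * LAFH (bits 32-63) registers.
			 */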
			netdev_for_each_mc_addr(ha, ndev) {
				bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
				filter[bit >> 5] |= 1 << (bit & 31);
			}

			arc_reg_set(priv, R_LAFL, filter[0]);
			arc_reg_set(priv, R_LAFH, filter[1]);
		} else {
			arc_reg_set(priv, R_LAFL, 0);
			arc_reg_set(priv, R_LAFH, 0);
		}
	}
}

/**
 * arc_free_tx_queue - free skbs from the Tx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_tx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < TX_BD_NUM; i++) {
		struct arc_emac_bd *txbd = &priv->txbd[i];
		struct buffer_state *tx_buff = &priv->tx_buff[i];

		if (tx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(tx_buff, addr),
					 dma_unmap_len(tx_buff, len),
					 DMA_TO_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(tx_buff->skb);
		}

		txbd->info = 0;
		txbd->data = 0;
		tx_buff->skb = NULL;
	}
}

/**
 * arc_free_rx_queue - free skbs from the Rx queue
 * @ndev: Pointer to the network device.
 *
 * This function must be called while the EMAC is disabled.
 */
static void arc_free_rx_queue(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		struct buffer_state *rx_buff = &priv->rx_buff[i];

		if (rx_buff->skb) {
			dma_unmap_single(&ndev->dev,
					 dma_unmap_addr(rx_buff, addr),
					 dma_unmap_len(rx_buff, len),
					 DMA_FROM_DEVICE);

			/* return the sk_buff to system */
			dev_kfree_skb_irq(rx_buff->skb);
		}

		rxbd->info = 0;
		rxbd->data = 0;
		rx_buff->skb = NULL;
	}
}

/**
 * arc_emac_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue, disables interrupts, disables the EMAC
 * and frees the Rx and Tx ring buffers. It also stops the PHY associated
 * with the EMAC device.
 */
static int arc_emac_stop(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);

	phy_stop(ndev->phydev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buff to system */
	arc_free_tx_queue(ndev);
	arc_free_rx_queue(ndev);

	return 0;
}

/**
 * arc_emac_stats - Get system network statistics.
 * @ndev: Pointer to net_device structure.
 *
 * Returns the address of the device statistics structure.
 * Statistics are updated in the interrupt handler.
 */
static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long miss, rxerr;
	u8 rxcrc, rxfram, rxoflow;

	rxerr = arc_reg_get(priv, R_RXERR);
	miss = arc_reg_get(priv, R_MISS);

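	/* The driver reads R_RXERR as three packed 8-bit counters: CRC
	 * errors in bits 7:0, framing errors in bits 15:8 and FIFO
	 * overflows in bits 23:16.
	 */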
	rxcrc = rxerr;
	rxfram = rxerr >> 8;
	rxoflow = rxerr >> 16;

	stats->rx_errors += miss;
	stats->rx_errors += rxcrc + rxfram + rxoflow;

	stats->rx_over_errors += rxoflow;
	stats->rx_frame_errors += rxfram;
	stats->rx_crc_errors += rxcrc;
	stats->rx_missed_errors += miss;

	return stats;
}

/**
 * arc_emac_tx - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &ndev->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

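	/* skb_padto() zero-pads short frames in place but does not update
	 * skb->len, so compute the on-wire length explicitly.
	 */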
	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	if (unlikely(!arc_emac_tx_avail(priv))) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Make sure info word is set */
	wmb();

	priv->tx_buff[*txbd_curr].skb = skb;

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Ensure that tx_clean() sees the new txbd_curr before
	 * checking the queue status. This prevents an unneeded wake
	 * of the queue in tx_clean().
	 */
	smp_mb();

	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);
		/* Refresh tx_dirty */
		smp_mb();
		if (arc_emac_tx_avail(priv))
			netif_start_queue(ndev);
	}

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}

static void arc_emac_set_address_internal(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int addr_low, addr_hi;

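	/* The first four bytes of dev_addr[] go into ADDRL and the last
	 * two into ADDRH, both read as little-endian values.
	 */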
	addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
	addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);

	arc_reg_set(priv, R_ADDRL, addr_low);
	arc_reg_set(priv, R_ADDRH, addr_hi);
}

/**
 * arc_emac_set_address - Set the MAC address for this device.
 * @ndev: Pointer to net_device structure.
 * @p: 6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns: -EBUSY if the net device is busy or 0 if the address is set
 *		successfully.
 */
static int arc_emac_set_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(ndev, addr->sa_data);

	arc_emac_set_address_internal(ndev);

	return 0;
}

/**
 * arc_emac_restart - Restart EMAC
 * @ndev: Pointer to net_device structure.
 *
 * This function performs a hardware reset of the EMAC in order to restore
 * reception of network packets.
 */
static void arc_emac_restart(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	int i;

	if (net_ratelimit())
		netdev_warn(ndev, "restarting stalled EMAC\n");

	netif_stop_queue(ndev);

	/* Disable interrupts */
	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Disable EMAC */
	arc_reg_clr(priv, R_CTRL, EN_MASK);

	/* Return the sk_buff to system */
	arc_free_tx_queue(ndev);

	/* Clean Tx BD's */
	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;
	memset(priv->txbd, 0, TX_RING_SZ);

	for (i = 0; i < RX_BD_NUM; i++) {
		struct arc_emac_bd *rxbd = &priv->rxbd[i];
		unsigned int info = le32_to_cpu(rxbd->info);

		if (!(info & FOR_EMAC)) {
			stats->rx_errors++;
			stats->rx_dropped++;
		}
		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}
	priv->last_rx_bd = 0;

	/* Make sure info is visible to EMAC before enable */
	wmb();

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	netif_start_queue(ndev);
}

static const struct net_device_ops arc_emac_netdev_ops = {
	.ndo_open		= arc_emac_open,
	.ndo_stop		= arc_emac_stop,
	.ndo_start_xmit		= arc_emac_tx,
	.ndo_set_mac_address	= arc_emac_set_address,
	.ndo_get_stats		= arc_emac_stats,
	.ndo_set_rx_mode	= arc_emac_set_rx_mode,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= arc_emac_poll_controller,
#endif
};

int arc_emac_probe(struct net_device *ndev, int interface)
{
	struct device *dev = ndev->dev.parent;
	struct resource res_regs;
	struct device_node *phy_node;
	struct phy_device *phydev = NULL;
	struct arc_emac_priv *priv;
	unsigned int id, clock_frequency, irq;
	int err;

	/* Get PHY from device tree */
	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
	if (!phy_node) {
		dev_err(dev, "failed to retrieve phy description from device tree\n");
		return -ENODEV;
	}

	/* Get EMAC registers base address from device tree */
	err = of_address_to_resource(dev->of_node, 0, &res_regs);
	if (err) {
		dev_err(dev, "failed to retrieve registers base from device tree\n");
		err = -ENODEV;
		goto out_put_node;
	}

	/* Get IRQ from device tree */
	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_err(dev, "failed to retrieve <irq> value from device tree\n");
		err = -ENODEV;
		goto out_put_node;
	}

	ndev->netdev_ops = &arc_emac_netdev_ops;
	ndev->ethtool_ops = &arc_emac_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	priv = netdev_priv(ndev);
	priv->dev = dev;

	priv->regs = devm_ioremap_resource(dev, &res_regs);
	if (IS_ERR(priv->regs)) {
		err = PTR_ERR(priv->regs);
		goto out_put_node;
	}

	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);

	if (priv->clk) {
		err = clk_prepare_enable(priv->clk);
		if (err) {
			dev_err(dev, "failed to enable clock\n");
			goto out_put_node;
		}

		clock_frequency = clk_get_rate(priv->clk);
	} else {
		/* Get CPU clock frequency from device tree */
		if (of_property_read_u32(dev->of_node, "clock-frequency",
					 &clock_frequency)) {
			dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
			err = -EINVAL;
			goto out_put_node;
		}
	}

	id = arc_reg_get(priv, R_ID);

	/* Check for EMAC revision 5 or 7, magic number */
	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
		dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
		err = -ENODEV;
		goto out_clken;
	}
	dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);

	/* Set poll rate so that it polls every 1 ms */
	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);

	ndev->irq = irq;
	dev_info(dev, "IRQ is %d\n", ndev->irq);

	/* Register interrupt handler for device */
	err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
			       ndev->name, ndev);
	if (err) {
		dev_err(dev, "could not allocate IRQ\n");
		goto out_clken;
	}

	/* Get MAC address from device tree */
	err = of_get_ethdev_address(dev->of_node, ndev);
	if (err)
		eth_hw_addr_random(ndev);

	arc_emac_set_address_internal(ndev);
	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);

	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
	priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
					 &priv->rxbd_dma, GFP_KERNEL);

	if (!priv->rxbd) {
		dev_err(dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto out_clken;
	}

	priv->txbd = priv->rxbd + RX_BD_NUM;

	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
	dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);

	err = arc_mdio_probe(priv);
	if (err) {
		dev_err(dev, "failed to probe MII bus\n");
		goto out_clken;
	}

	phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(dev, "of_phy_connect() failed\n");
		err = -ENODEV;
		goto out_mdio;
	}

	dev_info(dev, "connected to %s phy with id 0x%x\n",
		 phydev->drv->name, phydev->phy_id);

	netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll,
			      ARC_EMAC_NAPI_WEIGHT);

	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "failed to register network device\n");
		goto out_netif_api;
	}

	of_node_put(phy_node);
	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
	phy_disconnect(phydev);
out_mdio:
	arc_mdio_remove(priv);
out_clken:
	if (priv->clk)
		clk_disable_unprepare(priv->clk);
out_put_node:
	of_node_put(phy_node);

	return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);

void arc_emac_remove(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(ndev->phydev);
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
}
EXPORT_SYMBOL_GPL(arc_emac_remove);

MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
MODULE_DESCRIPTION("ARC EMAC driver");
MODULE_LICENSE("GPL");
1028 | |