// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/mdio/mdio-regmap.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

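/* MDIO bus instance counter; starts at ~0 so the first bus created gets id 0 */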
static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
					NETIF_MSG_LINK | NETIF_MSG_IFUP |
					NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");


#define POLL_PHY (-1)

/* Make sure the DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header adds another 4 bytes and the alignment headroom
 * another 2 bytes, so 2048 is plenty.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow network stack to resume queuing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)

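/* Stop the TX queue when the number of free descriptors drops to this
 * threshold (see tse_start_xmit()).
 */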
#define TXQUEUESTOP_THRESHHOLD	2

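/* Number of free slots in the TX ring: tx_prod and tx_cons are free-running
 * indices, and one slot is kept unused to distinguish a full ring from an
 * empty one.
 */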
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}

/* MDIO specific functions
 */
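/* Each MDIO register of the PHY selected via mdio_phy1_addr is exposed as a
 * 32-bit word in the mdio_phy1 CSR space, hence the regnum * 4 offsets below.
 */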
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* get the data */
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* write the data */
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
	return 0;
}

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *mdio_node = NULL;
	struct device_node *child_node = NULL;
	struct mii_bus *mdio = NULL;
	int ret;

	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		ret = -ENOMEM;
		goto put_node;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	mdio->priv = dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio;
	}
	of_node_put(mdio_node);

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
put_node:
	of_node_put(mdio_node);
	return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
					    len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
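	/* Round the DMA address down to a 32-bit boundary; the DMA transfer
	 * begins with 2 bytes of padding for IP alignment (see tse_rx()), so
	 * the frame data still lands at skb->data.
	 */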
	rxbuffer->dma_addr &= (dma_addr_t)~3;
	rxbuffer->len = len;
	return 0;
}

static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	dma_addr_t dma_addr = rxbuffer->dma_addr;
	struct sk_buff *skb = rxbuffer->skb;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}
	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}

static void free_skbufs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int i;

	/* Release the DMA TX/RX socket buffers */
	for (i = 0; i < rx_descs; i++)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	for (i = 0; i < tx_descs; i++)
		tse_free_tx_buffer(priv, &priv->tx_ring[i]);


	kfree(priv->tx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;

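	/* rx_cons counts frames handed to the stack, rx_prod counts buffers
	 * handed to the DMA engine; refill until they match again.
	 */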
	for (; priv->rx_cons - priv->rx_prod > 0;
	     priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
						 priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
	unsigned int next_entry;
	unsigned int count = 0;
	struct sk_buff *skb;
	u32 rxstatus;
	u16 pktlength;
	u16 pktstatus;

	/* Check for count < limit first as get_rx_status is changing
	 * the response-fifo so we must process the next packet
	 * after calling get_rx_status if a response is pending.
	 * (reading the last byte of the response pops the value from the fifo.)
	 */
	while ((count < limit) &&
	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
		pktstatus = rxstatus >> 16;
		pktlength = rxstatus & 0xffff;

		if ((pktstatus & 0xFF) || (pktlength == 0))
			netdev_err(priv->dev,
				   "RCV pktstatus %08X pktlength %08X\n",
				   pktstatus, pktlength);

		/* DMA transfer from TSE starts with 2 additional bytes for
		 * IP payload alignment. Status returned by get_rx_status()
		 * contains DMA transfer length. Packet is 2 bytes shorter.
		 */
		pktlength -= 2;

		count++;
		next_entry = (++priv->rx_cons) % priv->rx_ring_size;

		skb = priv->rx_ring[entry].skb;
		if (unlikely(!skb)) {
			netdev_err(priv->dev,
				   "%s: Inconsistent Rx descriptor chain\n",
				   __func__);
			priv->dev->stats.rx_dropped++;
			break;
		}
		priv->rx_ring[entry].skb = NULL;

		skb_put(skb, pktlength);

		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

		if (netif_msg_pktdata(priv)) {
			netdev_info(priv->dev, "frame received %d bytes\n",
				    pktlength);
			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
				       16, 1, skb->data, pktlength, true);
		}

		tse_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb_checksum_none_assert(skb);

		napi_gro_receive(&priv->napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += pktlength;

		entry = next_entry;

		tse_rx_refill(priv);
	}

	return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	struct tse_buffer *tx_buff;
	unsigned int entry;
	int txcomplete = 0;
	u32 ready;

	spin_lock(&priv->tx_lock);

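	/* Number of transmitted buffers the DMA engine has finished with */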
	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_queue_stopped(priv->dev) &&
		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				netdev_dbg(priv->dev, "%s: restart transmit\n",
					   __func__);
			netif_wake_queue(priv->dev);
		}
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
			container_of(napi, struct altera_tse_private, napi);
	unsigned long int flags;
	int rxcomplete = 0;

	tse_tx_complete(priv);

	rxcomplete = tse_rx(priv, budget);

	if (rxcomplete < budget) {

		napi_complete_done(napi, rxcomplete);

		netdev_dbg(priv->dev,
			   "NAPI Complete, did %d packets with budget %d\n",
			   rxcomplete, budget);

		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return rxcomplete;
}

/* DMA TX & RX FIFO interrupt routing
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	spin_lock(&priv->rxdma_irq_lock);
	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	spin_unlock(&priv->rxdma_irq_lock);

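	/* Mask further DMA interrupts until the NAPI poll re-enables them */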
	if (likely(napi_schedule_prep(&priv->napi))) {
		spin_lock(&priv->rxdma_irq_lock);
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		spin_unlock(&priv->rxdma_irq_lock);
		__napi_schedule(&priv->napi);
	}


	return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches either the SGDMA
 * or the MSGDMA transmit method. Scatter/gather is not supported, so the
 * frame is assumed to be a single physically contiguous fragment starting
 * at skb->data, of length skb_headlen(skb).
 */
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	unsigned int txsize = priv->tx_ring_size;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct tse_buffer *buffer = NULL;
	netdev_tx_t ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;
	unsigned int entry;

	spin_lock_bh(&priv->tx_lock);

	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx list full when queue awake\n",
				   __func__);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Map the first skb fragment */
	entry = priv->tx_prod % txsize;
	buffer = &priv->tx_ring[entry];

	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		ret = NETDEV_TX_OK;
		goto out;
	}

	buffer->skb = skb;
	buffer->dma_addr = dma_addr;
	buffer->len = nopaged_len;

	priv->dmaops->tx_buffer(priv, buffer);

	skb_tx_timestamp(skb);

	priv->tx_prod++;
	dev->stats.tx_bytes += skb->len;

	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
		if (netif_msg_hw(priv))
			netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
				   __func__);
		netif_stop_queue(dev);
	}

out:
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *np = priv->device->of_node;
	int ret;

	ret = of_get_phy_mode(np, &priv->phy_iface);

	/* Don't get a PHY address or create an MDIO bus if no PHY is present */
	if (ret)
		return 0;

	/* try to get PHY address from device tree, use PHY autodetection if
	 * no valid address is given
	 */

	if (of_property_read_u32(priv->device->of_node, "phy-addr",
				 &priv->phy_addr)) {
		priv->phy_addr = POLL_PHY;
	}

	if (!((priv->phy_addr == POLL_PHY) ||
	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			   priv->phy_addr);
		return -ENODEV;
	}

	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
				     atomic_add_return(1, &instance_count));

	if (ret)
		return -ENODEV;

	return 0;
}

static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
	u32 msb;
	u32 lsb;

	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

	/* Set primary MAC address */
	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(rx_section_empty));

	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(rx_section_full));

	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(rx_almost_empty));

	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(rx_almost_full));

	/* Setup Tx FIFO */
	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(tx_section_empty));

	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(tx_section_full));

	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(tx_almost_empty));

	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(tx_almost_full));

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration */
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));

	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
		tse_csroffs(tx_ipg_length));

	/* Enable RX shift 16 for alignment of all received frames on 16-bit
	 * start address
	 */
	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

	/* Disable TX shift 16 and CRC omission on transmit */
	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;

	/* Default speed and duplex setting, full/100 */
	cmd &= ~MAC_CMDCFG_HD_ENA;
	cmd &= ~MAC_CMDCFG_ETH_SPEED;
	cmd &= ~MAC_CMDCFG_ENA_10;

	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
		tse_csroffs(pause_quanta));

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (enable)
		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	else
		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev)) {
		netdev_err(dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

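	/* Each of the 6 hash bits is the XOR (parity) of the bits of one
	 * address octet, computed from addr[5] down to addr[0], giving an
	 * index into the 64-entry hash table.
	 */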
	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;

		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
	}
}


static void altera_tse_set_mcfilterall(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;

	/* set the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adapter
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adapter
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
			      MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned long flags;
	int ret = 0;
	int i;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);

	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}


	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
	if (ret) {
		netdev_err(dev, "could not connect phylink (%d)\n", ret);
		goto tx_request_irq_error;
	}
	phylink_start(priv->phylink);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	priv->dmaops->start_rxdma(priv);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
phy_error:
	return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned long int flags;
	int ret;

	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open		= tse_open,
	.ndo_stop		= tse_shutdown,
	.ndo_start_xmit		= tse_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= tse_set_rx_mode,
	.ndo_change_mtu		= tse_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	spin_lock(&priv->mac_cfg_lock);
	reset_mac(priv);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);
}

static void alt_tse_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
}

static void alt_tse_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);

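	/* With ENA_10, ETH_SPEED and HD_ENA all cleared the MAC runs at
	 * 100 Mbps full duplex; ETH_SPEED selects gigabit, ENA_10 selects
	 * 10 Mbps (see the defaults set in init_mac()).
	 */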
	if (duplex == DUPLEX_HALF)
		ctrl |= MAC_CMDCFG_HD_ENA;

	if (speed == SPEED_1000)
		ctrl |= MAC_CMDCFG_ETH_SPEED;
	else if (speed == SPEED_10)
		ctrl |= MAC_CMDCFG_ENA_10;

	spin_lock(&priv->mac_cfg_lock);
	csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
	spin_unlock(&priv->mac_cfg_lock);
}

static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_SGMII ||
	    interface == PHY_INTERFACE_MODE_1000BASEX)
		return priv->pcs;
	else
		return NULL;
}

static const struct phylink_mac_ops alt_tse_phylink_ops = {
	.mac_config = alt_tse_mac_config,
	.mac_link_down = alt_tse_mac_link_down,
	.mac_link_up = alt_tse_mac_link_up,
	.mac_select_pcs = alt_tse_select_pcs,
};

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct device *device = &pdev->dev;
	struct resource *region;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
	struct regmap_config pcs_regmap_cfg;
	struct altera_tse_private *priv;
	struct mdio_regmap_config mrc;
	struct resource *control_port;
	struct regmap *pcs_regmap;
	struct resource *dma_res;
	struct resource *pcs_res;
	struct mii_bus *pcs_bus;
	struct net_device *ndev;
	void __iomem *descmap;
	int ret = -ENODEV;

	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate network device\n");
		return -ENODEV;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->dmaops = device_get_match_data(&pdev->dev);

	if (priv->dmaops &&
	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
		/* Get the mapped address to the SGDMA descriptor memory */
		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
		if (ret)
			goto err_free_netdev;

		/* Start of that memory is for transmit descriptors */
		priv->tx_dma_desc = descmap;

		/* First half is for tx descriptors, other half for rx */
		priv->txdescmem = resource_size(dma_res)/2;

		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
						     priv->txdescmem));
		priv->rxdescmem = resource_size(dma_res)/2;
		priv->rxdescmem_busaddr = dma_res->start;
		priv->rxdescmem_busaddr += priv->txdescmem;

		if (upper_32_bits(priv->rxdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
		if (upper_32_bits(priv->txdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
	} else if (priv->dmaops &&
		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
		ret = request_and_map(pdev, "rx_resp", &dma_res,
				      &priv->rx_dma_resp);
		if (ret)
			goto err_free_netdev;

		ret = request_and_map(pdev, "tx_desc", &dma_res,
				      &priv->tx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->txdescmem = resource_size(dma_res);
		priv->txdescmem_busaddr = dma_res->start;

		ret = request_and_map(pdev, "rx_desc", &dma_res,
				      &priv->rx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->rxdescmem = resource_size(dma_res);
		priv->rxdescmem_busaddr = dma_res->start;

	} else {
		ret = -ENODEV;
		goto err_free_netdev;
	}

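	/* Prefer the DMA mask advertised by the DMA back end (64-bit for
	 * mSGDMA, 32-bit for SGDMA), falling back to a 32-bit mask.
	 */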
	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
		dma_set_coherent_mask(priv->device,
				      DMA_BIT_MASK(priv->dmaops->dmamask));
	} else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
	} else {
		ret = -EIO;
		goto err_free_netdev;
	}

	/* MAC address space */
	ret = request_and_map(pdev, "control_port", &control_port,
			      (void __iomem **)&priv->mac_dev);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Rx Dispatcher address space */
	ret = request_and_map(pdev, "rx_csr", &dma_res,
			      &priv->rx_dma_csr);
	if (ret)
		goto err_free_netdev;


	/* xSGDMA Tx Dispatcher address space */
	ret = request_and_map(pdev, "tx_csr", &dma_res,
			      &priv->tx_dma_csr);
	if (ret)
		goto err_free_netdev;

	memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
	memset(&mrc, 0, sizeof(mrc));
	/* SGMII PCS address space. The location can vary depending on how the
	 * IP is integrated. We can have a resource dedicated to it at a
	 * specific address space, but if that's not the case, we fall back to
	 * the mdiophy0 registers in the MAC's address space.
	 */
	ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base);
	if (ret) {
		/* If we can't find a dedicated resource for the PCS, fall back
		 * to the internal PCS, which has a different address stride
		 */
		priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
		pcs_regmap_cfg.reg_bits = 32;
		/* Values are MDIO-like values, on 16 bits */
		pcs_regmap_cfg.val_bits = 16;
		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2);
	} else {
		pcs_regmap_cfg.reg_bits = 16;
		pcs_regmap_cfg.val_bits = 16;
		pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
	}

	/* Create a regmap for the PCS so that it can be used by the PCS driver */
	pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base,
					   &pcs_regmap_cfg);
	if (IS_ERR(pcs_regmap)) {
		ret = PTR_ERR(pcs_regmap);
		goto err_free_netdev;
	}
	mrc.regmap = pcs_regmap;
	mrc.parent = &pdev->dev;
	mrc.valid_addr = 0x0;
	mrc.autoscan = false;

	/* Rx IRQ */
	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
	if (priv->rx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* Tx IRQ */
	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
	if (priv->tx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get FIFO depths from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				 &priv->rx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				 &priv->tx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get hash filter settings for this instance */
	priv->hash_filter =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-hash-multicast-filter");

	/* Disable the hash filter for now, until the multicast filter
	 * receive issue is debugged
	 */
	priv->hash_filter = 0;

	/* get supplemental address settings for this instance */
	priv->added_unicast =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-supplementary-unicast");

	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	/* Max MTU is 1500, ETH_DATA_LEN */
	priv->dev->max_mtu = ETH_DATA_LEN;

	/* Get the max mtu from the device tree. Note that the
	 * "max-frame-size" parameter is actually max mtu. Definition
	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
	 */
	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
			     &priv->dev->max_mtu);

	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

	/* get default MAC address from device tree */
	ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
	if (ret)
		eth_hw_addr_random(ndev);

	/* get phy addr and create mdio */
	ret = altera_tse_phy_get_addr_mdio_create(ndev);

	if (ret)
		goto err_free_netdev;

	/* initialize netdev */
	ndev->mem_start = control_port->start;
	ndev->mem_end = control_port->end;
	ndev->netdev_ops = &altera_tse_netdev_ops;
	altera_tse_set_ethtool_ops(ndev);

	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

	if (priv->hash_filter)
		altera_tse_netdev_ops.ndo_set_rx_mode =
			tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported,
	 * so it is turned off
	 */
	ndev->hw_features &= ~NETIF_F_SG;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN offloading of tagging, stripping and filtering is not
	 * supported by hardware, but driver will accommodate the
	 * extra 4-byte VLAN tag for processing by upper layers
	 */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* setup NAPI interface */
	netif_napi_add(ndev, &priv->napi, tse_poll);

	spin_lock_init(&priv->mac_cfg_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->rxdma_irq_lock);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register TSE net device\n");
		goto err_register_netdev;
	}

	platform_set_drvdata(pdev, ndev);

	priv->revision = ioread32(&priv->mac_dev->megacore_revision);

	if (netif_msg_probe(priv))
		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
			 (priv->revision >> 8) & 0xff,
			 priv->revision & 0xff,
			 (unsigned long) control_port->start, priv->rx_irq,
			 priv->tx_irq);

	snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
	pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
	if (IS_ERR(pcs_bus)) {
		ret = PTR_ERR(pcs_bus);
		goto err_init_pcs;
	}

	priv->pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
	if (IS_ERR(priv->pcs)) {
		ret = PTR_ERR(priv->pcs);
		goto err_init_pcs;
	}

	priv->phylink_config.dev = &ndev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
						MAC_100 | MAC_1000FD;

	phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_MII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_GMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  priv->phylink_config.supported_interfaces);

	priv->phylink = phylink_create(&priv->phylink_config,
				       of_fwnode_handle(priv->device->of_node),
				       priv->phy_iface, &alt_tse_phylink_ops);
	if (IS_ERR(priv->phylink)) {
		dev_err(&pdev->dev, "failed to create phylink\n");
		ret = PTR_ERR(priv->phylink);
		goto err_init_phylink;
	}

	return 0;
err_init_phylink:
	lynx_pcs_destroy(priv->pcs);
err_init_pcs:
	unregister_netdev(ndev);
err_register_netdev:
	netif_napi_del(&priv->napi);
	altera_tse_mdio_destroy(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

/* Remove Altera TSE MAC device
 */
static void altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	phylink_destroy(priv->phylink);
	lynx_pcs_destroy(priv->pcs);

	free_netdev(ndev);
}

static const struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
	.start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
	.start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
	.probe		= altera_tse_probe,
	.remove_new	= altera_tse_remove,
	.suspend	= NULL,
	.resume		= NULL,
	.driver		= {
		.name	= ALTERA_TSE_RESOURCE_NAME,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");