// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

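/* Accessors for the memory-mapped PMAC register block: raw 32-bit read,
 * write and read-modify-write.
 */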
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

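/* Size helpers: the largest frame for a given MTU is the MTU plus an
 * Ethernet header with one VLAN tag; the DMA buffer is that length rounded
 * up to 4 * XRX200_DMA_BURST_LEN bytes so a frame always spans whole DMA
 * bursts; the backing allocation additionally holds NET_SKB_PAD +
 * NET_IP_ALIGN headroom and the struct skb_shared_info used by build_skb().
 */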
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

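/* ndo_open: enable NAPI and both DMA channels, flush any frames the boot
 * loader left behind in the RX ring, then start the transmit queue.
 */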
static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

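/* Refill the current RX descriptor with a freshly allocated page fragment.
 * On allocation or mapping failure the old buffer is kept and the
 * descriptor is re-armed anyway, so the ring never contains a hole.
 */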
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

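/* Hand one completed RX descriptor to the stack. Frames larger than one
 * buffer arrive as SOP ... EOP descriptor chains and are collected into the
 * head skb's frag_list before being passed up on EOP.
 */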
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}

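/* NAPI RX poll: receive up to @budget complete packets. Descriptors that
 * merely continue a multi-buffer frame do not count against the budget.
 */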
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

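/* NAPI TX completion: reclaim finished descriptors, free the transmitted
 * skbs, update stats and BQL accounting, and restart a stopped queue.
 */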
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

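/* ndo_start_xmit: pad short frames to ETH_ZLEN, map the data for DMA and
 * hand it to the TX channel. The descriptor address must start on a burst
 * boundary, so any misalignment is carried in the TX offset field instead.
 */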
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

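/* ndo_change_mtu: shrinking only updates the size bookkeeping since the
 * existing buffers are already large enough; growing drains the ring and
 * replaces every RX buffer, rolling back to the old MTU if an allocation
 * fails.
 */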
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open = xrx200_open,
	.ndo_stop = xrx200_close,
	.ndo_start_xmit = xrx200_start_xmit,
	.ndo_change_mtu = xrx200_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

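/* Shared handler for the RX and TX DMA interrupts: mask the channel's
 * interrupt, acknowledge it and defer the work to the matching NAPI context.
 */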
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

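/* Set up the ETOP DMA port, allocate the RX and TX descriptor rings,
 * pre-fill the RX ring with buffers and request both channel interrupts.
 */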
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

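/* Release both DMA descriptor rings and the RX buffers they reference */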
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}

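/* Probe: allocate the netdev, map the PMAC registers, fetch the IRQs and
 * clock, bring up the DMA engine, program the PMAC header-control bits and
 * register the resulting network device.
 */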
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static void xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove_new = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");