// SPDX-License-Identifier: GPL-2.0
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 * Once again I am out to prove that every ethernet
 * controller out there can be most efficiently programmed
 * if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/pgtable.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

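/* Every QEC master probed gets chained onto this list, so that module
 * unload (qec_exit() below) can free each controller's IRQ and unmap
 * its global registers.
 */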
static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}
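
/* Note that only the first RX_RING_SIZE descriptors are armed here
 * (flagged RXD_OWN for the chip); the TX descriptors are left zeroed
 * and are only filled in, one at a time, by qe_start_xmit().
 */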

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	const unsigned char *e = &qep->dev->dev_addr[0];
	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems
	 * to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine. Unlike the happy meal,
 * the QE receives into fixed DMA buffers and every packet is copied
 * into a freshly allocated skb before being passed up the stack.
 */
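/* Layout note (from the descriptor masking below): the hardware ring
 * has RX_RING_MAXSIZE descriptor slots but only RX_RING_SIZE data
 * buffers behind it; buffer indices are taken modulo RX_RING_SIZE,
 * and each consumed slot re-arms the descriptor RX_RING_SIZE entries
 * ahead, so the chip never reaches a slot whose buffer is still in use.
 */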
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
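/* GLOB_STAT packs the per-channel status into one nibble per channel,
 * channel 0 in the low nibble (this follows from the "& 0xf" test and
 * the ">>= 4" step in the loop below).
 */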
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring. This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}
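
/* TX completion interrupts stay masked (CREG_TIMASK = 1) in normal
 * operation; reclaim runs lazily from qe_start_xmit(). Only when the
 * ring fills does qe_start_xmit() stop the queue and unmask the TX
 * irq, and qec_interrupt() re-masks it once descriptors free up.
 */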

static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
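
/* Transmission is copy-based: the whole linear skb is copied into a
 * fixed per-entry DMA buffer, so the skb can be freed as soon as the
 * descriptor is handed to the chip and no unmapping is needed at
 * reclaim time.
 */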

/* Get a packet queued to go onto the wire. */
static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
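		/* The MACE's logical address filter is a 64-bit hash: the
		 * top 6 bits of the little-endian CRC select one of 64
		 * bits, stored here as four u16 words (bits 5..4 of the
		 * index pick the word, bits 3..0 pick the bit position).
		 */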
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver. So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network. Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo	= qe_get_drvinfo,
	.get_link	= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize is only used in 100baseT BigMAC configurations;
	 * program a sane default here just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs. Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}
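
/* Worked example (illustrative numbers only): with 256KB of QEC local
 * memory, GLOB_MSIZE = 256KB / 4 = 64KB per channel, and GLOB_RSIZE =
 * GLOB_TSIZE = 32KB, i.e. each channel's 64KB slice is split evenly
 * between its receive and transmit FIFOs.
 */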

static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}
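
/* The (DMA_BURST32 - 1) fallback above selects every burst size below
 * 32 bytes at once, assuming the usual one-bit-per-size encoding of
 * the DMA_BURST* masks in asm/dma.h.
 */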

static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	eth_hw_addr_set(dev, idprom->id_ethaddr);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static void qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove_new	= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);