// SPDX-License-Identifier: GPL-2.0
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/of.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

#define STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

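/* Low-level PHY register access through the MIF frame register: build a
 * read frame, write it to MIF_FRAME and poll the turn-around LSB until
 * the PHY has driven the data back, or the 10000-iteration poll expires,
 * in which case 0xffff is returned.
 */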
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

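/* Companion write path: the same MIF frame mechanism as the read above,
 * with the write opcode and the data folded into the frame. A timeout is
 * silently ignored here since there is nothing to return.
 */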
static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}

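/* Turn on the chip's clock. get/put calls are refcounted so the power
 * management and reset paths may nest them safely.
 */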
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static inline void gem_netif_stop(struct gem *gp)
{
	netif_trans_update(gp->dev);	/* prevent tx timeout */
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
	/* NOTE: unconditional netif_wake_queue is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots.
	 */
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}

static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

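/* Service a PCS (Serialink/SERDES) interrupt: decode the latched link
 * status change and update the carrier state. Returns 1 when the caller
 * should reset the chip to force re-negotiation.
 */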
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		int pci_errs;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_errs = pci_status_get_and_clear_errors(gp->pdev);
		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
		if (pci_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}

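/* Reclaim completed TX descriptors up to the hardware completion index
 * carried in gem_status, unmap their buffers, free the skbs, and wake
 * the queue if it was stopped on a full ring.
 */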
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
				       DMA_TO_DEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

	/* Need to make the tx_old update visible to gem_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that gem_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}

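/* Return RX descriptors to the chip in aligned clusters of four,
 * kicking RXDMA_KICK once with the last fully refreshed cluster.
 */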
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	dma_wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}
	return skb;
}

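/* NAPI receive loop: drain up to work_to_do completed RX descriptors,
 * copying small frames into fresh skbs and flipping ring buffers for
 * large ones.
 */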
static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				dev->stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			dev->stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_page(&gp->pdev->dev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
			gp->rx_skbs[entry] = new_skb;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       DMA_FROM_DEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			__sum16 csum;

			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
			skb->csum = csum_unfold(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		skb->protocol = eth_type_trans(skb, gp->dev);

		napi_gro_receive(&gp->napi, skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

	return work_done;
}

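/* NAPI poll callback: handle abnormal chip status, reclaim TX, then
 * receive until the budget is exhausted or the chip has no more work.
 */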
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	int work_done;

	work_done = 0;
	do {
		/* Handle anomalies */
		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			int reset;

			/* We run the abnormal interrupt handling code with
			 * the Tx lock. It only resets the Rx portion of the
			 * chip, but we need to guard it against DMA being
			 * restarted by the link poll timer
			 */
			__netif_tx_lock(txq, smp_processor_id());
			reset = gem_abnormal_irq(dev, gp, gp->status);
			__netif_tx_unlock(txq);
			if (reset) {
				gem_schedule_reset(gp);
				napi_complete(napi);
				return work_done;
			}
		}

		/* Run TX completion thread */
		gem_tx(dev, gp, gp->status);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	napi_complete_done(napi, work_done);
	gem_enable_ints(gp);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (unlikely(gem_status == 0)) {
			napi_enable(&gp->napi);
			return IRQ_NONE;
		}
		if (netif_msg_intr(gp))
			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
				gp->dev->name, gem_status);

		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	disable_irq(gp->pdev->irq);
	gem_interrupt(gp->pdev->irq, dev);
	enable_irq(gp->pdev->irq);
}
#endif

static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gem *gp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + TXDMA_CFG),
		   readl(gp->regs + MAC_TXSTAT),
		   readl(gp->regs + MAC_TXCFG));
	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + RXDMA_CFG),
		   readl(gp->regs + MAC_RXSTAT),
		   readl(gp->regs + MAC_RXCFG));

	gem_schedule_reset(gp);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

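/* Queue a frame for transmission. For fragmented skbs the descriptor
 * owning the head of the frame is handed to the chip last, so the DMA
 * engine never sees a partially built chain.
 */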
static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		/* This is a hard error, log it. */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = dma_map_page(&gp->pdev->dev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, DMA_TO_DEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		dma_wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = dma_map_page(&gp->pdev->dev,
					     virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, DMA_TO_DEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = skb_frag_size(this_frag);
			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
						   0, len, DMA_TO_DEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			dma_wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		dma_wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

		/* netif_stop_queue() must be done before checking
		 * tx index in TX_BUFFS_AVAIL() below, because
		 * in gem_tx(), we update tx_old before checking for
		 * netif_queue_stopped().
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
			dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

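/* Global software reset of both the TX and RX halves of the chip. */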
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		netdev_err(gp->dev, "SW reset is ghetto\n");

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* DMA won't actually be stopped before about 4ms though ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}


// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp,
				       const struct ethtool_link_ksettings *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;
	u32 advertising;

	if (ep)
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ep->link_modes.advertising);

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		advertise = advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->base.speed;
		duplex = ep->base.duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static int gem_set_link_modes(struct gem *gp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
	int full_duplex, speed, pause;
	u32 val;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, (full_duplex ? "full" : "half"));


	/* We take the tx queue lock to avoid collisions between
	 * this code, the tx path and the NAPI-driven error path
	 */
	__netif_tx_lock(txq, smp_processor_id());

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	__netif_tx_unlock(txq);

	if (netif_msg_link(gp)) {
		if (pause) {
			netdev_info(gp->dev,
				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    gp->rx_fifo_sz,
				    gp->rx_pause_off,
				    gp->rx_pause_on);
		} else {
			netdev_info(gp->dev, "Pause is disabled\n");
		}
	}

	return 0;
}

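/* The link went down: walk the forced-mode fallback state machine.
 * Returns 1 when the caller should restart autonegotiation.
 */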
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		netif_info(gp, link, gp->dev,
			   "Autoneg failed again, keeping forced mode\n");
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
						   gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
						   DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
							   DUPLEX_HALF);
			gp->timer_ticks = 5;
			netif_info(gp, link, gp->dev,
				   "switching to forced 10bt\n");
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}

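/* Periodic (1.2 s) link poll state machine covering both the MII and
 * the Serialink/SERDES paths.
 */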
static void gem_link_timer(struct timer_list *t)
{
	struct gem *gp = from_timer(gp, t, link_timer);
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			/* The reset task will restart the timer */
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

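/* Unmap and free every buffer still attached to the RX and TX rings. */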
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			dma_unmap_page(&gp->pdev->dev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		dma_wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				dma_unmap_page(&gp->pdev->dev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, DMA_TO_DEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

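/* Reset the ring indices and repopulate the RX ring with freshly
 * mapped buffers; TX descriptors are simply zeroed.
 */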
1616 | static void gem_init_rings(struct gem *gp) |
1617 | { |
1618 | struct gem_init_block *gb = gp->init_block; |
1619 | struct net_device *dev = gp->dev; |
1620 | int i; |
1621 | dma_addr_t dma_addr; |
1622 | |
1623 | gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; |
1624 | |
1625 | gem_clean_rings(gp); |
1626 | |
1627 | gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, |
1628 | (unsigned)VLAN_ETH_FRAME_LEN); |
1629 | |
1630 | for (i = 0; i < RX_RING_SIZE; i++) { |
1631 | struct sk_buff *skb; |
1632 | struct gem_rxd *rxd = &gb->rxd[i]; |
1633 | |
1634 | skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); |
1635 | if (!skb) { |
1636 | rxd->buffer = 0; |
1637 | rxd->status_word = 0; |
1638 | continue; |
1639 | } |
1640 | |
1641 | gp->rx_skbs[i] = skb; |
1642 | skb_put(skb, len: (gp->rx_buf_sz + RX_OFFSET)); |
1643 | dma_addr = dma_map_page(&gp->pdev->dev, |
1644 | virt_to_page(skb->data), |
1645 | offset_in_page(skb->data), |
1646 | RX_BUF_ALLOC_SIZE(gp), |
1647 | DMA_FROM_DEVICE); |
1648 | rxd->buffer = cpu_to_le64(dma_addr); |
1649 | dma_wmb(); |
1650 | rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); |
1651 | skb_reserve(skb, RX_OFFSET); |
1652 | } |
1653 | |
1654 | for (i = 0; i < TX_RING_SIZE; i++) { |
1655 | struct gem_txd *txd = &gb->txd[i]; |
1656 | |
1657 | txd->control_word = 0; |
1658 | dma_wmb(); |
1659 | txd->buffer = 0; |
1660 | } |
1661 | wmb(); |
1662 | } |
1663 | |
1664 | /* Init PHY interface and start link poll state machine */ |
1665 | static void gem_init_phy(struct gem *gp) |
1666 | { |
1667 | u32 mifcfg; |
1668 | |
1669 | /* Revert MIF CFG setting done on stop_phy */ |
1670 | mifcfg = readl(addr: gp->regs + MIF_CFG); |
1671 | mifcfg &= ~MIF_CFG_BBMODE; |
1672 | writel(val: mifcfg, addr: gp->regs + MIF_CFG); |
1673 | |
1674 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { |
1675 | int i; |
1676 | |
1677 | /* Those delays sucks, the HW seems to love them though, I'll |
1678 | * seriously consider breaking some locks here to be able |
1679 | * to schedule instead |
1680 | */ |
1681 | for (i = 0; i < 3; i++) { |
1682 | #ifdef CONFIG_PPC_PMAC |
1683 | pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); |
1684 | msleep(20); |
1685 | #endif |
1686 | /* Some PHYs used by apple have problem getting back to us, |
1687 | * we do an additional reset here |
1688 | */ |
1689 | sungem_phy_write(gp, MII_BMCR, BMCR_RESET); |
1690 | msleep(msecs: 20); |
1691 | if (sungem_phy_read(gp, MII_BMCR) != 0xffff) |
1692 | break; |
1693 | if (i == 2) |
1694 | netdev_warn(dev: gp->dev, format: "GMAC PHY not responding !\n" ); |
1695 | } |
1696 | } |
1697 | |
1698 | if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && |
1699 | gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { |
1700 | u32 val; |
1701 | |
1702 | /* Init datapath mode register. */ |
1703 | if (gp->phy_type == phy_mii_mdio0 || |
1704 | gp->phy_type == phy_mii_mdio1) { |
1705 | val = PCS_DMODE_MGM; |
1706 | } else if (gp->phy_type == phy_serialink) { |
1707 | val = PCS_DMODE_SM | PCS_DMODE_GMOE; |
1708 | } else { |
1709 | val = PCS_DMODE_ESM; |
1710 | } |
1711 | |
1712 | writel(val, addr: gp->regs + PCS_DMODE); |
1713 | } |
1714 | |
1715 | if (gp->phy_type == phy_mii_mdio0 || |
1716 | gp->phy_type == phy_mii_mdio1) { |
1717 | /* Reset and detect MII PHY */ |
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
1719 | |
1720 | /* Init PHY */ |
1721 | if (gp->phy_mii.def && gp->phy_mii.def->ops->init) |
1722 | gp->phy_mii.def->ops->init(&gp->phy_mii); |
1723 | } else { |
1724 | gem_pcs_reset(gp); |
1725 | gem_pcs_reinit_adv(gp); |
1726 | } |
1727 | |
1728 | /* Default aneg parameters */ |
1729 | gp->timer_ticks = 0; |
1730 | gp->lstate = link_down; |
	netif_carrier_off(gp->dev);
1732 | |
1733 | /* Print things out */ |
1734 | if (gp->phy_type == phy_mii_mdio0 || |
1735 | gp->phy_type == phy_mii_mdio1) |
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");
1738 | |
1739 | gem_begin_auto_negotiation(gp, NULL); |
1740 | } |
1741 | |
1742 | static void gem_init_dma(struct gem *gp) |
1743 | { |
1744 | u64 desc_dma = (u64) gp->gblock_dvma; |
1745 | u32 val; |
1746 | |
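	/* The (0x7ff << 10) term presumably fills the TX FIFO threshold
	 * field to its maximum; an assumption from the register layout,
	 * not from chip documentation.
	 */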
1747 | val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); |
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);
1755 | |
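	/* The fields at bits 10 and 13 are, presumably, the RX first-byte
	 * offset and the checksum start offset, so payload checksumming
	 * skips the Ethernet header.
	 */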
1756 | val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | |
1757 | (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); |
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1764 | |
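	/* Pause thresholds are programmed in units of 64 bytes. */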
1765 | val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); |
1766 | val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); |
	writel(val, gp->regs + RXDMA_PTHRESH);
1768 | |
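	/* RX interrupt blanking: hold off the interrupt until 5 packets
	 * have arrived or the programmed interval expires. The longer
	 * interval on 66 MHz busses presumably compensates for a faster
	 * PCI-derived tick.
	 */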
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
1777 | } |
1778 | |
1779 | static u32 gem_setup_multicast(struct gem *gp) |
1780 | { |
1781 | u32 rxcfg = 0; |
1782 | int i; |
1783 | |
1784 | if ((gp->dev->flags & IFF_ALLMULTI) || |
1785 | (netdev_mc_count(gp->dev) > 256)) { |
1786 | for (i=0; i<16; i++) |
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1788 | rxcfg |= MAC_RXCFG_HFE; |
1789 | } else if (gp->dev->flags & IFF_PROMISC) { |
1790 | rxcfg |= MAC_RXCFG_PROM; |
1791 | } else { |
1792 | u16 hash_table[16]; |
1793 | u32 crc; |
1794 | struct netdev_hw_addr *ha; |
1795 | int i; |
1796 | |
1797 | memset(hash_table, 0, sizeof(hash_table)); |
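		/* The chip indexes a 256-bit hash table with the top 8 bits
		 * of the little-endian CRC of the address; the table is
		 * programmed as sixteen 16-bit registers.
		 */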
1798 | netdev_for_each_mc_addr(ha, gp->dev) { |
1799 | crc = ether_crc_le(6, ha->addr); |
1800 | crc >>= 24; |
1801 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); |
1802 | } |
1803 | for (i=0; i<16; i++) |
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1805 | rxcfg |= MAC_RXCFG_HFE; |
1806 | } |
1807 | |
1808 | return rxcfg; |
1809 | } |
1810 | |
1811 | static void gem_init_mac(struct gem *gp) |
1812 | { |
1813 | const unsigned char *e = &gp->dev->dev_addr[0]; |
1814 | |
	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);
1830 | |
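	/* The low 10 bits of the station address seed what is presumably
	 * the collision backoff random number generator.
	 */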
	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);
1840 | |
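	/* MAC_ADDR6..8 hold 01:80:c2:00:00:01, the reserved 802.3x
	 * flow-control (PAUSE) multicast address, most-significant word
	 * in MAC_ADDR8, mirroring the MAC_ADDR0..2 layout above.
	 */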
	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);
1850 | |
1851 | gp->mac_rx_cfg = gem_setup_multicast(gp); |
1852 | #ifdef STRIP_FCS |
1853 | gp->mac_rx_cfg |= MAC_RXCFG_SFCS; |
1854 | #endif |
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);
1866 | |
1867 | /* Clear RX/TX/MAC/XIF config, we will set these up and enable |
1868 | * them once a link is established. |
1869 | */ |
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);
1874 | |
1875 | /* Setup MAC interrupts. We want to get all of the interesting |
1876 | * counter expiration events, but we do not want to hear about |
1877 | * normal rx/tx as the DMA engine tells us that. |
1878 | */ |
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
1881 | |
1882 | /* Don't enable even the PAUSE interrupts for now, we |
1883 | * make no use of those events other than to record them. |
1884 | */ |
	writel(0xffffffff, gp->regs + MAC_MCMASK);
1886 | |
1887 | /* Don't enable GEM's WOL in normal operations |
1888 | */ |
1889 | if (gp->has_wol) |
		writel(0, gp->regs + WOL_WAKECSR);
1891 | } |
1892 | |
1893 | static void gem_init_pause_thresholds(struct gem *gp) |
1894 | { |
1895 | u32 cfg; |
1896 | |
1897 | /* Calculate pause thresholds. Setting the OFF threshold to the |
1898 | * full RX fifo size effectively disables PAUSE generation which |
1899 | * is what we do for 10/100 only GEMs which have FIFOs too small |
1900 | * to make real gains from PAUSE. |
1901 | */ |
1902 | if (gp->rx_fifo_sz <= (2 * 1024)) { |
1903 | gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; |
1904 | } else { |
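		/* Round a maximum frame (plus FCS) up to a 64-byte boundary,
		 * then request XOFF when less than two such frames of FIFO
		 * space remain and XON once one more frame's worth drains.
		 */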
1905 | int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; |
1906 | int off = (gp->rx_fifo_sz - (max_frame * 2)); |
1907 | int on = off - max_frame; |
1908 | |
1909 | gp->rx_pause_off = off; |
1910 | gp->rx_pause_on = on; |
1911 | } |
1912 | |
1914 | /* Configure the chip "burst" DMA mode & enable some |
1915 | * HW bug fixes on Apple version |
1916 | */ |
1917 | cfg = 0; |
1918 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) |
1919 | cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; |
1920 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) |
1921 | cfg |= GREG_CFG_IBURST; |
1922 | #endif |
1923 | cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); |
1924 | cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); |
	writel(cfg, gp->regs + GREG_CFG);
1926 | |
1927 | /* If Infinite Burst didn't stick, then use different |
1928 | * thresholds (and Apple bug fixes don't exist) |
1929 | */ |
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
1934 | } |
1935 | } |
1936 | |
1937 | static int gem_check_invariants(struct gem *gp) |
1938 | { |
1939 | struct pci_dev *pdev = gp->pdev; |
1940 | u32 mif_cfg; |
1941 | |
	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
1945 | */ |
1946 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) { |
1947 | gp->phy_type = phy_mii_mdio0; |
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
1950 | gp->swrst_base = 0; |
1951 | |
		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
1958 | |
1959 | /* We hard-code the PHY address so we can properly bring it out of |
1960 | * reset later on, we can't really probe it at this point, though |
1961 | * that isn't an issue. |
1962 | */ |
1963 | if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) |
1964 | gp->mii_phy_addr = 1; |
1965 | else |
1966 | gp->mii_phy_addr = 0; |
1967 | |
1968 | return 0; |
1969 | } |
1970 | |
	mif_cfg = readl(gp->regs + MIF_CFG);
1972 | |
1973 | if (pdev->vendor == PCI_VENDOR_ID_SUN && |
1974 | pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { |
1975 | /* One of the MII PHYs _must_ be present |
1976 | * as this chip has no gigabit PHY. |
1977 | */ |
1978 | if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { |
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
1981 | return -1; |
1982 | } |
1983 | } |
1984 | |
1985 | /* Determine initial PHY interface type guess. MDIO1 is the |
1986 | * external PHY and thus takes precedence over MDIO0. |
1987 | */ |
1988 | |
1989 | if (mif_cfg & MIF_CFG_MDI1) { |
1990 | gp->phy_type = phy_mii_mdio1; |
1991 | mif_cfg |= MIF_CFG_PSELECT; |
		writel(mif_cfg, gp->regs + MIF_CFG);
1993 | } else if (mif_cfg & MIF_CFG_MDI0) { |
1994 | gp->phy_type = phy_mii_mdio0; |
1995 | mif_cfg &= ~MIF_CFG_PSELECT; |
		writel(mif_cfg, gp->regs + MIF_CFG);
1997 | } else { |
1998 | #ifdef CONFIG_SPARC |
1999 | const char *p; |
2000 | |
		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
2003 | gp->phy_type = phy_serdes; |
2004 | else |
2005 | #endif |
2006 | gp->phy_type = phy_serialink; |
2007 | } |
2008 | if (gp->phy_type == phy_mii_mdio1 || |
2009 | gp->phy_type == phy_mii_mdio0) { |
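		/* Scan all 32 MII addresses; an absent PHY reads back as
		 * all-ones (0xffff).
		 */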
2010 | int i; |
2011 | |
2012 | for (i = 0; i < 32; i++) { |
2013 | gp->mii_phy_addr = i; |
2014 | if (sungem_phy_read(gp, MII_BMCR) != 0xffff) |
2015 | break; |
2016 | } |
2017 | if (i == 32) { |
2018 | if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { |
				pr_err("RIO MII phy will not respond\n");
2020 | return -1; |
2021 | } |
2022 | gp->phy_type = phy_serdes; |
2023 | } |
2024 | } |
2025 | |
2026 | /* Fetch the FIFO configurations now too. */ |
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2029 | |
2030 | if (pdev->vendor == PCI_VENDOR_ID_SUN) { |
2031 | if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { |
2032 | if (gp->tx_fifo_sz != (9 * 1024) || |
2033 | gp->rx_fifo_sz != (20 * 1024)) { |
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2035 | gp->tx_fifo_sz, gp->rx_fifo_sz); |
2036 | return -1; |
2037 | } |
2038 | gp->swrst_base = 0; |
2039 | } else { |
2040 | if (gp->tx_fifo_sz != (2 * 1024) || |
2041 | gp->rx_fifo_sz != (2 * 1024)) { |
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2043 | gp->tx_fifo_sz, gp->rx_fifo_sz); |
2044 | return -1; |
2045 | } |
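			/* Presumably the cache-line-size field of the SW
			 * reset register: 64 bytes expressed in 32-bit words.
			 */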
2046 | gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; |
2047 | } |
2048 | } |
2049 | |
2050 | return 0; |
2051 | } |
2052 | |
2053 | static void gem_reinit_chip(struct gem *gp) |
2054 | { |
2055 | /* Reset the chip */ |
2056 | gem_reset(gp); |
2057 | |
2058 | /* Make sure ints are disabled */ |
2059 | gem_disable_ints(gp); |
2060 | |
2061 | /* Allocate & setup ring buffers */ |
2062 | gem_init_rings(gp); |
2063 | |
2064 | /* Configure pause thresholds */ |
2065 | gem_init_pause_thresholds(gp); |
2066 | |
2067 | /* Init DMA & MAC engines */ |
2068 | gem_init_dma(gp); |
2069 | gem_init_mac(gp); |
2070 | } |
2071 | |
2072 | |
2073 | static void gem_stop_phy(struct gem *gp, int wol) |
2074 | { |
2075 | u32 mifcfg; |
2076 | |
2077 | /* Let the chip settle down a bit, it seems that helps |
2078 | * for sleep mode on some models |
2079 | */ |
	msleep(10);
2081 | |
2082 | /* Make sure we aren't polling PHY status change. We |
2083 | * don't currently use that feature though |
2084 | */ |
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);
2088 | |
2089 | if (wol && gp->has_wol) { |
2090 | const unsigned char *e = &gp->dev->dev_addr[0]; |
2091 | u32 csr; |
2092 | |
2093 | /* Setup wake-on-lan for MAGIC packet */ |
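		/* The match registers take the station address as three
		 * 16-bit words, least-significant first, mirroring
		 * MAC_ADDR0..2; presumably the WOL engine compares them
		 * against the magic-packet payload.
		 */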
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
2099 | |
		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
2105 | } else { |
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here; it looks like the chip takes
		 * some time to really shut down.
		 */
		msleep(10);
2113 | } |
2114 | |
	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);
2119 | |
2120 | if (!wol) { |
2121 | gem_reset(gp); |
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
2124 | |
2125 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) |
2126 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); |
2127 | |
		/* According to Apple, we must set the MDIO pins to this
		 * benign state or we may 1) eat more current, 2) damage
		 * some PHYs.
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
2137 | } |
2138 | } |
2139 | |
2140 | static int gem_do_start(struct net_device *dev) |
2141 | { |
2142 | struct gem *gp = netdev_priv(dev); |
2143 | int rc; |
2144 | |
	pci_set_master(gp->pdev);
2146 | |
2147 | /* Init & setup chip hardware */ |
2148 | gem_reinit_chip(gp); |
2149 | |
2150 | /* An interrupt might come in handy */ |
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
2153 | if (rc) { |
		netdev_err(dev, "failed to request irq !\n");
2155 | |
2156 | gem_reset(gp); |
2157 | gem_clean_rings(gp); |
2158 | gem_put_cell(gp); |
2159 | return rc; |
2160 | } |
2161 | |
2162 | /* Mark us as attached again if we come from resume(), this has |
2163 | * no effect if we weren't detached and needs to be done now. |
2164 | */ |
2165 | netif_device_attach(dev); |
2166 | |
2167 | /* Restart NAPI & queues */ |
2168 | gem_netif_start(gp); |
2169 | |
2170 | /* Detect & init PHY, start autoneg etc... this will |
2171 | * eventually result in starting DMA operations when |
2172 | * the link is up |
2173 | */ |
2174 | gem_init_phy(gp); |
2175 | |
2176 | return 0; |
2177 | } |
2178 | |
2179 | static void gem_do_stop(struct net_device *dev, int wol) |
2180 | { |
2181 | struct gem *gp = netdev_priv(dev); |
2182 | |
2183 | /* Stop NAPI and stop tx queue */ |
2184 | gem_netif_stop(gp); |
2185 | |
2186 | /* Make sure ints are disabled. We don't care about |
2187 | * synchronizing as NAPI is disabled, thus a stray |
2188 | * interrupt will do nothing bad (our irq handler |
2189 | * just schedules NAPI) |
2190 | */ |
2191 | gem_disable_ints(gp); |
2192 | |
2193 | /* Stop the link timer */ |
	del_timer_sync(&gp->link_timer);
2195 | |
	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
2205 | gp->reset_task_pending = 0; |
2206 | |
2207 | /* If we are going to sleep with WOL */ |
2208 | gem_stop_dma(gp); |
	msleep(10);
2210 | if (!wol) |
2211 | gem_reset(gp); |
	msleep(10);
2213 | |
2214 | /* Get rid of rings */ |
2215 | gem_clean_rings(gp); |
2216 | |
2217 | /* No irq needed anymore */ |
2218 | free_irq(gp->pdev->irq, (void *) dev); |
2219 | |
2220 | /* Shut the PHY down eventually and setup WOL */ |
2221 | gem_stop_phy(gp, wol); |
2222 | } |
2223 | |
2224 | static void gem_reset_task(struct work_struct *work) |
2225 | { |
2226 | struct gem *gp = container_of(work, struct gem, reset_task); |
2227 | |
	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend).
	 */
2231 | rtnl_lock(); |
2232 | |
2233 | /* Skip the reset task if suspended or closed, or if it's |
2234 | * been cancelled by gem_do_stop (see comment there) |
2235 | */ |
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
2238 | !gp->reset_task_pending) { |
2239 | rtnl_unlock(); |
2240 | return; |
2241 | } |
2242 | |
2243 | /* Stop the link timer */ |
	del_timer_sync(&gp->link_timer);
2245 | |
2246 | /* Stop NAPI and tx */ |
2247 | gem_netif_stop(gp); |
2248 | |
2249 | /* Reset the chip & rings */ |
2250 | gem_reinit_chip(gp); |
2251 | if (gp->lstate == link_up) |
2252 | gem_set_link_modes(gp); |
2253 | |
2254 | /* Restart NAPI and Tx */ |
2255 | gem_netif_start(gp); |
2256 | |
2257 | /* We are back ! */ |
2258 | gp->reset_task_pending = 0; |
2259 | |
2260 | /* If the link is not up, restart autoneg, else restart the |
2261 | * polling timer |
2262 | */ |
2263 | if (gp->lstate != link_up) |
2264 | gem_begin_auto_negotiation(gp, NULL); |
2265 | else |
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
2267 | |
2268 | rtnl_unlock(); |
2269 | } |
2270 | |
2271 | static int gem_open(struct net_device *dev) |
2272 | { |
2273 | struct gem *gp = netdev_priv(dev); |
2274 | int rc; |
2275 | |
2276 | /* We allow open while suspended, we just do nothing, |
2277 | * the chip will be initialized in resume() |
2278 | */ |
2279 | if (netif_device_present(dev)) { |
2280 | /* Enable the cell */ |
2281 | gem_get_cell(gp); |
2282 | |
2283 | /* Make sure PCI access and bus master are enabled */ |
		rc = pci_enable_device(gp->pdev);
2285 | if (rc) { |
			netdev_err(dev, "Failed to enable chip on PCI bus !\n");
2287 | |
			/* Put cell and forget it for now, it will be considered
			 * as still asleep, a new sleep cycle may bring it back.
			 */
2291 | gem_put_cell(gp); |
2292 | return -ENXIO; |
2293 | } |
2294 | return gem_do_start(dev); |
2295 | } |
2296 | |
2297 | return 0; |
2298 | } |
2299 | |
2300 | static int gem_close(struct net_device *dev) |
2301 | { |
2302 | struct gem *gp = netdev_priv(dev); |
2303 | |
2304 | if (netif_device_present(dev)) { |
		gem_do_stop(dev, 0);
2306 | |
2307 | /* Make sure bus master is disabled */ |
		pci_disable_device(gp->pdev);
2309 | |
		/* Cell not needed either if there is no WOL */
2311 | if (!gp->asleep_wol) |
2312 | gem_put_cell(gp); |
2313 | } |
2314 | return 0; |
2315 | } |
2316 | |
2317 | static int __maybe_unused gem_suspend(struct device *dev_d) |
2318 | { |
	struct net_device *dev = dev_get_drvdata(dev_d);
2320 | struct gem *gp = netdev_priv(dev); |
2321 | |
2322 | /* Lock the network stack first to avoid racing with open/close, |
2323 | * reset task and setting calls |
2324 | */ |
2325 | rtnl_lock(); |
2326 | |
2327 | /* Not running, mark ourselves non-present, no need for |
2328 | * a lock here |
2329 | */ |
2330 | if (!netif_running(dev)) { |
2331 | netif_device_detach(dev); |
2332 | rtnl_unlock(); |
2333 | return 0; |
2334 | } |
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");
2338 | |
2339 | /* Tell the network stack we're gone. gem_do_stop() below will |
2340 | * synchronize with TX, stop NAPI etc... |
2341 | */ |
2342 | netif_device_detach(dev); |
2343 | |
2344 | /* Switch off chip, remember WOL setting */ |
2345 | gp->asleep_wol = !!gp->wake_on_lan; |
	gem_do_stop(dev, gp->asleep_wol);
2347 | |
	/* Cell not needed either if there is no WOL */
2349 | if (!gp->asleep_wol) |
2350 | gem_put_cell(gp); |
2351 | |
2352 | /* Unlock the network stack */ |
2353 | rtnl_unlock(); |
2354 | |
2355 | return 0; |
2356 | } |
2357 | |
2358 | static int __maybe_unused gem_resume(struct device *dev_d) |
2359 | { |
	struct net_device *dev = dev_get_drvdata(dev_d);
2361 | struct gem *gp = netdev_priv(dev); |
2362 | |
2363 | /* See locking comment in gem_suspend */ |
2364 | rtnl_lock(); |
2365 | |
2366 | /* Not running, mark ourselves present, no need for |
2367 | * a lock here |
2368 | */ |
2369 | if (!netif_running(dev)) { |
2370 | netif_device_attach(dev); |
2371 | rtnl_unlock(); |
2372 | return 0; |
2373 | } |
2374 | |
2375 | /* Enable the cell */ |
2376 | gem_get_cell(gp); |
2377 | |
2378 | /* Restart chip. If that fails there isn't much we can do, we |
2379 | * leave things stopped. |
2380 | */ |
2381 | gem_do_start(dev); |
2382 | |
	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here.
	 */
2386 | if (gp->asleep_wol) |
2387 | gem_put_cell(gp); |
2388 | |
2389 | /* Unlock the network stack */ |
2390 | rtnl_unlock(); |
2391 | |
2392 | return 0; |
2393 | } |
2394 | |
2395 | static struct net_device_stats *gem_get_stats(struct net_device *dev) |
2396 | { |
2397 | struct gem *gp = netdev_priv(dev); |
2398 | |
2399 | /* I have seen this being called while the PM was in progress, |
2400 | * so we shield against this. Let's also not poke at registers |
2401 | * while the reset task is going on. |
2402 | * |
2403 | * TODO: Move stats collection elsewhere (link timer ?) and |
2404 | * make this a nop to avoid all those synchro issues |
2405 | */ |
2406 | if (!netif_device_present(dev) || !netif_running(dev)) |
2407 | goto bail; |
2408 | |
2409 | /* Better safe than sorry... */ |
2410 | if (WARN_ON(!gp->cell_enabled)) |
2411 | goto bail; |
2412 | |
	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
2427 | bail: |
2428 | return &dev->stats; |
2429 | } |
2430 | |
2431 | static int gem_set_mac_address(struct net_device *dev, void *addr) |
2432 | { |
2433 | struct sockaddr *macaddr = (struct sockaddr *) addr; |
2434 | const unsigned char *e = &dev->dev_addr[0]; |
2435 | struct gem *gp = netdev_priv(dev); |
2436 | |
	if (!is_valid_ether_addr(macaddr->sa_data))
2438 | return -EADDRNOTAVAIL; |
2439 | |
	eth_hw_addr_set(dev, macaddr->sa_data);
2441 | |
2442 | /* We'll just catch it later when the device is up'd or resumed */ |
2443 | if (!netif_running(dev) || !netif_device_present(dev)) |
2444 | return 0; |
2445 | |
2446 | /* Better safe than sorry... */ |
2447 | if (WARN_ON(!gp->cell_enabled)) |
2448 | return 0; |
2449 | |
	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
2453 | |
2454 | return 0; |
2455 | } |
2456 | |
2457 | static void gem_set_multicast(struct net_device *dev) |
2458 | { |
2459 | struct gem *gp = netdev_priv(dev); |
2460 | u32 rxcfg, rxcfg_new; |
2461 | int limit = 10000; |
2462 | |
2463 | if (!netif_running(dev) || !netif_device_present(dev)) |
2464 | return; |
2465 | |
2466 | /* Better safe than sorry... */ |
2467 | if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) |
2468 | return; |
2469 | |
	rxcfg = readl(gp->regs + MAC_RXCFG);
2471 | rxcfg_new = gem_setup_multicast(gp); |
2472 | #ifdef STRIP_FCS |
2473 | rxcfg_new |= MAC_RXCFG_SFCS; |
2474 | #endif |
2475 | gp->mac_rx_cfg = rxcfg_new; |
2476 | |
	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2479 | if (!limit--) |
2480 | break; |
2481 | udelay(10); |
2482 | } |
2483 | |
2484 | rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); |
2485 | rxcfg |= rxcfg_new; |
2486 | |
	writel(rxcfg, gp->regs + MAC_RXCFG);
2488 | } |
2489 | |
2490 | /* Jumbo-grams don't seem to work :-( */ |
2491 | #define GEM_MIN_MTU ETH_MIN_MTU |
2492 | #if 1 |
2493 | #define GEM_MAX_MTU ETH_DATA_LEN |
2494 | #else |
2495 | #define GEM_MAX_MTU 9000 |
2496 | #endif |
2497 | |
2498 | static int gem_change_mtu(struct net_device *dev, int new_mtu) |
2499 | { |
2500 | struct gem *gp = netdev_priv(dev); |
2501 | |
2502 | dev->mtu = new_mtu; |
2503 | |
2504 | /* We'll just catch it later when the device is up'd or resumed */ |
2505 | if (!netif_running(dev) || !netif_device_present(dev)) |
2506 | return 0; |
2507 | |
2508 | /* Better safe than sorry... */ |
2509 | if (WARN_ON(!gp->cell_enabled)) |
2510 | return 0; |
2511 | |
2512 | gem_netif_stop(gp); |
2513 | gem_reinit_chip(gp); |
2514 | if (gp->lstate == link_up) |
2515 | gem_set_link_modes(gp); |
2516 | gem_netif_start(gp); |
2517 | |
2518 | return 0; |
2519 | } |
2520 | |
2521 | static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2522 | { |
2523 | struct gem *gp = netdev_priv(dev); |
2524 | |
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
2528 | } |
2529 | |
2530 | static int gem_get_link_ksettings(struct net_device *dev, |
2531 | struct ethtool_link_ksettings *cmd) |
2532 | { |
2533 | struct gem *gp = netdev_priv(dev); |
2534 | u32 supported, advertising; |
2535 | |
2536 | if (gp->phy_type == phy_mii_mdio0 || |
2537 | gp->phy_type == phy_mii_mdio1) { |
2538 | if (gp->phy_mii.def) |
2539 | supported = gp->phy_mii.def->features; |
2540 | else |
2541 | supported = (SUPPORTED_10baseT_Half | |
2542 | SUPPORTED_10baseT_Full); |
2543 | |
2544 | /* XXX hardcoded stuff for now */ |
2545 | cmd->base.port = PORT_MII; |
2546 | cmd->base.phy_address = 0; /* XXX fixed PHYAD */ |
2547 | |
2548 | /* Return current PHY settings */ |
2549 | cmd->base.autoneg = gp->want_autoneg; |
2550 | cmd->base.speed = gp->phy_mii.speed; |
2551 | cmd->base.duplex = gp->phy_mii.duplex; |
2552 | advertising = gp->phy_mii.advertising; |
2553 | |
2554 | /* If we started with a forced mode, we don't have a default |
2555 | * advertise set, we need to return something sensible so |
2556 | * userland can re-enable autoneg properly. |
2557 | */ |
2558 | if (advertising == 0) |
2559 | advertising = supported; |
2560 | } else { // XXX PCS ? |
2561 | supported = |
2562 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | |
2563 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | |
2564 | SUPPORTED_Autoneg); |
2565 | advertising = supported; |
2566 | cmd->base.speed = 0; |
2567 | cmd->base.duplex = 0; |
2568 | cmd->base.port = 0; |
2569 | cmd->base.phy_address = 0; |
2570 | cmd->base.autoneg = 0; |
2571 | |
		/* serdes usually means a Fibre connector, with mostly fixed settings */
2573 | if (gp->phy_type == phy_serdes) { |
2574 | cmd->base.port = PORT_FIBRE; |
2575 | supported = (SUPPORTED_1000baseT_Half | |
2576 | SUPPORTED_1000baseT_Full | |
2577 | SUPPORTED_FIBRE | SUPPORTED_Autoneg | |
2578 | SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
2579 | advertising = supported; |
2580 | if (gp->lstate == link_up) |
2581 | cmd->base.speed = SPEED_1000; |
2582 | cmd->base.duplex = DUPLEX_FULL; |
2583 | cmd->base.autoneg = 1; |
2584 | } |
2585 | } |
2586 | |
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
2591 | |
2592 | return 0; |
2593 | } |
2594 | |
2595 | static int gem_set_link_ksettings(struct net_device *dev, |
2596 | const struct ethtool_link_ksettings *cmd) |
2597 | { |
2598 | struct gem *gp = netdev_priv(dev); |
2599 | u32 speed = cmd->base.speed; |
2600 | u32 advertising; |
2601 | |
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
2604 | |
2605 | /* Verify the settings we care about. */ |
2606 | if (cmd->base.autoneg != AUTONEG_ENABLE && |
2607 | cmd->base.autoneg != AUTONEG_DISABLE) |
2608 | return -EINVAL; |
2609 | |
2610 | if (cmd->base.autoneg == AUTONEG_ENABLE && |
2611 | advertising == 0) |
2612 | return -EINVAL; |
2613 | |
2614 | if (cmd->base.autoneg == AUTONEG_DISABLE && |
2615 | ((speed != SPEED_1000 && |
2616 | speed != SPEED_100 && |
2617 | speed != SPEED_10) || |
2618 | (cmd->base.duplex != DUPLEX_HALF && |
2619 | cmd->base.duplex != DUPLEX_FULL))) |
2620 | return -EINVAL; |
2621 | |
2622 | /* Apply settings and restart link process. */ |
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
2626 | } |
2627 | |
2628 | return 0; |
2629 | } |
2630 | |
2631 | static int gem_nway_reset(struct net_device *dev) |
2632 | { |
2633 | struct gem *gp = netdev_priv(dev); |
2634 | |
2635 | if (!gp->want_autoneg) |
2636 | return -EINVAL; |
2637 | |
2638 | /* Restart link process */ |
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
2642 | } |
2643 | |
2644 | return 0; |
2645 | } |
2646 | |
2647 | static u32 gem_get_msglevel(struct net_device *dev) |
2648 | { |
2649 | struct gem *gp = netdev_priv(dev); |
2650 | return gp->msg_enable; |
2651 | } |
2652 | |
2653 | static void gem_set_msglevel(struct net_device *dev, u32 value) |
2654 | { |
2655 | struct gem *gp = netdev_priv(dev); |
2656 | gp->msg_enable = value; |
2657 | } |
2658 | |
2659 | |
2660 | /* Add more when I understand how to program the chip */ |
2661 | /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ |
2662 | |
2663 | #define WOL_SUPPORTED_MASK (WAKE_MAGIC) |
2664 | |
2665 | static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2666 | { |
2667 | struct gem *gp = netdev_priv(dev); |
2668 | |
2669 | /* Add more when I understand how to program the chip */ |
2670 | if (gp->has_wol) { |
2671 | wol->supported = WOL_SUPPORTED_MASK; |
2672 | wol->wolopts = gp->wake_on_lan; |
2673 | } else { |
2674 | wol->supported = 0; |
2675 | wol->wolopts = 0; |
2676 | } |
2677 | } |
2678 | |
2679 | static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2680 | { |
2681 | struct gem *gp = netdev_priv(dev); |
2682 | |
2683 | if (!gp->has_wol) |
2684 | return -EOPNOTSUPP; |
2685 | gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; |
2686 | return 0; |
2687 | } |
2688 | |
2689 | static const struct ethtool_ops gem_ethtool_ops = { |
2690 | .get_drvinfo = gem_get_drvinfo, |
2691 | .get_link = ethtool_op_get_link, |
2692 | .nway_reset = gem_nway_reset, |
2693 | .get_msglevel = gem_get_msglevel, |
2694 | .set_msglevel = gem_set_msglevel, |
2695 | .get_wol = gem_get_wol, |
2696 | .set_wol = gem_set_wol, |
2697 | .get_link_ksettings = gem_get_link_ksettings, |
2698 | .set_link_ksettings = gem_set_link_ksettings, |
2699 | }; |
2700 | |
2701 | static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2702 | { |
2703 | struct gem *gp = netdev_priv(dev); |
	struct mii_ioctl_data *data = if_mii(ifr);
2705 | int rc = -EOPNOTSUPP; |
2706 | |
2707 | /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that |
2708 | * netif_device_present() is true and holds rtnl_lock for us |
2709 | * so we have nothing to worry about |
2710 | */ |
2711 | |
2712 | switch (cmd) { |
2713 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ |
2714 | data->phy_id = gp->mii_phy_addr; |
2715 | fallthrough; |
2716 | |
2717 | case SIOCGMIIREG: /* Read MII PHY register. */ |
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
2720 | rc = 0; |
2721 | break; |
2722 | |
2723 | case SIOCSMIIREG: /* Write MII PHY register. */ |
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
2726 | rc = 0; |
2727 | break; |
2728 | } |
2729 | return rc; |
2730 | } |
2731 | |
2732 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) |
2733 | /* Fetch MAC address from vital product data of PCI ROM. */ |
2734 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) |
2735 | { |
2736 | int this_offset; |
2737 | |
2738 | for (this_offset = 0x20; this_offset < len; this_offset++) { |
2739 | void __iomem *p = rom_base + this_offset; |
2740 | int i; |
2741 | |
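		/* Match what looks like a VPD-R resource (tag 0x90, 9-byte
		 * payload) carrying the "NA" (network address) keyword with
		 * a 6-byte value; an assumption from the byte signature
		 * checked below.
		 */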
		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
2748 | continue; |
2749 | |
2750 | this_offset += 6; |
2751 | p += 6; |
2752 | |
2753 | for (i = 0; i < 6; i++) |
			dev_addr[i] = readb(p + i);
2755 | return 1; |
2756 | } |
2757 | return 0; |
2758 | } |
2759 | |
2760 | static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) |
2761 | { |
2762 | size_t size; |
	void __iomem *p = pci_map_rom(pdev, &size);
2764 | |
2765 | if (p) { |
2766 | int found; |
2767 | |
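		/* A valid PCI expansion ROM starts with the 0x55 0xaa
		 * signature.
		 */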
		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
2772 | if (found) |
2773 | return; |
2774 | } |
2775 | |
2776 | /* Sun MAC prefix then 3 random bytes. */ |
2777 | dev_addr[0] = 0x08; |
2778 | dev_addr[1] = 0x00; |
2779 | dev_addr[2] = 0x20; |
	get_random_bytes(dev_addr + 3, 3);
2781 | } |
2782 | #endif /* not Sparc and not PPC */ |
2783 | |
2784 | static int gem_get_device_address(struct gem *gp) |
2785 | { |
2786 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) |
2787 | struct net_device *dev = gp->dev; |
2788 | const unsigned char *addr; |
2789 | |
	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
2791 | if (addr == NULL) { |
2792 | #ifdef CONFIG_SPARC |
2793 | addr = idprom->id_ethaddr; |
2794 | #else |
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
2797 | return -1; |
2798 | #endif |
2799 | } |
2800 | eth_hw_addr_set(dev, addr); |
2801 | #else |
2802 | u8 addr[ETH_ALEN]; |
2803 | |
	get_gem_mac_nonobp(gp->pdev, addr);
	eth_hw_addr_set(gp->dev, addr);
2806 | #endif |
2807 | return 0; |
2808 | } |
2809 | |
2810 | static void gem_remove_one(struct pci_dev *pdev) |
2811 | { |
2812 | struct net_device *dev = pci_get_drvdata(pdev); |
2813 | |
2814 | if (dev) { |
2815 | struct gem *gp = netdev_priv(dev); |
2816 | |
2817 | unregister_netdev(dev); |
2818 | |
2819 | /* Ensure reset task is truly gone */ |
		cancel_work_sync(&gp->reset_task);
2821 | |
2822 | /* Free resources */ |
		dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
				  gp->init_block, gp->gblock_dvma);
		iounmap(gp->regs);
2826 | pci_release_regions(pdev); |
2827 | free_netdev(dev); |
2828 | } |
2829 | } |
2830 | |
2831 | static const struct net_device_ops gem_netdev_ops = { |
2832 | .ndo_open = gem_open, |
2833 | .ndo_stop = gem_close, |
2834 | .ndo_start_xmit = gem_start_xmit, |
2835 | .ndo_get_stats = gem_get_stats, |
2836 | .ndo_set_rx_mode = gem_set_multicast, |
2837 | .ndo_eth_ioctl = gem_ioctl, |
2838 | .ndo_tx_timeout = gem_tx_timeout, |
2839 | .ndo_change_mtu = gem_change_mtu, |
2840 | .ndo_validate_addr = eth_validate_addr, |
2841 | .ndo_set_mac_address = gem_set_mac_address, |
2842 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2843 | .ndo_poll_controller = gem_poll_controller, |
2844 | #endif |
2845 | }; |
2846 | |
2847 | static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2848 | { |
2849 | unsigned long gemreg_base, gemreg_len; |
2850 | struct net_device *dev; |
2851 | struct gem *gp; |
2852 | int err, pci_using_dac; |
2853 | |
	printk_once(KERN_INFO "%s", version);
2855 | |
	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
2865 | return err; |
2866 | } |
	pci_set_master(pdev);
2868 | |
2869 | /* Configure DMA attributes. */ |
2870 | |
2871 | /* All of the GEM documentation states that 64-bit DMA addressing |
2872 | * is fully supported and should work just fine. However the |
2873 | * front end for RIO based GEMs is different and only supports |
2874 | * 32-bit addressing. |
2875 | * |
2876 | * For now we assume the various PPC GEMs are 32-bit only as well. |
2877 | */ |
2878 | if (pdev->vendor == PCI_VENDOR_ID_SUN && |
2879 | pdev->device == PCI_DEVICE_ID_SUN_GEM && |
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
2881 | pci_using_dac = 1; |
2882 | } else { |
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
2886 | goto err_disable_device; |
2887 | } |
2888 | pci_using_dac = 0; |
2889 | } |
2890 | |
2891 | gemreg_base = pci_resource_start(pdev, 0); |
2892 | gemreg_len = pci_resource_len(pdev, 0); |
2893 | |
2894 | if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { |
		pr_err("Cannot find proper PCI device base address, aborting\n");
2896 | err = -ENODEV; |
2897 | goto err_disable_device; |
2898 | } |
2899 | |
2900 | dev = alloc_etherdev(sizeof(*gp)); |
2901 | if (!dev) { |
2902 | err = -ENOMEM; |
2903 | goto err_disable_device; |
2904 | } |
2905 | SET_NETDEV_DEV(dev, &pdev->dev); |
2906 | |
2907 | gp = netdev_priv(dev); |
2908 | |
2909 | err = pci_request_regions(pdev, DRV_NAME); |
2910 | if (err) { |
		pr_err("Cannot obtain PCI resources, aborting\n");
2912 | goto err_out_free_netdev; |
2913 | } |
2914 | |
2915 | gp->pdev = pdev; |
2916 | gp->dev = dev; |
2917 | |
2918 | gp->msg_enable = DEFAULT_MSG; |
2919 | |
2920 | timer_setup(&gp->link_timer, gem_link_timer, 0); |
2921 | |
2922 | INIT_WORK(&gp->reset_task, gem_reset_task); |
2923 | |
2924 | gp->lstate = link_down; |
2925 | gp->timer_ticks = 0; |
2926 | netif_carrier_off(dev); |
2927 | |
	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
2931 | err = -EIO; |
2932 | goto err_out_free_res; |
2933 | } |
2934 | |
2935 | /* On Apple, we want a reference to the Open Firmware device-tree |
2936 | * node. We use it for clock control. |
2937 | */ |
2938 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
2939 | gp->of_node = pci_device_to_OF_node(pdev); |
2940 | #endif |
2941 | |
2942 | /* Only Apple version supports WOL afaik */ |
2943 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) |
2944 | gp->has_wol = 1; |
2945 | |
2946 | /* Make sure cell is enabled */ |
2947 | gem_get_cell(gp); |
2948 | |
2949 | /* Make sure everything is stopped and in init state */ |
2950 | gem_reset(gp); |
2951 | |
2952 | /* Fill up the mii_phy structure (even if we won't use it) */ |
2953 | gp->phy_mii.dev = dev; |
2954 | gp->phy_mii.mdio_read = _sungem_phy_read; |
2955 | gp->phy_mii.mdio_write = _sungem_phy_write; |
2956 | #ifdef CONFIG_PPC_PMAC |
2957 | gp->phy_mii.platform_data = gp->of_node; |
2958 | #endif |
2959 | /* By default, we start with autoneg */ |
2960 | gp->want_autoneg = 1; |
2961 | |
2962 | /* Check fifo sizes, PHY type, etc... */ |
2963 | if (gem_check_invariants(gp)) { |
2964 | err = -ENODEV; |
2965 | goto err_out_iounmap; |
2966 | } |
2967 | |
2968 | /* It is guaranteed that the returned buffer will be at least |
2969 | * PAGE_SIZE aligned. |
2970 | */ |
	gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
					    &gp->gblock_dvma, GFP_KERNEL);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
2975 | err = -ENOMEM; |
2976 | goto err_out_iounmap; |
2977 | } |
2978 | |
2979 | err = gem_get_device_address(gp); |
2980 | if (err) |
2981 | goto err_out_free_consistent; |
2982 | |
2983 | dev->netdev_ops = &gem_netdev_ops; |
	netif_napi_add(dev, &gp->napi, gem_poll);
2985 | dev->ethtool_ops = &gem_ethtool_ops; |
2986 | dev->watchdog_timeo = 5 * HZ; |
2987 | dev->dma = 0; |
2988 | |
2989 | /* Set that now, in case PM kicks in now */ |
	pci_set_drvdata(pdev, dev);
2991 | |
2992 | /* We can do scatter/gather and HW checksum */ |
2993 | dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; |
2994 | dev->features = dev->hw_features; |
2995 | if (pci_using_dac) |
2996 | dev->features |= NETIF_F_HIGHDMA; |
2997 | |
2998 | /* MTU range: 68 - 1500 (Jumbo mode is broken) */ |
2999 | dev->min_mtu = GEM_MIN_MTU; |
3000 | dev->max_mtu = GEM_MAX_MTU; |
3001 | |
3002 | /* Register with kernel */ |
3003 | if (register_netdev(dev)) { |
		pr_err("Cannot register net device, aborting\n");
3005 | err = -ENOMEM; |
3006 | goto err_out_free_consistent; |
3007 | } |
3008 | |
3009 | /* Undo the get_cell with appropriate locking (we could use |
3010 | * ndo_init/uninit but that would be even more clumsy imho) |
3011 | */ |
3012 | rtnl_lock(); |
3013 | gem_put_cell(gp); |
3014 | rtnl_unlock(); |
3015 | |
	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3017 | dev->dev_addr); |
3018 | return 0; |
3019 | |
3020 | err_out_free_consistent: |
3021 | gem_remove_one(pdev); |
3022 | err_out_iounmap: |
3023 | gem_put_cell(gp); |
	iounmap(gp->regs);
3025 | |
3026 | err_out_free_res: |
3027 | pci_release_regions(pdev); |
3028 | |
3029 | err_out_free_netdev: |
3030 | free_netdev(dev); |
3031 | err_disable_device: |
	pci_disable_device(pdev);
3033 | return err; |
3034 | |
3035 | } |
3036 | |
3037 | static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume); |
3038 | |
3039 | static struct pci_driver gem_driver = { |
3040 | .name = GEM_MODULE_NAME, |
3041 | .id_table = gem_pci_tbl, |
3042 | .probe = gem_init_one, |
3043 | .remove = gem_remove_one, |
3044 | .driver.pm = &gem_pm_ops, |
3045 | }; |
3046 | |
3047 | module_pci_driver(gem_driver); |
3048 | |