1 | /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */ |
2 | /* |
3 | Written 1998-2001 by Donald Becker. |
4 | |
5 | Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com> |
6 | |
7 | This software may be used and distributed according to the terms of |
8 | the GNU General Public License (GPL), incorporated herein by reference. |
9 | Drivers based on or derived from this code fall under the GPL and must |
10 | retain the authorship, copyright and license notice. This file is not |
11 | a complete program and may only be used when the entire operating |
12 | system is licensed under the GPL. |
13 | |
14 | This driver is designed for the VIA VT86C100A Rhine-I. |
15 | It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM |
16 | and management NIC 6105M). |
17 | |
18 | The author may be reached as becker@scyld.com, or C/O |
19 | Scyld Computing Corporation |
20 | 410 Severn Ave., Suite 210 |
21 | Annapolis MD 21403 |
22 | |
23 | |
24 | This driver contains some changes from the original Donald Becker |
25 | version. He may or may not be interested in bug reports on this |
26 | code. You can find his versions at: |
27 | http://www.scyld.com/network/via-rhine.html |
28 | [link no longer provides useful info -jgarzik] |
29 | |
30 | */ |
31 | |
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | |
34 | #define DRV_NAME "via-rhine" |
35 | |
36 | #include <linux/types.h> |
37 | |
38 | /* A few user-configurable values. |
39 | These may be modified when a driver module is loaded. */ |
40 | static int debug = 0; |
41 | #define RHINE_MSG_DEFAULT \ |
42 | (0x0000) |
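
/* Note (illustrative): netif_msg_init() treats a positive "debug" value
 * as a bit count, so e.g. debug=3 enables the lowest three message
 * classes (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK).
 */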
43 | |
44 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
45 | Setting to > 1518 effectively disables this feature. */ |
46 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ |
47 | defined(CONFIG_SPARC) || defined(__ia64__) || \ |
48 | defined(__sh__) || defined(__mips__) |
49 | static int rx_copybreak = 1518; |
50 | #else |
51 | static int rx_copybreak; |
52 | #endif |
53 | |
54 | /* Work-around for broken BIOSes: they are unable to get the chip back out of |
55 | power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ |
56 | static bool avoid_D3; |
57 | |
58 | /* |
59 | * In case you are looking for 'options[]' or 'full_duplex[]', they |
60 | * are gone. Use ethtool(8) instead. |
61 | */ |
62 | |
63 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
64 | The Rhine has a 64 element 8390-like hash table. */ |
65 | static const int multicast_filter_limit = 32; |
66 | |
67 | |
68 | /* Operational parameters that are set at compile time. */ |
69 | |
70 | /* Keep the ring sizes a power of two for compile efficiency. |
71 | * The compiler will convert <unsigned>'%'<2^N> into a bit mask. |
72 | * Making the Tx ring too large decreases the effectiveness of channel |
73 | * bonding and packet priority. |
74 | * With BQL support, we can increase TX ring safely. |
75 | * There are no ill effects from too-large receive rings. |
76 | */ |
77 | #define TX_RING_SIZE 64 |
78 | #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ |
79 | #define RX_RING_SIZE 64 |
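
/*
 * Illustrative sketch (not part of the driver, helper name is ours):
 * because the ring sizes above are powers of two, the index wrap that
 * the comment describes compiles down to a bit mask.
 */
static inline unsigned int rhine_ring_idx_example(unsigned int idx)
{
	return idx % TX_RING_SIZE;	/* same as idx & (TX_RING_SIZE - 1) */
}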
80 | |
81 | /* Operational parameters that usually are not changed. */ |
82 | |
83 | /* Time in jiffies before concluding the transmitter is hung. */ |
84 | #define TX_TIMEOUT (2*HZ) |
85 | |
86 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ |
87 | |
88 | #include <linux/module.h> |
89 | #include <linux/moduleparam.h> |
90 | #include <linux/kernel.h> |
91 | #include <linux/string.h> |
92 | #include <linux/timer.h> |
93 | #include <linux/errno.h> |
94 | #include <linux/ioport.h> |
95 | #include <linux/interrupt.h> |
96 | #include <linux/pci.h> |
97 | #include <linux/of.h> |
98 | #include <linux/of_irq.h> |
99 | #include <linux/platform_device.h> |
100 | #include <linux/dma-mapping.h> |
101 | #include <linux/netdevice.h> |
102 | #include <linux/etherdevice.h> |
103 | #include <linux/skbuff.h> |
104 | #include <linux/init.h> |
105 | #include <linux/delay.h> |
106 | #include <linux/mii.h> |
107 | #include <linux/ethtool.h> |
108 | #include <linux/crc32.h> |
109 | #include <linux/if_vlan.h> |
110 | #include <linux/bitops.h> |
111 | #include <linux/workqueue.h> |
112 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
113 | #include <asm/io.h> |
114 | #include <asm/irq.h> |
115 | #include <linux/uaccess.h> |
116 | #include <linux/dmi.h> |
117 | |
118 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>" ); |
119 | MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver" ); |
120 | MODULE_LICENSE("GPL" ); |
121 | |
122 | module_param(debug, int, 0); |
123 | module_param(rx_copybreak, int, 0); |
124 | module_param(avoid_D3, bool, 0); |
125 | MODULE_PARM_DESC(debug, "VIA Rhine debug message flags" ); |
126 | MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames" ); |
127 | MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)" ); |
128 | |
129 | #define MCAM_SIZE 32 |
130 | #define VCAM_SIZE 32 |
131 | |
132 | /* |
133 | Theory of Operation |
134 | |
135 | I. Board Compatibility |
136 | |
This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller. It also supports the later Rhine-II and Rhine-III chips.
139 | |
140 | II. Board-specific settings |
141 | |
142 | Boards with this chip are functional only in a bus-master PCI slot. |
143 | |
144 | Many operational settings are loaded from the EEPROM to the Config word at |
145 | offset 0x78. For most of these settings, this driver assumes that they are |
146 | correct. |
147 | If this driver is compiled to use PCI memory space operations the EEPROM |
148 | must be configured to enable memory ops. |
149 | |
150 | III. Driver operation |
151 | |
152 | IIIa. Ring buffers |
153 | |
154 | This driver uses two statically allocated fixed-size descriptor lists |
155 | formed into rings by a branch from the final descriptor to the beginning of |
156 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. |
157 | |
158 | IIIb/c. Transmit/Receive Structure |
159 | |
160 | This driver attempts to use a zero-copy receive and transmit scheme. |
161 | |
162 | Alas, all data buffers are required to start on a 32 bit boundary, so |
163 | the driver must often copy transmit packets into bounce buffers. |
164 | |
165 | The driver allocates full frame size skbuffs for the Rx ring buffers at |
166 | open() time and passes the skb->data field to the chip as receive data |
167 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, |
168 | a fresh skbuff is allocated and the frame is copied to the new skbuff. |
169 | When the incoming frame is larger, the skbuff is passed directly up the |
170 | protocol stack. Buffers consumed this way are replaced by newly allocated |
171 | skbuffs in the last phase of rhine_rx(). |
172 | |
The RX_COPYBREAK value is chosen to trade off the memory wasted by
174 | using a full-sized skbuff for small frames vs. the copying costs of larger |
175 | frames. New boards are typically used in generously configured machines |
176 | and the underfilled buffers have negligible impact compared to the benefit of |
177 | a single allocation size, so the default value of zero results in never |
178 | copying packets. When copying is done, the cost is usually mitigated by using |
179 | a combined copy/checksum routine. Copying also preloads the cache, which is |
180 | most useful with small frames. |
181 | |
182 | Since the VIA chips are only able to transfer data to buffers on 32 bit |
183 | boundaries, the IP header at offset 14 in an ethernet frame isn't |
184 | longword aligned for further processing. Copying these unaligned buffers |
185 | has the beneficial effect of 16-byte aligning the IP header. |
186 | |
187 | IIId. Synchronization |
188 | |
189 | The driver runs as two independent, single-threaded flows of control. One |
190 | is the send-packet routine, which enforces single-threaded use by the |
191 | netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, |
192 | which is single threaded by the hardware and interrupt handling software. |
193 | |
194 | The send packet thread has partial control over the Tx ring. It locks the |
195 | netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in |
196 | the ring is not available it stops the transmit queue by |
197 | calling netif_stop_queue. |
198 | |
199 | The interrupt handler has exclusive control over the Rx ring and records stats |
200 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as |
201 | empty by incrementing the dirty_tx mark. If at least half of the entries in |
202 | the Rx ring are available the transmit queue is woken up if it was stopped. |
203 | |
204 | IV. Notes |
205 | |
206 | IVb. References |
207 | |
208 | Preliminary VT86C100A manual from http://www.via.com.tw/ |
209 | http://www.scyld.com/expert/100mbps.html |
210 | http://www.scyld.com/expert/NWay.html |
211 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf |
212 | ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF |
213 | |
214 | |
215 | IVc. Errata |
216 | |
The VT86C100A manual is not a reliable source of information.
218 | The 3043 chip does not handle unaligned transmit or receive buffers, resulting |
219 | in significant performance degradation for bounce buffer copies on transmit |
220 | and unaligned IP headers on receive. |
221 | The chip does not pad to minimum transmit length. |
222 | |
223 | */ |
224 | |
225 | |
226 | /* This table drives the PCI probe routines. It's mostly boilerplate in all |
227 | of the drivers, and will likely be provided by some future kernel. |
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
230 | */ |
231 | |
232 | enum rhine_revs { |
233 | VT86C100A = 0x00, |
234 | VTunknown0 = 0x20, |
235 | VT6102 = 0x40, |
236 | VT8231 = 0x50, /* Integrated MAC */ |
237 | VT8233 = 0x60, /* Integrated MAC */ |
238 | VT8235 = 0x74, /* Integrated MAC */ |
239 | VT8237 = 0x78, /* Integrated MAC */ |
240 | VT8251 = 0x7C, /* Integrated MAC */ |
241 | VT6105 = 0x80, |
242 | VT6105_B0 = 0x83, |
243 | VT6105L = 0x8A, |
244 | VT6107 = 0x8C, |
245 | VTunknown2 = 0x8E, |
246 | VT6105M = 0x90, /* Management adapter */ |
247 | }; |
248 | |
249 | enum rhine_quirks { |
250 | rqWOL = 0x0001, /* Wake-On-LAN support */ |
251 | rqForceReset = 0x0002, |
252 | rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ |
253 | rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ |
254 | rqRhineI = 0x0100, /* See comment below */ |
255 | rqIntPHY = 0x0200, /* Integrated PHY */ |
256 | rqMgmt = 0x0400, /* Management adapter */ |
257 | rqNeedEnMMIO = 0x0800, /* Whether the core needs to be |
258 | * switched from PIO mode to MMIO |
259 | * (only applies to PCI) |
260 | */ |
261 | }; |
262 | /* |
263 | * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable |
264 | * MMIO as well as for the collision counter and the Tx FIFO underflow |
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
266 | */ |
267 | |
268 | /* Beware of PCI posted writes */ |
269 | #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) |
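
/*
 * Illustrative use: a dummy read-back right after an iowrite*() forces
 * the posted write out to the chip, as in rhine_chip_reset():
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */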
270 | |
271 | static const struct pci_device_id rhine_pci_tbl[] = { |
272 | { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ |
273 | { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ |
274 | { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ |
275 | { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ |
276 | { } /* terminate list */ |
277 | }; |
278 | MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); |
279 | |
280 | /* OpenFirmware identifiers for platform-bus devices |
281 | * The .data field is currently only used to store quirks |
282 | */ |
283 | static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns; |
284 | static const struct of_device_id rhine_of_tbl[] = { |
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
286 | { } /* terminate list */ |
287 | }; |
288 | MODULE_DEVICE_TABLE(of, rhine_of_tbl); |
289 | |
290 | /* Offsets to the device registers. */ |
291 | enum register_offsets { |
292 | StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, |
293 | ChipCmd1=0x09, TQWake=0x0A, |
294 | IntrStatus=0x0C, IntrEnable=0x0E, |
295 | MulticastFilter0=0x10, MulticastFilter1=0x14, |
296 | RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, |
297 | MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F, |
298 | MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, |
299 | ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, |
300 | RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, |
301 | StickyHW=0x83, IntrStatus2=0x84, |
302 | CamMask=0x88, CamCon=0x92, CamAddr=0x93, |
303 | WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, |
304 | WOLcrClr1=0xA6, WOLcgClr=0xA7, |
305 | PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, |
306 | }; |
307 | |
308 | /* Bits in ConfigD */ |
309 | enum backoff_bits { |
310 | BackOptional=0x01, BackModify=0x02, |
311 | BackCaptureEffect=0x04, BackRandom=0x08 |
312 | }; |
313 | |
314 | /* Bits in the TxConfig (TCR) register */ |
315 | enum tcr_bits { |
316 | TCR_PQEN=0x01, |
317 | TCR_LB0=0x02, /* loopback[0] */ |
318 | TCR_LB1=0x04, /* loopback[1] */ |
319 | TCR_OFSET=0x08, |
320 | TCR_RTGOPT=0x10, |
321 | TCR_RTFT0=0x20, |
322 | TCR_RTFT1=0x40, |
323 | TCR_RTSF=0x80, |
324 | }; |
325 | |
326 | /* Bits in the CamCon (CAMC) register */ |
327 | enum camcon_bits { |
328 | CAMC_CAMEN=0x01, |
329 | CAMC_VCAMSL=0x02, |
330 | CAMC_CAMWR=0x04, |
331 | CAMC_CAMRD=0x08, |
332 | }; |
333 | |
334 | /* Bits in the PCIBusConfig1 (BCR1) register */ |
335 | enum bcr1_bits { |
336 | BCR1_POT0=0x01, |
337 | BCR1_POT1=0x02, |
338 | BCR1_POT2=0x04, |
339 | BCR1_CTFT0=0x08, |
340 | BCR1_CTFT1=0x10, |
341 | BCR1_CTSF=0x20, |
342 | BCR1_TXQNOBK=0x40, /* for VT6105 */ |
343 | BCR1_VIDFR=0x80, /* for VT6105 */ |
344 | BCR1_MED0=0x40, /* for VT6102 */ |
345 | BCR1_MED1=0x80, /* for VT6102 */ |
346 | }; |
347 | |
/* Registers we check to verify that MMIO and PIO accesses read back the same values. */
349 | static const int mmio_verify_registers[] = { |
350 | RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, |
351 | 0 |
352 | }; |
353 | |
354 | /* Bits in the interrupt status/mask registers. */ |
355 | enum intr_status_bits { |
356 | IntrRxDone = 0x0001, |
357 | IntrTxDone = 0x0002, |
358 | IntrRxErr = 0x0004, |
359 | IntrTxError = 0x0008, |
360 | IntrRxEmpty = 0x0020, |
361 | IntrPCIErr = 0x0040, |
362 | IntrStatsMax = 0x0080, |
363 | IntrRxEarly = 0x0100, |
364 | IntrTxUnderrun = 0x0210, |
365 | IntrRxOverflow = 0x0400, |
366 | IntrRxDropped = 0x0800, |
367 | IntrRxNoBuf = 0x1000, |
368 | IntrTxAborted = 0x2000, |
369 | IntrLinkChange = 0x4000, |
370 | IntrRxWakeUp = 0x8000, |
371 | IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */ |
372 | IntrNormalSummary = IntrRxDone | IntrTxDone, |
373 | IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError | |
374 | IntrTxUnderrun, |
375 | }; |
376 | |
377 | /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ |
378 | enum wol_bits { |
379 | WOLucast = 0x10, |
380 | WOLmagic = 0x20, |
381 | WOLbmcast = 0x30, |
382 | WOLlnkon = 0x40, |
383 | WOLlnkoff = 0x80, |
384 | }; |
385 | |
386 | /* The Rx and Tx buffer descriptors. */ |
387 | struct rx_desc { |
388 | __le32 rx_status; |
389 | __le32 desc_length; /* Chain flag, Buffer/frame length */ |
390 | __le32 addr; |
391 | __le32 next_desc; |
392 | }; |
393 | struct tx_desc { |
394 | __le32 tx_status; |
395 | __le32 desc_length; /* Chain flag, Tx Config, Frame length */ |
396 | __le32 addr; |
397 | __le32 next_desc; |
398 | }; |
399 | |
400 | /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ |
401 | #define TXDESC 0x00e08000 |
402 | |
403 | enum rx_status_bits { |
404 | RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F |
405 | }; |
406 | |
407 | /* Bits in *_desc.*_status */ |
408 | enum desc_status_bits { |
409 | DescOwn=0x80000000 |
410 | }; |
411 | |
412 | /* Bits in *_desc.*_length */ |
413 | enum desc_length_bits { |
414 | DescTag=0x00010000 |
415 | }; |
416 | |
417 | /* Bits in ChipCmd. */ |
418 | enum chip_cmd_bits { |
419 | CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, |
420 | CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, |
421 | Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, |
422 | Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, |
423 | }; |
424 | |
425 | struct rhine_stats { |
426 | u64 packets; |
427 | u64 bytes; |
428 | struct u64_stats_sync syncp; |
429 | }; |
430 | |
431 | struct rhine_private { |
432 | /* Bit mask for configured VLAN ids */ |
433 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
434 | |
435 | /* Descriptor rings */ |
436 | struct rx_desc *rx_ring; |
437 | struct tx_desc *tx_ring; |
438 | dma_addr_t rx_ring_dma; |
439 | dma_addr_t tx_ring_dma; |
440 | |
441 | /* The addresses of receive-in-place skbuffs. */ |
442 | struct sk_buff *rx_skbuff[RX_RING_SIZE]; |
443 | dma_addr_t rx_skbuff_dma[RX_RING_SIZE]; |
444 | |
445 | /* The saved address of a sent-in-place packet/buffer, for later free(). */ |
446 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; |
447 | dma_addr_t tx_skbuff_dma[TX_RING_SIZE]; |
448 | |
449 | /* Tx bounce buffers (Rhine-I only) */ |
450 | unsigned char *tx_buf[TX_RING_SIZE]; |
451 | unsigned char *tx_bufs; |
452 | dma_addr_t tx_bufs_dma; |
453 | |
454 | int irq; |
455 | long pioaddr; |
456 | struct net_device *dev; |
457 | struct napi_struct napi; |
458 | spinlock_t lock; |
459 | struct mutex task_lock; |
460 | bool task_enable; |
461 | struct work_struct slow_event_task; |
462 | struct work_struct reset_task; |
463 | |
464 | u32 msg_enable; |
465 | |
466 | /* Frequently used values: keep some adjacent for cache effect. */ |
467 | u32 quirks; |
468 | unsigned int cur_rx; |
469 | unsigned int cur_tx, dirty_tx; |
470 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ |
471 | struct rhine_stats rx_stats; |
472 | struct rhine_stats tx_stats; |
473 | u8 wolopts; |
474 | |
475 | u8 tx_thresh, rx_thresh; |
476 | |
477 | struct mii_if_info mii_if; |
478 | void __iomem *base; |
479 | }; |
480 | |
481 | #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0) |
482 | #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0) |
483 | #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0) |
484 | |
485 | #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x)) |
486 | #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x)) |
487 | #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x)) |
488 | |
489 | #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0) |
490 | #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0) |
491 | #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0) |
492 | |
493 | #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0) |
494 | #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0) |
495 | #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0) |
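
/*
 * Example (illustrative) of the read-modify-write helpers above: the
 * *_SET variants clear the bits in the mask first, then OR in the new
 * value, e.g.
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * as done in rhine_kick_tx_threshold().
 */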
496 | |
497 | |
498 | static int mdio_read(struct net_device *dev, int phy_id, int location); |
499 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); |
500 | static int rhine_open(struct net_device *dev); |
501 | static void rhine_reset_task(struct work_struct *work); |
502 | static void rhine_slow_event_task(struct work_struct *work); |
503 | static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue); |
504 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
505 | struct net_device *dev); |
506 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance); |
507 | static void rhine_tx(struct net_device *dev); |
508 | static int rhine_rx(struct net_device *dev, int limit); |
509 | static void rhine_set_rx_mode(struct net_device *dev); |
510 | static void rhine_get_stats64(struct net_device *dev, |
511 | struct rtnl_link_stats64 *stats); |
512 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
513 | static const struct ethtool_ops netdev_ethtool_ops; |
514 | static int rhine_close(struct net_device *dev); |
515 | static int rhine_vlan_rx_add_vid(struct net_device *dev, |
516 | __be16 proto, u16 vid); |
517 | static int rhine_vlan_rx_kill_vid(struct net_device *dev, |
518 | __be16 proto, u16 vid); |
519 | static void rhine_restart_tx(struct net_device *dev); |
520 | |
521 | static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) |
522 | { |
523 | void __iomem *ioaddr = rp->base; |
524 | int i; |
525 | |
526 | for (i = 0; i < 1024; i++) { |
527 | bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask); |
528 | |
529 | if (low ^ has_mask_bits) |
530 | break; |
531 | udelay(10); |
532 | } |
533 | if (i > 64) { |
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
536 | } |
537 | } |
538 | |
539 | static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) |
540 | { |
	rhine_wait_bit(rp, reg, mask, false);
542 | } |
543 | |
544 | static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) |
545 | { |
	rhine_wait_bit(rp, reg, mask, true);
547 | } |
548 | |
549 | static u32 rhine_get_events(struct rhine_private *rp) |
550 | { |
551 | void __iomem *ioaddr = rp->base; |
552 | u32 intr_status; |
553 | |
554 | intr_status = ioread16(ioaddr + IntrStatus); |
555 | /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */ |
556 | if (rp->quirks & rqStatusWBRace) |
557 | intr_status |= ioread8(ioaddr + IntrStatus2) << 16; |
558 | return intr_status; |
559 | } |
560 | |
561 | static void rhine_ack_events(struct rhine_private *rp, u32 mask) |
562 | { |
563 | void __iomem *ioaddr = rp->base; |
564 | |
565 | if (rp->quirks & rqStatusWBRace) |
566 | iowrite8(mask >> 16, ioaddr + IntrStatus2); |
567 | iowrite16(mask, ioaddr + IntrStatus); |
568 | } |
569 | |
570 | /* |
571 | * Get power related registers into sane state. |
572 | * Notify user about past WOL event. |
573 | */ |
574 | static void rhine_power_init(struct net_device *dev) |
575 | { |
576 | struct rhine_private *rp = netdev_priv(dev); |
577 | void __iomem *ioaddr = rp->base; |
578 | u16 wolstat; |
579 | |
580 | if (rp->quirks & rqWOL) { |
581 | /* Make sure chip is in power state D0 */ |
582 | iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW); |
583 | |
584 | /* Disable "force PME-enable" */ |
585 | iowrite8(0x80, ioaddr + WOLcgClr); |
586 | |
587 | /* Clear power-event config bits (WOL) */ |
588 | iowrite8(0xFF, ioaddr + WOLcrClr); |
589 | /* More recent cards can manage two additional patterns */ |
590 | if (rp->quirks & rq6patterns) |
591 | iowrite8(0x03, ioaddr + WOLcrClr1); |
592 | |
593 | /* Save power-event status bits */ |
594 | wolstat = ioread8(ioaddr + PwrcsrSet); |
595 | if (rp->quirks & rq6patterns) |
596 | wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8; |
597 | |
598 | /* Clear power-event status bits */ |
599 | iowrite8(0xFF, ioaddr + PwrcsrClr); |
600 | if (rp->quirks & rq6patterns) |
601 | iowrite8(0x03, ioaddr + PwrcsrClr1); |
602 | |
603 | if (wolstat) { |
604 | char *reason; |
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
626 | } |
627 | } |
628 | } |
629 | |
630 | static void rhine_chip_reset(struct net_device *dev) |
631 | { |
632 | struct rhine_private *rp = netdev_priv(dev); |
633 | void __iomem *ioaddr = rp->base; |
634 | u8 cmd1; |
635 | |
636 | iowrite8(Cmd1Reset, ioaddr + ChipCmd1); |
637 | IOSYNC; |
638 | |
639 | if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { |
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
641 | |
642 | /* Force reset */ |
643 | if (rp->quirks & rqForceReset) |
644 | iowrite8(0x40, ioaddr + MiscCmd); |
645 | |
646 | /* Reset can take somewhat longer (rare) */ |
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
648 | } |
649 | |
650 | cmd1 = ioread8(ioaddr + ChipCmd1); |
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
653 | } |
654 | |
655 | static void enable_mmio(long pioaddr, u32 quirks) |
656 | { |
657 | int n; |
658 | |
659 | if (quirks & rqNeedEnMMIO) { |
660 | if (quirks & rqRhineI) { |
661 | /* More recent docs say that this bit is reserved */ |
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
667 | } |
668 | } |
669 | } |
670 | |
671 | static inline int verify_mmio(struct device *hwdev, |
672 | long pioaddr, |
673 | void __iomem *ioaddr, |
674 | u32 quirks) |
675 | { |
676 | if (quirks & rqNeedEnMMIO) { |
677 | int i = 0; |
678 | |
679 | /* Check that selected MMIO registers match the PIO ones */ |
680 | while (mmio_verify_registers[i]) { |
681 | int reg = mmio_verify_registers[i++]; |
			unsigned char a = inb(pioaddr + reg);
			unsigned char b = readb(ioaddr + reg);
684 | |
685 | if (a != b) { |
				dev_err(hwdev,
					"MMIO does not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
689 | return -EIO; |
690 | } |
691 | } |
692 | } |
693 | return 0; |
694 | } |
695 | |
696 | /* |
697 | * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM |
698 | * (plus 0x6C for Rhine-I/II) |
699 | */ |
700 | static void rhine_reload_eeprom(long pioaddr, struct net_device *dev) |
701 | { |
702 | struct rhine_private *rp = netdev_priv(dev); |
703 | void __iomem *ioaddr = rp->base; |
704 | int i; |
705 | |
	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
713 | |
714 | /* |
715 | * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable |
716 | * MMIO. If reloading EEPROM was done first this could be avoided, but |
717 | * it is not known if that still works with the "win98-reboot" problem. |
718 | */ |
	enable_mmio(pioaddr, rp->quirks);
720 | |
721 | /* Turn off EEPROM-controlled wake-up (magic packet) */ |
722 | if (rp->quirks & rqWOL) |
723 | iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA); |
724 | |
725 | } |
726 | |
727 | #ifdef CONFIG_NET_POLL_CONTROLLER |
728 | static void rhine_poll(struct net_device *dev) |
729 | { |
730 | struct rhine_private *rp = netdev_priv(dev); |
731 | const int irq = rp->irq; |
732 | |
733 | disable_irq(irq); |
	rhine_interrupt(irq, dev);
735 | enable_irq(irq); |
736 | } |
737 | #endif |
738 | |
739 | static void rhine_kick_tx_threshold(struct rhine_private *rp) |
740 | { |
741 | if (rp->tx_thresh < 0xe0) { |
742 | void __iomem *ioaddr = rp->base; |
743 | |
744 | rp->tx_thresh += 0x20; |
745 | BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); |
746 | } |
747 | } |
748 | |
749 | static void rhine_tx_err(struct rhine_private *rp, u32 status) |
750 | { |
751 | struct net_device *dev = rp->dev; |
752 | |
753 | if (status & IntrTxAborted) { |
754 | netif_info(rp, tx_err, dev, |
755 | "Abort %08x, frame dropped\n" , status); |
756 | } |
757 | |
758 | if (status & IntrTxUnderrun) { |
759 | rhine_kick_tx_threshold(rp); |
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
762 | } |
763 | |
764 | if (status & IntrTxDescRace) |
765 | netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n" ); |
766 | |
767 | if ((status & IntrTxError) && |
768 | (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { |
769 | rhine_kick_tx_threshold(rp); |
770 | netif_info(rp, tx_err, dev, "Unspecified error. " |
771 | "Tx threshold now %02x\n" , rp->tx_thresh); |
772 | } |
773 | |
774 | rhine_restart_tx(dev); |
775 | } |
776 | |
static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
778 | { |
779 | void __iomem *ioaddr = rp->base; |
780 | struct net_device_stats *stats = &rp->dev->stats; |
781 | |
782 | stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); |
783 | stats->rx_missed_errors += ioread16(ioaddr + RxMissed); |
784 | |
785 | /* |
786 | * Clears the "tally counters" for CRC errors and missed frames(?). |
787 | * It has been reported that some chips need a write of 0 to clear |
788 | * these, for others the counters are set to 1 when written to and |
789 | * instead cleared when read. So we clear them both ways ... |
790 | */ |
791 | iowrite32(0, ioaddr + RxMissed); |
792 | ioread16(ioaddr + RxCRCErrs); |
793 | ioread16(ioaddr + RxMissed); |
794 | } |
795 | |
796 | #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ |
797 | IntrRxErr | \ |
798 | IntrRxEmpty | \ |
799 | IntrRxOverflow | \ |
800 | IntrRxDropped | \ |
801 | IntrRxNoBuf | \ |
802 | IntrRxWakeUp) |
803 | |
804 | #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ |
805 | IntrTxAborted | \ |
806 | IntrTxUnderrun | \ |
807 | IntrTxDescRace) |
808 | #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) |
809 | |
810 | #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ |
811 | RHINE_EVENT_NAPI_TX | \ |
812 | IntrStatsMax) |
813 | #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) |
814 | #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) |
815 | |
816 | static int rhine_napipoll(struct napi_struct *napi, int budget) |
817 | { |
818 | struct rhine_private *rp = container_of(napi, struct rhine_private, napi); |
819 | struct net_device *dev = rp->dev; |
820 | void __iomem *ioaddr = rp->base; |
821 | u16 enable_mask = RHINE_EVENT & 0xffff; |
822 | int work_done = 0; |
823 | u32 status; |
824 | |
825 | status = rhine_get_events(rp); |
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
827 | |
828 | if (status & RHINE_EVENT_NAPI_RX) |
		work_done += rhine_rx(dev, budget);
830 | |
831 | if (status & RHINE_EVENT_NAPI_TX) { |
832 | if (status & RHINE_EVENT_NAPI_TX_ERR) { |
833 | /* Avoid scavenging before Tx engine turned off */ |
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
837 | } |
838 | |
839 | rhine_tx(dev); |
840 | |
841 | if (status & RHINE_EVENT_NAPI_TX_ERR) |
842 | rhine_tx_err(rp, status); |
843 | } |
844 | |
845 | if (status & IntrStatsMax) { |
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errors(rp);
		spin_unlock(&rp->lock);
849 | } |
850 | |
851 | if (status & RHINE_EVENT_SLOW) { |
852 | enable_mask &= ~RHINE_EVENT_SLOW; |
		schedule_work(&rp->slow_event_task);
854 | } |
855 | |
856 | if (work_done < budget) { |
		napi_complete_done(napi, work_done);
858 | iowrite16(enable_mask, ioaddr + IntrEnable); |
859 | } |
860 | return work_done; |
861 | } |
862 | |
863 | static void rhine_hw_init(struct net_device *dev, long pioaddr) |
864 | { |
865 | struct rhine_private *rp = netdev_priv(dev); |
866 | |
867 | /* Reset the chip to erase previous misconfiguration. */ |
868 | rhine_chip_reset(dev); |
869 | |
870 | /* Rhine-I needs extra time to recuperate before EEPROM reload */ |
871 | if (rp->quirks & rqRhineI) |
		msleep(5);
873 | |
874 | /* Reload EEPROM controlled bytes cleared by soft reset */ |
875 | if (dev_is_pci(dev->dev.parent)) |
876 | rhine_reload_eeprom(pioaddr, dev); |
877 | } |
878 | |
879 | static const struct net_device_ops rhine_netdev_ops = { |
880 | .ndo_open = rhine_open, |
881 | .ndo_stop = rhine_close, |
882 | .ndo_start_xmit = rhine_start_tx, |
883 | .ndo_get_stats64 = rhine_get_stats64, |
884 | .ndo_set_rx_mode = rhine_set_rx_mode, |
885 | .ndo_validate_addr = eth_validate_addr, |
886 | .ndo_set_mac_address = eth_mac_addr, |
887 | .ndo_eth_ioctl = netdev_ioctl, |
888 | .ndo_tx_timeout = rhine_tx_timeout, |
889 | .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, |
890 | .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, |
891 | #ifdef CONFIG_NET_POLL_CONTROLLER |
892 | .ndo_poll_controller = rhine_poll, |
893 | #endif |
894 | }; |
895 | |
896 | static int rhine_init_one_common(struct device *hwdev, u32 quirks, |
897 | long pioaddr, void __iomem *ioaddr, int irq) |
898 | { |
899 | struct net_device *dev; |
900 | struct rhine_private *rp; |
901 | int i, rc, phy_id; |
902 | u8 addr[ETH_ALEN]; |
903 | const char *name; |
904 | |
905 | /* this should always be supported */ |
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
909 | goto err_out; |
910 | } |
911 | |
912 | dev = alloc_etherdev(sizeof(struct rhine_private)); |
913 | if (!dev) { |
914 | rc = -ENOMEM; |
915 | goto err_out; |
916 | } |
917 | SET_NETDEV_DEV(dev, hwdev); |
918 | |
919 | rp = netdev_priv(dev); |
920 | rp->dev = dev; |
921 | rp->quirks = quirks; |
922 | rp->pioaddr = pioaddr; |
923 | rp->base = ioaddr; |
924 | rp->irq = irq; |
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
926 | |
927 | phy_id = rp->quirks & rqIntPHY ? 1 : 0; |
928 | |
	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);
931 | |
932 | /* Get chip registers into a sane state */ |
933 | rhine_power_init(dev); |
934 | rhine_hw_init(dev, pioaddr); |
935 | |
936 | for (i = 0; i < 6; i++) |
937 | addr[i] = ioread8(ioaddr + StationAddr + i); |
938 | eth_hw_addr_set(dev, addr); |
939 | |
	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
946 | } |
947 | |
948 | /* For Rhine-I/II, phy_id is loaded from EEPROM */ |
949 | if (!phy_id) |
		phy_id = ioread8(ioaddr + MIIPhyAddr);
951 | |
952 | spin_lock_init(&rp->lock); |
953 | mutex_init(&rp->task_lock); |
954 | INIT_WORK(&rp->reset_task, rhine_reset_task); |
955 | INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); |
956 | |
957 | rp->mii_if.dev = dev; |
958 | rp->mii_if.mdio_read = mdio_read; |
959 | rp->mii_if.mdio_write = mdio_write; |
960 | rp->mii_if.phy_id_mask = 0x1f; |
961 | rp->mii_if.reg_num_mask = 0x1f; |
962 | |
963 | /* The chip-specific entries in the device structure. */ |
964 | dev->netdev_ops = &rhine_netdev_ops; |
965 | dev->ethtool_ops = &netdev_ethtool_ops; |
966 | dev->watchdog_timeo = TX_TIMEOUT; |
967 | |
	netif_napi_add(dev, &rp->napi, rhine_napipoll);
969 | |
970 | if (rp->quirks & rqRhineI) |
971 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; |
972 | |
973 | if (rp->quirks & rqMgmt) |
974 | dev->features |= NETIF_F_HW_VLAN_CTAG_TX | |
975 | NETIF_F_HW_VLAN_CTAG_RX | |
976 | NETIF_F_HW_VLAN_CTAG_FILTER; |
977 | |
978 | /* dev->name not defined before register_netdev()! */ |
979 | rc = register_netdev(dev); |
980 | if (rc) |
981 | goto err_out_free_netdev; |
982 | |
983 | if (rp->quirks & rqRhineI) |
984 | name = "Rhine" ; |
985 | else if (rp->quirks & rqStatusWBRace) |
986 | name = "Rhine II" ; |
987 | else if (rp->quirks & rqMgmt) |
988 | name = "Rhine III (Management Adapter)" ; |
989 | else |
990 | name = "Rhine III" ; |
991 | |
992 | netdev_info(dev, format: "VIA %s at %p, %pM, IRQ %d\n" , |
993 | name, ioaddr, dev->dev_addr, rp->irq); |
994 | |
995 | dev_set_drvdata(dev: hwdev, data: dev); |
996 | |
997 | { |
998 | u16 mii_cmd; |
		int mii_status = mdio_read(dev, phy_id, MII_BMSR);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, MII_ADVERTISE);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, MII_LPA));
1009 | |
1010 | /* set IFF_RUNNING */ |
1011 | if (mii_status & BMSR_LSTATUS) |
1012 | netif_carrier_on(dev); |
1013 | else |
1014 | netif_carrier_off(dev); |
1015 | |
1016 | } |
1017 | } |
1018 | rp->mii_if.phy_id = phy_id; |
1019 | if (avoid_D3) |
1020 | netif_info(rp, probe, dev, "No D3 power state at shutdown\n" ); |
1021 | |
1022 | return 0; |
1023 | |
1024 | err_out_free_netdev: |
1025 | free_netdev(dev); |
1026 | err_out: |
1027 | return rc; |
1028 | } |
1029 | |
1030 | static int rhine_init_one_pci(struct pci_dev *pdev, |
1031 | const struct pci_device_id *ent) |
1032 | { |
1033 | struct device *hwdev = &pdev->dev; |
1034 | int rc; |
1035 | long pioaddr, memaddr; |
1036 | void __iomem *ioaddr; |
1037 | int io_size = pdev->revision < VTunknown0 ? 128 : 256; |
1038 | |
1039 | /* This driver was written to use PCI memory space. Some early versions |
1040 | * of the Rhine may only work correctly with I/O space accesses. |
1041 | * TODO: determine for which revisions this is true and assign the flag |
1042 | * in code as opposed to this Kconfig option (???) |
1043 | */ |
1044 | #ifdef CONFIG_VIA_RHINE_MMIO |
1045 | u32 quirks = rqNeedEnMMIO; |
1046 | #else |
1047 | u32 quirks = 0; |
1048 | #endif |
1049 | |
	rc = pci_enable_device(pdev);
1051 | if (rc) |
1052 | goto err_out; |
1053 | |
1054 | if (pdev->revision < VTunknown0) { |
1055 | quirks |= rqRhineI; |
1056 | } else if (pdev->revision >= VT6102) { |
1057 | quirks |= rqWOL | rqForceReset; |
1058 | if (pdev->revision < VT6105) { |
1059 | quirks |= rqStatusWBRace; |
1060 | } else { |
1061 | quirks |= rqIntPHY; |
1062 | if (pdev->revision >= VT6105_B0) |
1063 | quirks |= rq6patterns; |
1064 | if (pdev->revision >= VT6105M) |
1065 | quirks |= rqMgmt; |
1066 | } |
1067 | } |
1068 | |
1069 | /* sanity check */ |
1070 | if ((pci_resource_len(pdev, 0) < io_size) || |
1071 | (pci_resource_len(pdev, 1) < io_size)) { |
1072 | rc = -EIO; |
1073 | dev_err(hwdev, "Insufficient PCI resources, aborting\n" ); |
1074 | goto err_out_pci_disable; |
1075 | } |
1076 | |
1077 | pioaddr = pci_resource_start(pdev, 0); |
1078 | memaddr = pci_resource_start(pdev, 1); |
1079 | |
	pci_set_master(pdev);
1081 | |
1082 | rc = pci_request_regions(pdev, DRV_NAME); |
1083 | if (rc) |
1084 | goto err_out_pci_disable; |
1085 | |
	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1087 | if (!ioaddr) { |
1088 | rc = -EIO; |
1089 | dev_err(hwdev, |
1090 | "ioremap failed for device %s, region 0x%X @ 0x%lX\n" , |
1091 | dev_name(hwdev), io_size, memaddr); |
1092 | goto err_out_free_res; |
1093 | } |
1094 | |
1095 | enable_mmio(pioaddr, quirks); |
1096 | |
1097 | rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks); |
1098 | if (rc) |
1099 | goto err_out_unmap; |
1100 | |
	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
1103 | if (!rc) |
1104 | return 0; |
1105 | |
1106 | err_out_unmap: |
	pci_iounmap(pdev, ioaddr);
1108 | err_out_free_res: |
1109 | pci_release_regions(pdev); |
1110 | err_out_pci_disable: |
	pci_disable_device(pdev);
1112 | err_out: |
1113 | return rc; |
1114 | } |
1115 | |
1116 | static int rhine_init_one_platform(struct platform_device *pdev) |
1117 | { |
1118 | const u32 *quirks; |
1119 | int irq; |
1120 | void __iomem *ioaddr; |
1121 | |
	quirks = of_device_get_match_data(&pdev->dev);
1123 | if (!quirks) |
1124 | return -EINVAL; |
1125 | |
	ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);
1129 | |
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1131 | if (!irq) |
1132 | return -EINVAL; |
1133 | |
	return rhine_init_one_common(&pdev->dev, *quirks,
				     (long)ioaddr, ioaddr, irq);
1136 | } |
1137 | |
1138 | static int alloc_ring(struct net_device* dev) |
1139 | { |
1140 | struct rhine_private *rp = netdev_priv(dev); |
1141 | struct device *hwdev = dev->dev.parent; |
1142 | void *ring; |
1143 | dma_addr_t ring_dma; |
1144 | |
	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
1150 | if (!ring) { |
1151 | netdev_err(dev, format: "Could not allocate DMA memory\n" ); |
1152 | return -ENOMEM; |
1153 | } |
1154 | if (rp->quirks & rqRhineI) { |
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
1164 | return -ENOMEM; |
1165 | } |
1166 | } |
1167 | |
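	/* A single coherent allocation backs both rings: Rx descriptors
	 * first, Tx descriptors immediately after.
	 */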
1168 | rp->rx_ring = ring; |
1169 | rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); |
1170 | rp->rx_ring_dma = ring_dma; |
1171 | rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); |
1172 | |
1173 | return 0; |
1174 | } |
1175 | |
1176 | static void free_ring(struct net_device* dev) |
1177 | { |
1178 | struct rhine_private *rp = netdev_priv(dev); |
1179 | struct device *hwdev = dev->dev.parent; |
1180 | |
	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
1185 | rp->tx_ring = NULL; |
1186 | |
1187 | if (rp->tx_bufs) |
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);
1190 | |
1191 | rp->tx_bufs = NULL; |
1192 | |
1193 | } |
1194 | |
1195 | struct rhine_skb_dma { |
1196 | struct sk_buff *skb; |
1197 | dma_addr_t dma; |
1198 | }; |
1199 | |
1200 | static inline int rhine_skb_dma_init(struct net_device *dev, |
1201 | struct rhine_skb_dma *sd) |
1202 | { |
1203 | struct rhine_private *rp = netdev_priv(dev); |
1204 | struct device *hwdev = dev->dev.parent; |
1205 | const int size = rp->rx_buf_sz; |
1206 | |
	sd->skb = netdev_alloc_skb(dev, size);
1208 | if (!sd->skb) |
1209 | return -ENOMEM; |
1210 | |
1211 | sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE); |
1212 | if (unlikely(dma_mapping_error(hwdev, sd->dma))) { |
1213 | netif_err(rp, drv, dev, "Rx DMA mapping failure\n" ); |
1214 | dev_kfree_skb_any(skb: sd->skb); |
1215 | return -EIO; |
1216 | } |
1217 | |
1218 | return 0; |
1219 | } |
1220 | |
1221 | static void rhine_reset_rbufs(struct rhine_private *rp) |
1222 | { |
1223 | int i; |
1224 | |
1225 | rp->cur_rx = 0; |
1226 | |
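	/* Hand every Rx descriptor (back) to the chip by setting DescOwn. */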
1227 | for (i = 0; i < RX_RING_SIZE; i++) |
1228 | rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); |
1229 | } |
1230 | |
1231 | static inline void rhine_skb_dma_nic_store(struct rhine_private *rp, |
1232 | struct rhine_skb_dma *sd, int entry) |
1233 | { |
1234 | rp->rx_skbuff_dma[entry] = sd->dma; |
1235 | rp->rx_skbuff[entry] = sd->skb; |
1236 | |
1237 | rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); |
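	/*
	 * Ensure the buffer address store above is visible to the device
	 * before the descriptor is handed over (the DescOwn write done in
	 * rhine_reset_rbufs()).
	 */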
1238 | dma_wmb(); |
1239 | } |
1240 | |
1241 | static void free_rbufs(struct net_device* dev); |
1242 | |
1243 | static int alloc_rbufs(struct net_device *dev) |
1244 | { |
1245 | struct rhine_private *rp = netdev_priv(dev); |
1246 | dma_addr_t next; |
1247 | int rc, i; |
1248 | |
1249 | rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); |
1250 | next = rp->rx_ring_dma; |
1251 | |
1252 | /* Init the ring entries */ |
1253 | for (i = 0; i < RX_RING_SIZE; i++) { |
1254 | rp->rx_ring[i].rx_status = 0; |
1255 | rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); |
1256 | next += sizeof(struct rx_desc); |
1257 | rp->rx_ring[i].next_desc = cpu_to_le32(next); |
1258 | rp->rx_skbuff[i] = NULL; |
1259 | } |
1260 | /* Mark the last entry as wrapping the ring. */ |
1261 | rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); |
1262 | |
1263 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ |
1264 | for (i = 0; i < RX_RING_SIZE; i++) { |
1265 | struct rhine_skb_dma sd; |
1266 | |
1267 | rc = rhine_skb_dma_init(dev, sd: &sd); |
1268 | if (rc < 0) { |
1269 | free_rbufs(dev); |
1270 | goto out; |
1271 | } |
1272 | |
		rhine_skb_dma_nic_store(rp, &sd, i);
1274 | } |
1275 | |
1276 | rhine_reset_rbufs(rp); |
1277 | out: |
1278 | return rc; |
1279 | } |
1280 | |
1281 | static void free_rbufs(struct net_device* dev) |
1282 | { |
1283 | struct rhine_private *rp = netdev_priv(dev); |
1284 | struct device *hwdev = dev->dev.parent; |
1285 | int i; |
1286 | |
1287 | /* Free all the skbuffs in the Rx queue. */ |
1288 | for (i = 0; i < RX_RING_SIZE; i++) { |
1289 | rp->rx_ring[i].rx_status = 0; |
1290 | rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ |
1291 | if (rp->rx_skbuff[i]) { |
1292 | dma_unmap_single(hwdev, |
1293 | rp->rx_skbuff_dma[i], |
1294 | rp->rx_buf_sz, DMA_FROM_DEVICE); |
1295 | dev_kfree_skb(rp->rx_skbuff[i]); |
1296 | } |
1297 | rp->rx_skbuff[i] = NULL; |
1298 | } |
1299 | } |
1300 | |
1301 | static void alloc_tbufs(struct net_device* dev) |
1302 | { |
1303 | struct rhine_private *rp = netdev_priv(dev); |
1304 | dma_addr_t next; |
1305 | int i; |
1306 | |
1307 | rp->dirty_tx = rp->cur_tx = 0; |
1308 | next = rp->tx_ring_dma; |
1309 | for (i = 0; i < TX_RING_SIZE; i++) { |
1310 | rp->tx_skbuff[i] = NULL; |
1311 | rp->tx_ring[i].tx_status = 0; |
1312 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); |
1313 | next += sizeof(struct tx_desc); |
1314 | rp->tx_ring[i].next_desc = cpu_to_le32(next); |
1315 | if (rp->quirks & rqRhineI) |
1316 | rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; |
1317 | } |
1318 | rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); |
1319 | |
	netdev_reset_queue(dev);
1321 | } |
1322 | |
1323 | static void free_tbufs(struct net_device* dev) |
1324 | { |
1325 | struct rhine_private *rp = netdev_priv(dev); |
1326 | struct device *hwdev = dev->dev.parent; |
1327 | int i; |
1328 | |
1329 | for (i = 0; i < TX_RING_SIZE; i++) { |
1330 | rp->tx_ring[i].tx_status = 0; |
1331 | rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); |
1332 | rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ |
1333 | if (rp->tx_skbuff[i]) { |
1334 | if (rp->tx_skbuff_dma[i]) { |
1335 | dma_unmap_single(hwdev, |
1336 | rp->tx_skbuff_dma[i], |
1337 | rp->tx_skbuff[i]->len, |
1338 | DMA_TO_DEVICE); |
1339 | } |
1340 | dev_kfree_skb(rp->tx_skbuff[i]); |
1341 | } |
1342 | rp->tx_skbuff[i] = NULL; |
1343 | rp->tx_buf[i] = NULL; |
1344 | } |
1345 | } |
1346 | |
1347 | static void rhine_check_media(struct net_device *dev, unsigned int init_media) |
1348 | { |
1349 | struct rhine_private *rp = netdev_priv(dev); |
1350 | void __iomem *ioaddr = rp->base; |
1351 | |
1352 | if (!rp->mii_if.force_media) |
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1354 | |
1355 | if (rp->mii_if.full_duplex) |
1356 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, |
1357 | ioaddr + ChipCmd1); |
1358 | else |
1359 | iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, |
1360 | ioaddr + ChipCmd1); |
1361 | |
1362 | netif_info(rp, link, dev, "force_media %d, carrier %d\n" , |
1363 | rp->mii_if.force_media, netif_carrier_ok(dev)); |
1364 | } |
1365 | |
1366 | /* Called after status of force_media possibly changed */ |
1367 | static void rhine_set_carrier(struct mii_if_info *mii) |
1368 | { |
1369 | struct net_device *dev = mii->dev; |
1370 | struct rhine_private *rp = netdev_priv(dev); |
1371 | |
1372 | if (mii->force_media) { |
1373 | /* autoneg is off: Link is always assumed to be up */ |
1374 | if (!netif_carrier_ok(dev)) |
1375 | netif_carrier_on(dev); |
1376 | } |
1377 | |
	rhine_check_media(dev, 0);
1379 | |
1380 | netif_info(rp, link, dev, "force_media %d, carrier %d\n" , |
1381 | mii->force_media, netif_carrier_ok(dev)); |
1382 | } |
1383 | |
1384 | /** |
1385 | * rhine_set_cam - set CAM multicast filters |
1386 | * @ioaddr: register block of this Rhine |
1387 | * @idx: multicast CAM index [0..MCAM_SIZE-1] |
1388 | * @addr: multicast address (6 bytes) |
1389 | * |
1390 | * Load addresses into multicast filters. |
1391 | */ |
1392 | static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) |
1393 | { |
1394 | int i; |
1395 | |
1396 | iowrite8(CAMC_CAMEN, ioaddr + CamCon); |
1397 | wmb(); |
1398 | |
1399 | /* Paranoid -- idx out of range should never happen */ |
1400 | idx &= (MCAM_SIZE - 1); |
1401 | |
1402 | iowrite8((u8) idx, ioaddr + CamAddr); |
1403 | |
1404 | for (i = 0; i < 6; i++, addr++) |
1405 | iowrite8(*addr, ioaddr + MulticastFilter0 + i); |
1406 | udelay(10); |
1407 | wmb(); |
1408 | |
1409 | iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); |
1410 | udelay(10); |
1411 | |
1412 | iowrite8(0, ioaddr + CamCon); |
1413 | } |
1414 | |
1415 | /** |
1416 | * rhine_set_vlan_cam - set CAM VLAN filters |
1417 | * @ioaddr: register block of this Rhine |
1418 | * @idx: VLAN CAM index [0..VCAM_SIZE-1] |
1419 | * @addr: VLAN ID (2 bytes) |
1420 | * |
1421 | * Load addresses into VLAN filters. |
1422 | */ |
1423 | static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) |
1424 | { |
1425 | iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); |
1426 | wmb(); |
1427 | |
1428 | /* Paranoid -- idx out of range should never happen */ |
1429 | idx &= (VCAM_SIZE - 1); |
1430 | |
1431 | iowrite8((u8) idx, ioaddr + CamAddr); |
1432 | |
1433 | iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); |
1434 | udelay(10); |
1435 | wmb(); |
1436 | |
1437 | iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); |
1438 | udelay(10); |
1439 | |
1440 | iowrite8(0, ioaddr + CamCon); |
1441 | } |
1442 | |
1443 | /** |
1444 | * rhine_set_cam_mask - set multicast CAM mask |
1445 | * @ioaddr: register block of this Rhine |
1446 | * @mask: multicast CAM mask |
1447 | * |
1448 | * Mask sets multicast filters active/inactive. |
1449 | */ |
1450 | static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask) |
1451 | { |
1452 | iowrite8(CAMC_CAMEN, ioaddr + CamCon); |
1453 | wmb(); |
1454 | |
1455 | /* write mask */ |
1456 | iowrite32(mask, ioaddr + CamMask); |
1457 | |
1458 | /* disable CAMEN */ |
1459 | iowrite8(0, ioaddr + CamCon); |
1460 | } |
1461 | |
1462 | /** |
1463 | * rhine_set_vlan_cam_mask - set VLAN CAM mask |
1464 | * @ioaddr: register block of this Rhine |
1465 | * @mask: VLAN CAM mask |
1466 | * |
1467 | * Mask sets VLAN filters active/inactive. |
1468 | */ |
1469 | static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask) |
1470 | { |
1471 | iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); |
1472 | wmb(); |
1473 | |
1474 | /* write mask */ |
1475 | iowrite32(mask, ioaddr + CamMask); |
1476 | |
1477 | /* disable CAMEN */ |
1478 | iowrite8(0, ioaddr + CamCon); |
1479 | } |
1480 | |
1481 | /** |
1482 | * rhine_init_cam_filter - initialize CAM filters |
1483 | * @dev: network device |
1484 | * |
1485 | * Initialize (disable) hardware VLAN and multicast support on this |
1486 | * Rhine. |
1487 | */ |
1488 | static void rhine_init_cam_filter(struct net_device *dev) |
1489 | { |
1490 | struct rhine_private *rp = netdev_priv(dev); |
1491 | void __iomem *ioaddr = rp->base; |
1492 | |
1493 | /* Disable all CAMs */ |
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);
1496 | |
1497 | /* disable hardware VLAN support */ |
1498 | BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig); |
1499 | BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); |
1500 | } |
1501 | |
1502 | /** |
1503 | * rhine_update_vcam - update VLAN CAM filters |
 * @dev: network device
1505 | * |
1506 | * Update VLAN CAM filters to match configuration change. |
1507 | */ |
1508 | static void rhine_update_vcam(struct net_device *dev) |
1509 | { |
1510 | struct rhine_private *rp = netdev_priv(dev); |
1511 | void __iomem *ioaddr = rp->base; |
1512 | u16 vid; |
1513 | u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ |
1514 | unsigned int i = 0; |
1515 | |
1516 | for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { |
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1518 | vCAMmask |= 1 << i; |
1519 | if (++i >= VCAM_SIZE) |
1520 | break; |
1521 | } |
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1523 | } |
1524 | |
1525 | static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
1526 | { |
1527 | struct rhine_private *rp = netdev_priv(dev); |
1528 | |
	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
1533 | return 0; |
1534 | } |
1535 | |
1536 | static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
1537 | { |
1538 | struct rhine_private *rp = netdev_priv(dev); |
1539 | |
	spin_lock_bh(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
1544 | return 0; |
1545 | } |
1546 | |
1547 | static void init_registers(struct net_device *dev) |
1548 | { |
1549 | struct rhine_private *rp = netdev_priv(dev); |
1550 | void __iomem *ioaddr = rp->base; |
1551 | int i; |
1552 | |
1553 | for (i = 0; i < 6; i++) |
1554 | iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); |
1555 | |
1556 | /* Initialize other registers. */ |
1557 | iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ |
1558 | /* Configure initial FIFO thresholds. */ |
1559 | iowrite8(0x20, ioaddr + TxConfig); |
1560 | rp->tx_thresh = 0x20; |
1561 | rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ |
1562 | |
1563 | iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); |
1564 | iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); |
1565 | |
1566 | rhine_set_rx_mode(dev); |
1567 | |
1568 | if (rp->quirks & rqMgmt) |
1569 | rhine_init_cam_filter(dev); |
1570 | |
1571 | napi_enable(n: &rp->napi); |
1572 | |
1573 | iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); |
1574 | |
1575 | iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), |
1576 | ioaddr + ChipCmd); |
	rhine_check_media(dev, 1);
1578 | } |
1579 | |
1580 | /* Enable MII link status auto-polling (required for IntrLinkChange) */ |
1581 | static void rhine_enable_linkmon(struct rhine_private *rp) |
1582 | { |
1583 | void __iomem *ioaddr = rp->base; |
1584 | |
1585 | iowrite8(0, ioaddr + MIICmd); |
1586 | iowrite8(MII_BMSR, ioaddr + MIIRegAddr); |
1587 | iowrite8(0x80, ioaddr + MIICmd); |
1588 | |
	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1590 | |
1591 | iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); |
1592 | } |
1593 | |
1594 | /* Disable MII link status auto-polling (required for MDIO access) */ |
1595 | static void rhine_disable_linkmon(struct rhine_private *rp) |
1596 | { |
1597 | void __iomem *ioaddr = rp->base; |
1598 | |
1599 | iowrite8(0, ioaddr + MIICmd); |
1600 | |
1601 | if (rp->quirks & rqRhineI) { |
1602 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR |
1603 | |
1604 | /* Can be called from ISR. Evil. */ |
1605 | mdelay(1); |
1606 | |
1607 | /* 0x80 must be set immediately before turning it off */ |
1608 | iowrite8(0x80, ioaddr + MIICmd); |
1609 | |
		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1611 | |
1612 | /* Heh. Now clear 0x80 again. */ |
1613 | iowrite8(0, ioaddr + MIICmd); |
1614 | } |
1615 | else |
1616 | rhine_wait_bit_high(rp, reg: MIIRegAddr, mask: 0x80); |
1617 | } |
1618 | |
1619 | /* Read and write over the MII Management Data I/O (MDIO) interface. */ |
1620 | |
1621 | static int mdio_read(struct net_device *dev, int phy_id, int regnum) |
1622 | { |
1623 | struct rhine_private *rp = netdev_priv(dev); |
1624 | void __iomem *ioaddr = rp->base; |
1625 | int result; |
1626 | |
1627 | rhine_disable_linkmon(rp); |
1628 | |
1629 | /* rhine_disable_linkmon already cleared MIICmd */ |
1630 | iowrite8(phy_id, ioaddr + MIIPhyAddr); |
1631 | iowrite8(regnum, ioaddr + MIIRegAddr); |
1632 | iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ |
	rhine_wait_bit_low(rp, MIICmd, 0x40);
1634 | result = ioread16(ioaddr + MIIData); |
1635 | |
1636 | rhine_enable_linkmon(rp); |
1637 | return result; |
1638 | } |
1639 | |
1640 | static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) |
1641 | { |
1642 | struct rhine_private *rp = netdev_priv(dev); |
1643 | void __iomem *ioaddr = rp->base; |
1644 | |
1645 | rhine_disable_linkmon(rp); |
1646 | |
1647 | /* rhine_disable_linkmon already cleared MIICmd */ |
1648 | iowrite8(phy_id, ioaddr + MIIPhyAddr); |
1649 | iowrite8(regnum, ioaddr + MIIRegAddr); |
1650 | iowrite16(value, ioaddr + MIIData); |
1651 | iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ |
	rhine_wait_bit_low(rp, MIICmd, 0x20);
1653 | |
1654 | rhine_enable_linkmon(rp); |
1655 | } |
1656 | |
1657 | static void rhine_task_disable(struct rhine_private *rp) |
1658 | { |
1659 | mutex_lock(&rp->task_lock); |
1660 | rp->task_enable = false; |
	mutex_unlock(&rp->task_lock);
1662 | |
	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
1665 | } |
1666 | |
1667 | static void rhine_task_enable(struct rhine_private *rp) |
1668 | { |
1669 | mutex_lock(&rp->task_lock); |
1670 | rp->task_enable = true; |
	mutex_unlock(&rp->task_lock);
1672 | } |
1673 | |
1674 | static int rhine_open(struct net_device *dev) |
1675 | { |
1676 | struct rhine_private *rp = netdev_priv(dev); |
1677 | void __iomem *ioaddr = rp->base; |
1678 | int rc; |
1679 | |
	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1681 | if (rc) |
1682 | goto out; |
1683 | |
1684 | netif_dbg(rp, ifup, dev, "%s() irq %d\n" , __func__, rp->irq); |
1685 | |
1686 | rc = alloc_ring(dev); |
1687 | if (rc < 0) |
1688 | goto out_free_irq; |
1689 | |
1690 | rc = alloc_rbufs(dev); |
1691 | if (rc < 0) |
1692 | goto out_free_ring; |
1693 | |
1694 | alloc_tbufs(dev); |
	enable_mmio(rp->pioaddr, rp->quirks);
1696 | rhine_power_init(dev); |
1697 | rhine_chip_reset(dev); |
1698 | rhine_task_enable(rp); |
1699 | init_registers(dev); |
1700 | |
1701 | netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n" , |
1702 | __func__, ioread16(ioaddr + ChipCmd), |
1703 | mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); |
1704 | |
1705 | netif_start_queue(dev); |
1706 | |
1707 | out: |
1708 | return rc; |
1709 | |
1710 | out_free_ring: |
1711 | free_ring(dev); |
1712 | out_free_irq: |
1713 | free_irq(rp->irq, dev); |
1714 | goto out; |
1715 | } |
1716 | |
1717 | static void rhine_reset_task(struct work_struct *work) |
1718 | { |
1719 | struct rhine_private *rp = container_of(work, struct rhine_private, |
1720 | reset_task); |
1721 | struct net_device *dev = rp->dev; |
1722 | |
1723 | mutex_lock(&rp->task_lock); |
1724 | |
1725 | if (!rp->task_enable) |
1726 | goto out_unlock; |
1727 | |
	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);
1731 | |
1732 | /* clear all descriptors */ |
1733 | free_tbufs(dev); |
1734 | alloc_tbufs(dev); |
1735 | |
1736 | rhine_reset_rbufs(rp); |
1737 | |
1738 | /* Reinitialize the hardware. */ |
1739 | rhine_chip_reset(dev); |
1740 | init_registers(dev); |
1741 | |
	spin_unlock_bh(&rp->lock);
1743 | |
1744 | netif_trans_update(dev); /* prevent tx timeout */ |
1745 | dev->stats.tx_errors++; |
1746 | netif_wake_queue(dev); |
1747 | |
1748 | out_unlock: |
	mutex_unlock(&rp->task_lock);
1750 | } |
1751 | |
1752 | static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue) |
1753 | { |
1754 | struct rhine_private *rp = netdev_priv(dev); |
1755 | void __iomem *ioaddr = rp->base; |
1756 | |
	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1760 | |
	schedule_work(&rp->reset_task);
1762 | } |
1763 | |
1764 | static inline bool rhine_tx_queue_full(struct rhine_private *rp) |
1765 | { |
1766 | return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; |
1767 | } |
1768 | |
1769 | static netdev_tx_t rhine_start_tx(struct sk_buff *skb, |
1770 | struct net_device *dev) |
1771 | { |
1772 | struct rhine_private *rp = netdev_priv(dev); |
1773 | struct device *hwdev = dev->dev.parent; |
1774 | void __iomem *ioaddr = rp->base; |
1775 | unsigned entry; |
1776 | |
1777 | /* Caution: the write order is important here, set the field |
1778 | with the "ownership" bits last. */ |
1779 | |
1780 | /* Calculate the next Tx descriptor entry. */ |
1781 | entry = rp->cur_tx % TX_RING_SIZE; |
1782 | |
1783 | if (skb_padto(skb, ETH_ZLEN)) |
1784 | return NETDEV_TX_OK; |
1785 | |
1786 | rp->tx_skbuff[entry] = skb; |
1787 | |
1788 | if ((rp->quirks & rqRhineI) && |
1789 | (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { |
1790 | /* Must use alignment buffer. */ |
1791 | if (skb->len > PKT_BUF_SZ) { |
1792 | /* packet too long, drop it */ |
1793 | dev_kfree_skb_any(skb); |
1794 | rp->tx_skbuff[entry] = NULL; |
1795 | dev->stats.tx_dropped++; |
1796 | return NETDEV_TX_OK; |
1797 | } |
1798 | |
1799 | /* Padding is not copied and so must be redone. */ |
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1801 | if (skb->len < ETH_ZLEN) |
1802 | memset(rp->tx_buf[entry] + skb->len, 0, |
1803 | ETH_ZLEN - skb->len); |
1804 | rp->tx_skbuff_dma[entry] = 0; |
1805 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + |
1806 | (rp->tx_buf[entry] - |
1807 | rp->tx_bufs)); |
1808 | } else { |
1809 | rp->tx_skbuff_dma[entry] = |
1810 | dma_map_single(hwdev, skb->data, skb->len, |
1811 | DMA_TO_DEVICE); |
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1813 | dev_kfree_skb_any(skb); |
1814 | rp->tx_skbuff_dma[entry] = 0; |
1815 | dev->stats.tx_dropped++; |
1816 | return NETDEV_TX_OK; |
1817 | } |
1818 | rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); |
1819 | } |
1820 | |
1821 | rp->tx_ring[entry].desc_length = |
1822 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); |
1823 | |
1824 | if (unlikely(skb_vlan_tag_present(skb))) { |
1825 | u16 vid_pcp = skb_vlan_tag_get(skb); |
1826 | |
1827 | /* drop CFI/DEI bit, register needs VID and PCP */ |
1828 | vid_pcp = (vid_pcp & VLAN_VID_MASK) | |
1829 | ((vid_pcp & VLAN_PRIO_MASK) >> 1); |
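		/* e.g. TCI 0xb005 (PCP 5, DEI 1, VID 5) becomes 0x5005:
		 * PCP moves from bits 15-13 down to bits 14-12, squeezing
		 * out the DEI bit; the VID stays in bits 11-0. */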
1830 | rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); |
1831 | /* request tagging */ |
1832 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); |
1833 | } |
1834 | else |
1835 | rp->tx_ring[entry].tx_status = 0; |
1836 | |
	netdev_sent_queue(dev, skb->len);
	/* Publish all descriptor fields before granting the NIC ownership
	 * via the DescOwn bit below. */
1839 | dma_wmb(); |
1840 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); |
1841 | wmb(); |
1842 | |
1843 | rp->cur_tx++; |
1844 | /* |
1845 | * Nobody wants cur_tx write to rot for ages after the NIC will have |
1846 | * seen the transmit request, especially as the transmit completion |
1847 | * handler could miss it. |
1848 | */ |
1849 | smp_wmb(); |
1850 | |
1851 | /* Non-x86 Todo: explicitly flush cache lines here. */ |
1852 | |
1853 | if (skb_vlan_tag_present(skb)) |
1854 | /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ |
1855 | BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); |
1856 | |
1857 | /* Wake the potentially-idle transmit channel */ |
1858 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, |
1859 | ioaddr + ChipCmd1); |
1860 | IOSYNC; |
1861 | |
1862 | /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */ |
1863 | if (rhine_tx_queue_full(rp)) { |
1864 | netif_stop_queue(dev); |
1865 | smp_rmb(); |
1866 | /* Rejuvenate. */ |
1867 | if (!rhine_tx_queue_full(rp)) |
1868 | netif_wake_queue(dev); |
1869 | } |
1870 | |
	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1872 | rp->cur_tx - 1, entry); |
1873 | |
1874 | return NETDEV_TX_OK; |
1875 | } |
1876 | |
1877 | static void rhine_irq_disable(struct rhine_private *rp) |
1878 | { |
1879 | iowrite16(0x0000, rp->base + IntrEnable); |
1880 | } |
1881 | |
1882 | /* The interrupt handler does all of the Rx thread work and cleans up |
1883 | after the Tx thread. */ |
1884 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance) |
1885 | { |
1886 | struct net_device *dev = dev_instance; |
1887 | struct rhine_private *rp = netdev_priv(dev); |
1888 | u32 status; |
1889 | int handled = 0; |
1890 | |
1891 | status = rhine_get_events(rp); |
1892 | |
1893 | netif_dbg(rp, intr, dev, "Interrupt, status %08x\n" , status); |
1894 | |
1895 | if (status & RHINE_EVENT) { |
1896 | handled = 1; |
1897 | |
1898 | rhine_irq_disable(rp); |
		napi_schedule(&rp->napi);
1900 | } |
1901 | |
1902 | if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { |
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
1905 | } |
1906 | |
1907 | return IRQ_RETVAL(handled); |
1908 | } |
1909 | |
1910 | /* This routine is logically part of the interrupt handler, but isolated |
1911 | for clarity. */ |
1912 | static void rhine_tx(struct net_device *dev) |
1913 | { |
1914 | struct rhine_private *rp = netdev_priv(dev); |
1915 | struct device *hwdev = dev->dev.parent; |
1916 | unsigned int pkts_compl = 0, bytes_compl = 0; |
1917 | unsigned int dirty_tx = rp->dirty_tx; |
1918 | unsigned int cur_tx; |
1919 | struct sk_buff *skb; |
1920 | |
1921 | /* |
1922 | * The race with rhine_start_tx does not matter here as long as the |
1923 | * driver enforces a value of cur_tx that was relevant when the |
1924 | * packet was scheduled to the network chipset. |
1925 | * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx. |
1926 | */ |
1927 | smp_rmb(); |
1928 | cur_tx = rp->cur_tx; |
1929 | /* find and cleanup dirty tx descriptors */ |
1930 | while (dirty_tx != cur_tx) { |
1931 | unsigned int entry = dirty_tx % TX_RING_SIZE; |
1932 | u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); |
1933 | |
1934 | netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n" , |
1935 | entry, txstatus); |
1936 | if (txstatus & DescOwn) |
1937 | break; |
1938 | skb = rp->tx_skbuff[entry]; |
1939 | if (txstatus & 0x8000) { |
1940 | netif_dbg(rp, tx_done, dev, |
1941 | "Transmit error, Tx status %08x\n" , txstatus); |
1942 | dev->stats.tx_errors++; |
1943 | if (txstatus & 0x0400) |
1944 | dev->stats.tx_carrier_errors++; |
1945 | if (txstatus & 0x0200) |
1946 | dev->stats.tx_window_errors++; |
1947 | if (txstatus & 0x0100) |
1948 | dev->stats.tx_aborted_errors++; |
1949 | if (txstatus & 0x0080) |
1950 | dev->stats.tx_heartbeat_errors++; |
1951 | if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || |
1952 | (txstatus & 0x0800) || (txstatus & 0x1000)) { |
1953 | dev->stats.tx_fifo_errors++; |
1954 | rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); |
1955 | break; /* Keep the skb - we try again */ |
1956 | } |
1957 | /* Transmitter restarted in 'abnormal' handler. */ |
1958 | } else { |
1959 | if (rp->quirks & rqRhineI) |
1960 | dev->stats.collisions += (txstatus >> 3) & 0x0F; |
1961 | else |
1962 | dev->stats.collisions += txstatus & 0x0F; |
1963 | netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n" , |
1964 | (txstatus >> 3) & 0xF, txstatus & 0xF); |
1965 | |
			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
1970 | } |
1971 | /* Free the original skb. */ |
1972 | if (rp->tx_skbuff_dma[entry]) { |
1973 | dma_unmap_single(hwdev, |
1974 | rp->tx_skbuff_dma[entry], |
1975 | skb->len, |
1976 | DMA_TO_DEVICE); |
1977 | } |
1978 | bytes_compl += skb->len; |
1979 | pkts_compl++; |
1980 | dev_consume_skb_any(skb); |
1981 | rp->tx_skbuff[entry] = NULL; |
1982 | dirty_tx++; |
1983 | } |
1984 | |
1985 | rp->dirty_tx = dirty_tx; |
1986 | /* Pity we can't rely on the nearby BQL completion implicit barrier. */ |
1987 | smp_wmb(); |
1988 | |
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
1990 | |
1991 | /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */ |
1992 | if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) { |
1993 | netif_wake_queue(dev); |
1994 | smp_rmb(); |
1995 | /* Rejuvenate. */ |
1996 | if (rhine_tx_queue_full(rp)) |
1997 | netif_stop_queue(dev); |
1998 | } |
1999 | } |
2000 | |
2001 | /** |
2002 | * rhine_get_vlan_tci - extract TCI from Rx data buffer |
2003 | * @skb: pointer to sk_buff |
2004 | * @data_size: used data area of the buffer including CRC |
2005 | * |
2006 | * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q |
2007 | * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte |
2008 | * aligned following the CRC. |
2009 | */ |
2010 | static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) |
2011 | { |
2012 | u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; |
	return be16_to_cpup((__be16 *)trailer);
2014 | } |
2015 | |
2016 | static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc, |
2017 | int data_size) |
2018 | { |
2019 | dma_rmb(); |
2020 | if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) { |
2021 | u16 vlan_tci; |
2022 | |
2023 | vlan_tci = rhine_get_vlan_tci(skb, data_size); |
2024 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); |
2025 | } |
2026 | } |
2027 | |
2028 | /* Process up to limit frames from receive ring */ |
2029 | static int rhine_rx(struct net_device *dev, int limit) |
2030 | { |
2031 | struct rhine_private *rp = netdev_priv(dev); |
2032 | struct device *hwdev = dev->dev.parent; |
2033 | int entry = rp->cur_rx % RX_RING_SIZE; |
2034 | int count; |
2035 | |
2036 | netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n" , __func__, |
2037 | entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); |
2038 | |
2039 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ |
2040 | for (count = 0; count < limit; ++count) { |
2041 | struct rx_desc *desc = rp->rx_ring + entry; |
2042 | u32 desc_status = le32_to_cpu(desc->rx_status); |
2043 | int data_size = desc_status >> 16; |
2044 | |
2045 | if (desc_status & DescOwn) |
2046 | break; |
2047 | |
2048 | netif_dbg(rp, rx_status, dev, "%s() status %08x\n" , __func__, |
2049 | desc_status); |
2050 | |
2051 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { |
2052 | if ((desc_status & RxWholePkt) != RxWholePkt) { |
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
2058 | dev->stats.rx_length_errors++; |
2059 | } else if (desc_status & RxErr) { |
				/* There was an error. */
2061 | netif_dbg(rp, rx_err, dev, |
2062 | "%s() Rx error %08x\n" , __func__, |
2063 | desc_status); |
2064 | dev->stats.rx_errors++; |
2065 | if (desc_status & 0x0030) |
2066 | dev->stats.rx_length_errors++; |
2067 | if (desc_status & 0x0048) |
2068 | dev->stats.rx_fifo_errors++; |
2069 | if (desc_status & 0x0004) |
2070 | dev->stats.rx_frame_errors++; |
2071 | if (desc_status & 0x0002) { |
2072 | /* this can also be updated outside the interrupt handler */ |
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
2076 | } |
2077 | } |
2078 | } else { |
2079 | /* Length should omit the CRC */ |
2080 | int pkt_len = data_size - 4; |
2081 | struct sk_buff *skb; |
2082 | |
2083 | /* Check if the packet is long enough to accept without |
2084 | copying to a minimally-sized skbuff. */ |
2085 | if (pkt_len < rx_copybreak) { |
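				/* Small packet: copy it into a fresh skb and
				 * keep the original DMA buffer mapped so it
				 * can go straight back to the NIC. */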
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2087 | if (unlikely(!skb)) |
2088 | goto drop; |
2089 | |
				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
2103 | } else { |
2104 | struct rhine_skb_dma sd; |
2105 | |
2106 | if (unlikely(rhine_skb_dma_init(dev, &sd) < 0)) |
2107 | goto drop; |
2108 | |
2109 | skb = rp->rx_skbuff[entry]; |
2110 | |
2111 | dma_unmap_single(hwdev, |
2112 | rp->rx_skbuff_dma[entry], |
2113 | rp->rx_buf_sz, |
2114 | DMA_FROM_DEVICE); |
				rhine_skb_dma_nic_store(rp, &sd, entry);
2116 | } |
2117 | |
			skb_put(skb, pkt_len);
2119 | |
2120 | rhine_rx_vlan_tag(skb, desc, data_size); |
2121 | |
2122 | skb->protocol = eth_type_trans(skb, dev); |
2123 | |
2124 | netif_receive_skb(skb); |
2125 | |
			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
2130 | } |
2131 | give_descriptor_to_nic: |
2132 | desc->rx_status = cpu_to_le32(DescOwn); |
2133 | entry = (++rp->cur_rx) % RX_RING_SIZE; |
2134 | } |
2135 | |
2136 | return count; |
2137 | |
2138 | drop: |
2139 | dev->stats.rx_dropped++; |
2140 | goto give_descriptor_to_nic; |
2141 | } |
2142 | |
static void rhine_restart_tx(struct net_device *dev)
{
2144 | struct rhine_private *rp = netdev_priv(dev); |
2145 | void __iomem *ioaddr = rp->base; |
2146 | int entry = rp->dirty_tx % TX_RING_SIZE; |
2147 | u32 intr_status; |
2148 | |
2149 | /* |
2150 | * If new errors occurred, we need to sort them out before doing Tx. |
	 * In that case the ISR will soon be back here anyway.
2152 | */ |
2153 | intr_status = rhine_get_events(rp); |
2154 | |
2155 | if ((intr_status & IntrTxErrSummary) == 0) { |
2156 | |
2157 | /* We know better than the chip where it should continue. */ |
2158 | iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), |
2159 | ioaddr + TxRingPtr); |
2160 | |
2161 | iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, |
2162 | ioaddr + ChipCmd); |
2163 | |
2164 | if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) |
2165 | /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ |
2166 | BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); |
2167 | |
2168 | iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, |
2169 | ioaddr + ChipCmd1); |
2170 | IOSYNC; |
2171 | } |
2172 | else { |
2173 | /* This should never happen */ |
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2175 | intr_status); |
2176 | } |
2177 | |
2178 | } |
2179 | |
2180 | static void rhine_slow_event_task(struct work_struct *work) |
2181 | { |
2182 | struct rhine_private *rp = |
2183 | container_of(work, struct rhine_private, slow_event_task); |
2184 | struct net_device *dev = rp->dev; |
2185 | u32 intr_status; |
2186 | |
2187 | mutex_lock(&rp->task_lock); |
2188 | |
2189 | if (!rp->task_enable) |
2190 | goto out_unlock; |
2191 | |
2192 | intr_status = rhine_get_events(rp); |
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2194 | |
2195 | if (intr_status & IntrLinkChange) |
		rhine_check_media(dev, 0);
2197 | |
2198 | if (intr_status & IntrPCIErr) |
		netif_warn(rp, hw, dev, "PCI error\n");
2200 | |
2201 | iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); |
2202 | |
2203 | out_unlock: |
	mutex_unlock(&rp->task_lock);
2205 | } |
2206 | |
2207 | static void |
2208 | rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
2209 | { |
2210 | struct rhine_private *rp = netdev_priv(dev); |
2211 | unsigned int start; |
2212 | |
	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);
2216 | |
	netdev_stats_to_stats64(stats, &dev->stats);
2218 | |
2219 | do { |
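		/* Snapshot the 64-bit counters; retry if a writer updated
		 * them mid-read (u64_stats seqcount loop). */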
		start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
2224 | |
2225 | do { |
		start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
2230 | } |
2231 | |
2232 | static void rhine_set_rx_mode(struct net_device *dev) |
2233 | { |
2234 | struct rhine_private *rp = netdev_priv(dev); |
2235 | void __iomem *ioaddr = rp->base; |
2236 | u32 mc_filter[2]; /* Multicast hash filter */ |
2237 | u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */ |
2238 | struct netdev_hw_addr *ha; |
2239 | |
2240 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
2241 | rx_mode = 0x1C; |
2242 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); |
2243 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); |
2244 | } else if ((netdev_mc_count(dev) > multicast_filter_limit) || |
2245 | (dev->flags & IFF_ALLMULTI)) { |
2246 | /* Too many to match, or accept all multicasts. */ |
2247 | iowrite32(0xffffffff, ioaddr + MulticastFilter0); |
2248 | iowrite32(0xffffffff, ioaddr + MulticastFilter1); |
2249 | } else if (rp->quirks & rqMgmt) { |
2250 | int i = 0; |
2251 | u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ |
2252 | netdev_for_each_mc_addr(ha, dev) { |
2253 | if (i == MCAM_SIZE) |
2254 | break; |
			rhine_set_cam(ioaddr, i, ha->addr);
2256 | mCAMmask |= 1 << i; |
2257 | i++; |
2258 | } |
		rhine_set_cam_mask(ioaddr, mCAMmask);
2260 | } else { |
2261 | memset(mc_filter, 0, sizeof(mc_filter)); |
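		/* 8390-style hash: the top six bits of the CRC-32 of each
		 * address select one of 64 filter bits, split across two
		 * 32-bit registers. */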
2262 | netdev_for_each_mc_addr(ha, dev) { |
2263 | int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; |
2264 | |
2265 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); |
2266 | } |
2267 | iowrite32(mc_filter[0], ioaddr + MulticastFilter0); |
2268 | iowrite32(mc_filter[1], ioaddr + MulticastFilter1); |
2269 | } |
2270 | /* enable/disable VLAN receive filtering */ |
2271 | if (rp->quirks & rqMgmt) { |
2272 | if (dev->flags & IFF_PROMISC) |
2273 | BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); |
2274 | else |
2275 | BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1); |
2276 | } |
2277 | BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig); |
2278 | } |
2279 | |
2280 | static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2281 | { |
2282 | struct device *hwdev = dev->dev.parent; |
2283 | |
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2286 | } |
2287 | |
2288 | static int netdev_get_link_ksettings(struct net_device *dev, |
2289 | struct ethtool_link_ksettings *cmd) |
2290 | { |
2291 | struct rhine_private *rp = netdev_priv(dev); |
2292 | |
2293 | mutex_lock(&rp->task_lock); |
	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);
2296 | |
2297 | return 0; |
2298 | } |
2299 | |
2300 | static int netdev_set_link_ksettings(struct net_device *dev, |
2301 | const struct ethtool_link_ksettings *cmd) |
2302 | { |
2303 | struct rhine_private *rp = netdev_priv(dev); |
2304 | int rc; |
2305 | |
2306 | mutex_lock(&rp->task_lock); |
	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);
2310 | |
2311 | return rc; |
2312 | } |
2313 | |
2314 | static int netdev_nway_reset(struct net_device *dev) |
2315 | { |
2316 | struct rhine_private *rp = netdev_priv(dev); |
2317 | |
	return mii_nway_restart(&rp->mii_if);
2319 | } |
2320 | |
2321 | static u32 netdev_get_link(struct net_device *dev) |
2322 | { |
2323 | struct rhine_private *rp = netdev_priv(dev); |
2324 | |
	return mii_link_ok(&rp->mii_if);
2326 | } |
2327 | |
2328 | static u32 netdev_get_msglevel(struct net_device *dev) |
2329 | { |
2330 | struct rhine_private *rp = netdev_priv(dev); |
2331 | |
2332 | return rp->msg_enable; |
2333 | } |
2334 | |
2335 | static void netdev_set_msglevel(struct net_device *dev, u32 value) |
2336 | { |
2337 | struct rhine_private *rp = netdev_priv(dev); |
2338 | |
2339 | rp->msg_enable = value; |
2340 | } |
2341 | |
2342 | static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2343 | { |
2344 | struct rhine_private *rp = netdev_priv(dev); |
2345 | |
2346 | if (!(rp->quirks & rqWOL)) |
2347 | return; |
2348 | |
	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
2354 | } |
2355 | |
2356 | static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2357 | { |
2358 | struct rhine_private *rp = netdev_priv(dev); |
2359 | u32 support = WAKE_PHY | WAKE_MAGIC | |
2360 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ |
2361 | |
2362 | if (!(rp->quirks & rqWOL)) |
2363 | return -EINVAL; |
2364 | |
2365 | if (wol->wolopts & ~support) |
2366 | return -EINVAL; |
2367 | |
	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);
2371 | |
2372 | return 0; |
2373 | } |
2374 | |
2375 | static const struct ethtool_ops netdev_ethtool_ops = { |
2376 | .get_drvinfo = netdev_get_drvinfo, |
2377 | .nway_reset = netdev_nway_reset, |
2378 | .get_link = netdev_get_link, |
2379 | .get_msglevel = netdev_get_msglevel, |
2380 | .set_msglevel = netdev_set_msglevel, |
2381 | .get_wol = rhine_get_wol, |
2382 | .set_wol = rhine_set_wol, |
2383 | .get_link_ksettings = netdev_get_link_ksettings, |
2384 | .set_link_ksettings = netdev_set_link_ksettings, |
2385 | }; |
2386 | |
2387 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2388 | { |
2389 | struct rhine_private *rp = netdev_priv(dev); |
2390 | int rc; |
2391 | |
2392 | if (!netif_running(dev)) |
2393 | return -EINVAL; |
2394 | |
2395 | mutex_lock(&rp->task_lock); |
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);
2399 | |
2400 | return rc; |
2401 | } |
2402 | |
2403 | static int rhine_close(struct net_device *dev) |
2404 | { |
2405 | struct rhine_private *rp = netdev_priv(dev); |
2406 | void __iomem *ioaddr = rp->base; |
2407 | |
2408 | rhine_task_disable(rp); |
	napi_disable(&rp->napi);
2410 | netif_stop_queue(dev); |
2411 | |
	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2413 | ioread16(ioaddr + ChipCmd)); |
2414 | |
2415 | /* Switch to loopback mode to avoid hardware races. */ |
2416 | iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); |
2417 | |
2418 | rhine_irq_disable(rp); |
2419 | |
2420 | /* Stop the chip's Tx and Rx processes. */ |
2421 | iowrite16(CmdStop, ioaddr + ChipCmd); |
2422 | |
2423 | free_irq(rp->irq, dev); |
2424 | free_rbufs(dev); |
2425 | free_tbufs(dev); |
2426 | free_ring(dev); |
2427 | |
2428 | return 0; |
2429 | } |
2430 | |
2431 | |
2432 | static void rhine_remove_one_pci(struct pci_dev *pdev) |
2433 | { |
2434 | struct net_device *dev = pci_get_drvdata(pdev); |
2435 | struct rhine_private *rp = netdev_priv(dev); |
2436 | |
2437 | unregister_netdev(dev); |
2438 | |
	pci_iounmap(pdev, rp->base);
2440 | pci_release_regions(pdev); |
2441 | |
2442 | free_netdev(dev); |
	pci_disable_device(pdev);
2444 | } |
2445 | |
2446 | static void rhine_remove_one_platform(struct platform_device *pdev) |
2447 | { |
2448 | struct net_device *dev = platform_get_drvdata(pdev); |
2449 | struct rhine_private *rp = netdev_priv(dev); |
2450 | |
2451 | unregister_netdev(dev); |
2452 | |
	iounmap(rp->base);
2454 | |
2455 | free_netdev(dev); |
2456 | } |
2457 | |
2458 | static void rhine_shutdown_pci(struct pci_dev *pdev) |
2459 | { |
2460 | struct net_device *dev = pci_get_drvdata(pdev); |
2461 | struct rhine_private *rp = netdev_priv(dev); |
2462 | void __iomem *ioaddr = rp->base; |
2463 | |
2464 | if (!(rp->quirks & rqWOL)) |
2465 | return; /* Nothing to do for non-WOL adapters */ |
2466 | |
2467 | rhine_power_init(dev); |
2468 | |
2469 | /* Make sure we use pattern 0, 1 and not 4, 5 */ |
2470 | if (rp->quirks & rq6patterns) |
2471 | iowrite8(0x04, ioaddr + WOLcgClr); |
2472 | |
	spin_lock(&rp->lock);
2474 | |
2475 | if (rp->wolopts & WAKE_MAGIC) { |
2476 | iowrite8(WOLmagic, ioaddr + WOLcrSet); |
2477 | /* |
2478 | * Turn EEPROM-controlled wake-up back on -- some hardware may |
2479 | * not cooperate otherwise. |
2480 | */ |
2481 | iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA); |
2482 | } |
2483 | |
2484 | if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) |
2485 | iowrite8(WOLbmcast, ioaddr + WOLcgSet); |
2486 | |
2487 | if (rp->wolopts & WAKE_PHY) |
2488 | iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); |
2489 | |
2490 | if (rp->wolopts & WAKE_UCAST) |
2491 | iowrite8(WOLucast, ioaddr + WOLcrSet); |
2492 | |
2493 | if (rp->wolopts) { |
2494 | /* Enable legacy WOL (for old motherboards) */ |
2495 | iowrite8(0x01, ioaddr + PwcfgSet); |
2496 | iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); |
2497 | } |
2498 | |
	spin_unlock(&rp->lock);
2500 | |
2501 | if (system_state == SYSTEM_POWER_OFF && !avoid_D3) { |
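		/* Park the chip in D3 across power-off. The StickyHW bits
		 * are assumed from usage here; broken BIOSes cannot bring
		 * the chip back out of D3, hence the avoid_D3 escape hatch. */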
2502 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); |
2503 | |
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
2506 | } |
2507 | } |
2508 | |
2509 | #ifdef CONFIG_PM_SLEEP |
2510 | static int rhine_suspend(struct device *device) |
2511 | { |
	struct net_device *dev = dev_get_drvdata(device);
2513 | struct rhine_private *rp = netdev_priv(dev); |
2514 | |
2515 | if (!netif_running(dev)) |
2516 | return 0; |
2517 | |
2518 | rhine_task_disable(rp); |
2519 | rhine_irq_disable(rp); |
	napi_disable(&rp->napi);
2521 | |
2522 | netif_device_detach(dev); |
2523 | |
2524 | if (dev_is_pci(device)) |
2525 | rhine_shutdown_pci(to_pci_dev(device)); |
2526 | |
2527 | return 0; |
2528 | } |
2529 | |
2530 | static int rhine_resume(struct device *device) |
2531 | { |
	struct net_device *dev = dev_get_drvdata(device);
2533 | struct rhine_private *rp = netdev_priv(dev); |
2534 | |
2535 | if (!netif_running(dev)) |
2536 | return 0; |
2537 | |
	enable_mmio(rp->pioaddr, rp->quirks);
2539 | rhine_power_init(dev); |
2540 | free_tbufs(dev); |
2541 | alloc_tbufs(dev); |
2542 | rhine_reset_rbufs(rp); |
2543 | rhine_task_enable(rp); |
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);
2547 | |
2548 | netif_device_attach(dev); |
2549 | |
2550 | return 0; |
2551 | } |
2552 | |
2553 | static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); |
2554 | #define RHINE_PM_OPS (&rhine_pm_ops) |
2555 | |
2556 | #else |
2557 | |
2558 | #define RHINE_PM_OPS NULL |
2559 | |
2560 | #endif /* !CONFIG_PM_SLEEP */ |
2561 | |
2562 | static struct pci_driver rhine_driver_pci = { |
2563 | .name = DRV_NAME, |
2564 | .id_table = rhine_pci_tbl, |
2565 | .probe = rhine_init_one_pci, |
2566 | .remove = rhine_remove_one_pci, |
2567 | .shutdown = rhine_shutdown_pci, |
2568 | .driver.pm = RHINE_PM_OPS, |
2569 | }; |
2570 | |
2571 | static struct platform_driver rhine_driver_platform = { |
2572 | .probe = rhine_init_one_platform, |
2573 | .remove_new = rhine_remove_one_platform, |
2574 | .driver = { |
2575 | .name = DRV_NAME, |
2576 | .of_match_table = rhine_of_tbl, |
2577 | .pm = RHINE_PM_OPS, |
2578 | } |
2579 | }; |
2580 | |
2581 | static const struct dmi_system_id rhine_dmi_table[] __initconst = { |
2582 | { |
2583 | .ident = "EPIA-M" , |
2584 | .matches = { |
2585 | DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc." ), |
2586 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG" ), |
2587 | }, |
2588 | }, |
2589 | { |
2590 | .ident = "KV7" , |
2591 | .matches = { |
2592 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD" ), |
2593 | DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG" ), |
2594 | }, |
2595 | }, |
2596 | { NULL } |
2597 | }; |
2598 | |
2599 | static int __init rhine_init(void) |
2600 | { |
2601 | int ret_pci, ret_platform; |
2602 | |
2603 | /* when a module, this is printed whether or not devices are found in probe */ |
	if (dmi_check_system(rhine_dmi_table)) {
2605 | /* these BIOSes fail at PXE boot if chip is in D3 */ |
2606 | avoid_D3 = true; |
2607 | pr_warn("Broken BIOS detected, avoid_D3 enabled\n" ); |
2608 | } |
2609 | else if (avoid_D3) |
2610 | pr_info("avoid_D3 set\n" ); |
2611 | |
2612 | ret_pci = pci_register_driver(&rhine_driver_pci); |
2613 | ret_platform = platform_driver_register(&rhine_driver_platform); |
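	/* The module loads if either bus frontend registered successfully;
	 * report failure only when both did not. */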
2614 | if ((ret_pci < 0) && (ret_platform < 0)) |
2615 | return ret_pci; |
2616 | |
2617 | return 0; |
2618 | } |
2619 | |
2620 | |
2621 | static void __exit rhine_cleanup(void) |
2622 | { |
2623 | platform_driver_unregister(&rhine_driver_platform); |
	pci_unregister_driver(&rhine_driver_pci);
2625 | } |
2626 | |
2627 | |
2628 | module_init(rhine_init); |
2629 | module_exit(rhine_cleanup); |
2630 | |