1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. |
4 | * |
5 | * Note: This driver is a cleanroom reimplementation based on reverse |
6 | * engineered documentation written by Carl-Daniel Hailfinger |
7 | * and Andrew de Quincey. |
8 | * |
9 | * NVIDIA, nForce and other NVIDIA marks are trademarks or registered |
10 | * trademarks of NVIDIA Corporation in the United States and other |
11 | * countries. |
12 | * |
13 | * Copyright (C) 2003,4,5 Manfred Spraul |
14 | * Copyright (C) 2004 Andrew de Quincey (wol support) |
15 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane |
16 | * IRQ rate fixes, bigendian fixes, cleanups, verification) |
17 | * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation |
18 | * |
19 | * Known bugs: |
20 | * We suspect that on some hardware no TX done interrupts are generated. |
21 | * This means recovery from netif_stop_queue only happens if the hw timer |
22 | * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) |
23 | * and the timer is active in the IRQMask, or if a rx packet arrives by chance. |
24 | * If your hardware reliably generates tx done interrupts, then you can remove |
25 | * DEV_NEED_TIMERIRQ from the driver_data flags. |
26 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
27 | * superfluous timer interrupts from the nic. |
28 | */ |
29 | |
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
31 | |
32 | #define FORCEDETH_VERSION "0.64" |
33 | #define DRV_NAME "forcedeth" |
34 | |
35 | #include <linux/module.h> |
36 | #include <linux/types.h> |
37 | #include <linux/pci.h> |
38 | #include <linux/interrupt.h> |
39 | #include <linux/netdevice.h> |
40 | #include <linux/etherdevice.h> |
41 | #include <linux/delay.h> |
42 | #include <linux/sched.h> |
43 | #include <linux/spinlock.h> |
44 | #include <linux/ethtool.h> |
45 | #include <linux/timer.h> |
46 | #include <linux/skbuff.h> |
47 | #include <linux/mii.h> |
48 | #include <linux/random.h> |
49 | #include <linux/if_vlan.h> |
50 | #include <linux/dma-mapping.h> |
51 | #include <linux/slab.h> |
52 | #include <linux/uaccess.h> |
53 | #include <linux/prefetch.h> |
54 | #include <linux/u64_stats_sync.h> |
55 | #include <linux/io.h> |
56 | |
57 | #include <asm/irq.h> |
58 | |
59 | #define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT |
60 | #define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT |
61 | |
62 | /* |
63 | * Hardware access: |
64 | */ |
65 | |
66 | #define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */ |
67 | #define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */ |
68 | #define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */ |
69 | #define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */ |
70 | #define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */ |
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
72 | #define DEV_HAS_MSI 0x0000040 /* device supports MSI */ |
73 | #define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ |
74 | #define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ |
75 | #define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ |
76 | #define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */ |
77 | #define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */ |
78 | #define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */ |
79 | #define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */ |
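/* note: the combined flags above are plain ORs: 0x0000600 == V1|V2, 0x0000e00 == V1|V2|V3 */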
80 | #define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ |
81 | #define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ |
82 | #define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ |
83 | #define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */ |
84 | #define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */ |
85 | #define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */ |
86 | #define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */ |
87 | #define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */ |
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
89 | #define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */ |
90 | #define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */ |
91 | #define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */ |
92 | #define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */ |
93 | |
94 | enum { |
95 | NvRegIrqStatus = 0x000, |
96 | #define NVREG_IRQSTAT_MIIEVENT 0x040 |
97 | #define NVREG_IRQSTAT_MASK 0x83ff |
98 | NvRegIrqMask = 0x004, |
99 | #define NVREG_IRQ_RX_ERROR 0x0001 |
100 | #define NVREG_IRQ_RX 0x0002 |
101 | #define NVREG_IRQ_RX_NOBUF 0x0004 |
102 | #define NVREG_IRQ_TX_ERR 0x0008 |
103 | #define NVREG_IRQ_TX_OK 0x0010 |
104 | #define NVREG_IRQ_TIMER 0x0020 |
105 | #define NVREG_IRQ_LINK 0x0040 |
106 | #define NVREG_IRQ_RX_FORCED 0x0080 |
107 | #define NVREG_IRQ_TX_FORCED 0x0100 |
108 | #define NVREG_IRQ_RECOVER_ERROR 0x8200 |
109 | #define NVREG_IRQMASK_THROUGHPUT 0x00df |
110 | #define NVREG_IRQMASK_CPU 0x0060 |
111 | #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) |
112 | #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) |
113 | #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) |
114 | |
115 | NvRegUnknownSetupReg6 = 0x008, |
116 | #define NVREG_UNKSETUP6_VAL 3 |
117 | |
118 | /* |
119 | * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic |
120 | * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms |
121 | */ |
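/*
 * Worked example (from the poll_interval formula further below):
 * interval_us = value * 2^10 / 100, so a register value of 97 gives
 * 97 * 1024 / 100 = 993 us, i.e. the roughly 1 ms quoted above.
 */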
122 | NvRegPollingInterval = 0x00c, |
123 | #define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */ |
124 | #define NVREG_POLL_DEFAULT_CPU 13 |
125 | NvRegMSIMap0 = 0x020, |
126 | NvRegMSIMap1 = 0x024, |
127 | NvRegMSIIrqMask = 0x030, |
128 | #define NVREG_MSI_VECTOR_0_ENABLED 0x01 |
129 | NvRegMisc1 = 0x080, |
130 | #define NVREG_MISC1_PAUSE_TX 0x01 |
131 | #define NVREG_MISC1_HD 0x02 |
132 | #define NVREG_MISC1_FORCE 0x3b0f3c |
133 | |
134 | NvRegMacReset = 0x34, |
135 | #define NVREG_MAC_RESET_ASSERT 0x0F3 |
136 | NvRegTransmitterControl = 0x084, |
137 | #define NVREG_XMITCTL_START 0x01 |
138 | #define NVREG_XMITCTL_MGMT_ST 0x40000000 |
139 | #define NVREG_XMITCTL_SYNC_MASK 0x000f0000 |
140 | #define NVREG_XMITCTL_SYNC_NOT_READY 0x0 |
141 | #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 |
142 | #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 |
143 | #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 |
144 | #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 |
145 | #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 |
146 | #define NVREG_XMITCTL_HOST_LOADED 0x00004000 |
147 | #define NVREG_XMITCTL_TX_PATH_EN 0x01000000 |
148 | #define NVREG_XMITCTL_DATA_START 0x00100000 |
149 | #define NVREG_XMITCTL_DATA_READY 0x00010000 |
150 | #define NVREG_XMITCTL_DATA_ERROR 0x00020000 |
151 | NvRegTransmitterStatus = 0x088, |
152 | #define NVREG_XMITSTAT_BUSY 0x01 |
153 | |
154 | NvRegPacketFilterFlags = 0x8c, |
155 | #define NVREG_PFF_PAUSE_RX 0x08 |
156 | #define NVREG_PFF_ALWAYS 0x7F0000 |
157 | #define NVREG_PFF_PROMISC 0x80 |
158 | #define NVREG_PFF_MYADDR 0x20 |
159 | #define NVREG_PFF_LOOPBACK 0x10 |
160 | |
161 | NvRegOffloadConfig = 0x90, |
162 | #define NVREG_OFFLOAD_HOMEPHY 0x601 |
163 | #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE |
164 | NvRegReceiverControl = 0x094, |
165 | #define NVREG_RCVCTL_START 0x01 |
166 | #define NVREG_RCVCTL_RX_PATH_EN 0x01000000 |
167 | NvRegReceiverStatus = 0x98, |
168 | #define NVREG_RCVSTAT_BUSY 0x01 |
169 | |
170 | NvRegSlotTime = 0x9c, |
171 | #define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 |
172 | #define NVREG_SLOTTIME_10_100_FULL 0x00007f00 |
173 | #define NVREG_SLOTTIME_1000_FULL 0x0003ff00 |
174 | #define NVREG_SLOTTIME_HALF 0x0000ff00 |
175 | #define NVREG_SLOTTIME_DEFAULT 0x00007f00 |
176 | #define NVREG_SLOTTIME_MASK 0x000000ff |
177 | |
178 | NvRegTxDeferral = 0xA0, |
179 | #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f |
180 | #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f |
181 | #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f |
182 | #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f |
183 | #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f |
184 | #define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000 |
185 | NvRegRxDeferral = 0xA4, |
186 | #define NVREG_RX_DEFERRAL_DEFAULT 0x16 |
187 | NvRegMacAddrA = 0xA8, |
188 | NvRegMacAddrB = 0xAC, |
189 | NvRegMulticastAddrA = 0xB0, |
190 | #define NVREG_MCASTADDRA_FORCE 0x01 |
191 | NvRegMulticastAddrB = 0xB4, |
192 | NvRegMulticastMaskA = 0xB8, |
193 | #define NVREG_MCASTMASKA_NONE 0xffffffff |
194 | NvRegMulticastMaskB = 0xBC, |
195 | #define NVREG_MCASTMASKB_NONE 0xffff |
196 | |
197 | NvRegPhyInterface = 0xC0, |
198 | #define PHY_RGMII 0x10000000 |
199 | NvRegBackOffControl = 0xC4, |
200 | #define NVREG_BKOFFCTRL_DEFAULT 0x70000000 |
201 | #define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff |
202 | #define NVREG_BKOFFCTRL_SELECT 24 |
203 | #define NVREG_BKOFFCTRL_GEAR 12 |
204 | |
205 | NvRegTxRingPhysAddr = 0x100, |
206 | NvRegRxRingPhysAddr = 0x104, |
207 | NvRegRingSizes = 0x108, |
208 | #define NVREG_RINGSZ_TXSHIFT 0 |
209 | #define NVREG_RINGSZ_RXSHIFT 16 |
210 | NvRegTransmitPoll = 0x10c, |
211 | #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 |
212 | NvRegLinkSpeed = 0x110, |
213 | #define NVREG_LINKSPEED_FORCE 0x10000 |
214 | #define NVREG_LINKSPEED_10 1000 |
215 | #define NVREG_LINKSPEED_100 100 |
216 | #define NVREG_LINKSPEED_1000 50 |
217 | #define NVREG_LINKSPEED_MASK (0xFFF) |
218 | NvRegUnknownSetupReg5 = 0x130, |
219 | #define NVREG_UNKSETUP5_BIT31 (1<<31) |
220 | NvRegTxWatermark = 0x13c, |
221 | #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 |
222 | #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 |
223 | #define NVREG_TX_WM_DESC2_3_1000 0xfe08000 |
224 | NvRegTxRxControl = 0x144, |
225 | #define NVREG_TXRXCTL_KICK 0x0001 |
226 | #define NVREG_TXRXCTL_BIT1 0x0002 |
227 | #define NVREG_TXRXCTL_BIT2 0x0004 |
228 | #define NVREG_TXRXCTL_IDLE 0x0008 |
229 | #define NVREG_TXRXCTL_RESET 0x0010 |
230 | #define NVREG_TXRXCTL_RXCHECK 0x0400 |
231 | #define NVREG_TXRXCTL_DESC_1 0 |
232 | #define NVREG_TXRXCTL_DESC_2 0x002100 |
233 | #define NVREG_TXRXCTL_DESC_3 0xc02200 |
234 | #define NVREG_TXRXCTL_VLANSTRIP 0x00040 |
235 | #define NVREG_TXRXCTL_VLANINS 0x00080 |
236 | NvRegTxRingPhysAddrHigh = 0x148, |
237 | NvRegRxRingPhysAddrHigh = 0x14C, |
238 | NvRegTxPauseFrame = 0x170, |
239 | #define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 |
240 | #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 |
241 | #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 |
242 | #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 |
243 | NvRegTxPauseFrameLimit = 0x174, |
244 | #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 |
245 | NvRegMIIStatus = 0x180, |
246 | #define NVREG_MIISTAT_ERROR 0x0001 |
247 | #define NVREG_MIISTAT_LINKCHANGE 0x0008 |
248 | #define NVREG_MIISTAT_MASK_RW 0x0007 |
249 | #define NVREG_MIISTAT_MASK_ALL 0x000f |
250 | NvRegMIIMask = 0x184, |
251 | #define NVREG_MII_LINKCHANGE 0x0008 |
252 | |
253 | NvRegAdapterControl = 0x188, |
254 | #define NVREG_ADAPTCTL_START 0x02 |
255 | #define NVREG_ADAPTCTL_LINKUP 0x04 |
256 | #define NVREG_ADAPTCTL_PHYVALID 0x40000 |
257 | #define NVREG_ADAPTCTL_RUNNING 0x100000 |
258 | #define NVREG_ADAPTCTL_PHYSHIFT 24 |
259 | NvRegMIISpeed = 0x18c, |
260 | #define NVREG_MIISPEED_BIT8 (1<<8) |
261 | #define NVREG_MIIDELAY 5 |
262 | NvRegMIIControl = 0x190, |
263 | #define NVREG_MIICTL_INUSE 0x08000 |
264 | #define NVREG_MIICTL_WRITE 0x00400 |
265 | #define NVREG_MIICTL_ADDRSHIFT 5 |
266 | NvRegMIIData = 0x194, |
267 | NvRegTxUnicast = 0x1a0, |
268 | NvRegTxMulticast = 0x1a4, |
269 | NvRegTxBroadcast = 0x1a8, |
270 | NvRegWakeUpFlags = 0x200, |
271 | #define NVREG_WAKEUPFLAGS_VAL 0x7770 |
272 | #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 |
273 | #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 |
274 | #define NVREG_WAKEUPFLAGS_D3SHIFT 12 |
275 | #define NVREG_WAKEUPFLAGS_D2SHIFT 8 |
276 | #define NVREG_WAKEUPFLAGS_D1SHIFT 4 |
277 | #define NVREG_WAKEUPFLAGS_D0SHIFT 0 |
278 | #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 |
279 | #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 |
280 | #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 |
281 | #define NVREG_WAKEUPFLAGS_ENABLE 0x1111 |
282 | |
283 | NvRegMgmtUnitGetVersion = 0x204, |
284 | #define NVREG_MGMTUNITGETVERSION 0x01 |
285 | NvRegMgmtUnitVersion = 0x208, |
286 | #define NVREG_MGMTUNITVERSION 0x08 |
287 | NvRegPowerCap = 0x268, |
288 | #define NVREG_POWERCAP_D3SUPP (1<<30) |
289 | #define NVREG_POWERCAP_D2SUPP (1<<26) |
290 | #define NVREG_POWERCAP_D1SUPP (1<<25) |
291 | NvRegPowerState = 0x26c, |
292 | #define NVREG_POWERSTATE_POWEREDUP 0x8000 |
293 | #define NVREG_POWERSTATE_VALID 0x0100 |
294 | #define NVREG_POWERSTATE_MASK 0x0003 |
295 | #define NVREG_POWERSTATE_D0 0x0000 |
296 | #define NVREG_POWERSTATE_D1 0x0001 |
297 | #define NVREG_POWERSTATE_D2 0x0002 |
298 | #define NVREG_POWERSTATE_D3 0x0003 |
299 | NvRegMgmtUnitControl = 0x278, |
300 | #define NVREG_MGMTUNITCONTROL_INUSE 0x20000 |
301 | NvRegTxCnt = 0x280, |
302 | NvRegTxZeroReXmt = 0x284, |
303 | NvRegTxOneReXmt = 0x288, |
304 | NvRegTxManyReXmt = 0x28c, |
305 | NvRegTxLateCol = 0x290, |
306 | NvRegTxUnderflow = 0x294, |
307 | NvRegTxLossCarrier = 0x298, |
308 | NvRegTxExcessDef = 0x29c, |
309 | NvRegTxRetryErr = 0x2a0, |
310 | NvRegRxFrameErr = 0x2a4, |
	NvRegRxExtraByte = 0x2a8,
312 | NvRegRxLateCol = 0x2ac, |
313 | NvRegRxRunt = 0x2b0, |
314 | NvRegRxFrameTooLong = 0x2b4, |
315 | NvRegRxOverflow = 0x2b8, |
316 | NvRegRxFCSErr = 0x2bc, |
317 | NvRegRxFrameAlignErr = 0x2c0, |
318 | NvRegRxLenErr = 0x2c4, |
319 | NvRegRxUnicast = 0x2c8, |
320 | NvRegRxMulticast = 0x2cc, |
321 | NvRegRxBroadcast = 0x2d0, |
322 | NvRegTxDef = 0x2d4, |
323 | NvRegTxFrame = 0x2d8, |
324 | NvRegRxCnt = 0x2dc, |
325 | NvRegTxPause = 0x2e0, |
326 | NvRegRxPause = 0x2e4, |
327 | NvRegRxDropFrame = 0x2e8, |
328 | NvRegVlanControl = 0x300, |
329 | #define NVREG_VLANCONTROL_ENABLE 0x2000 |
330 | NvRegMSIXMap0 = 0x3e0, |
331 | NvRegMSIXMap1 = 0x3e4, |
332 | NvRegMSIXIrqStatus = 0x3f0, |
333 | |
334 | NvRegPowerState2 = 0x600, |
335 | #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15 |
336 | #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 |
337 | #define NVREG_POWERSTATE2_PHY_RESET 0x0004 |
338 | #define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00 |
339 | }; |
340 | |
341 | /* Big endian: should work, but is untested */ |
342 | struct ring_desc { |
343 | __le32 buf; |
344 | __le32 flaglen; |
345 | }; |
346 | |
347 | struct ring_desc_ex { |
348 | __le32 bufhigh; |
349 | __le32 buflow; |
350 | __le32 txvlan; |
351 | __le32 flaglen; |
352 | }; |
353 | |
354 | union ring_type { |
355 | struct ring_desc *orig; |
356 | struct ring_desc_ex *ex; |
357 | }; |
358 | |
359 | #define FLAG_MASK_V1 0xffff0000 |
360 | #define FLAG_MASK_V2 0xffffc000 |
361 | #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) |
362 | #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) |
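/* i.e. LEN_MASK_V1 == 0x0000ffff and LEN_MASK_V2 == 0x00003fff */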
363 | |
364 | #define NV_TX_LASTPACKET (1<<16) |
365 | #define NV_TX_RETRYERROR (1<<19) |
366 | #define NV_TX_RETRYCOUNT_MASK (0xF<<20) |
367 | #define NV_TX_FORCED_INTERRUPT (1<<24) |
368 | #define NV_TX_DEFERRED (1<<26) |
369 | #define NV_TX_CARRIERLOST (1<<27) |
370 | #define NV_TX_LATECOLLISION (1<<28) |
371 | #define NV_TX_UNDERFLOW (1<<29) |
372 | #define NV_TX_ERROR (1<<30) |
373 | #define NV_TX_VALID (1<<31) |
374 | |
375 | #define NV_TX2_LASTPACKET (1<<29) |
376 | #define NV_TX2_RETRYERROR (1<<18) |
377 | #define NV_TX2_RETRYCOUNT_MASK (0xF<<19) |
378 | #define NV_TX2_FORCED_INTERRUPT (1<<30) |
379 | #define NV_TX2_DEFERRED (1<<25) |
380 | #define NV_TX2_CARRIERLOST (1<<26) |
381 | #define NV_TX2_LATECOLLISION (1<<27) |
382 | #define NV_TX2_UNDERFLOW (1<<28) |
383 | /* error and valid are the same for both */ |
384 | #define NV_TX2_ERROR (1<<30) |
385 | #define NV_TX2_VALID (1<<31) |
386 | #define NV_TX2_TSO (1<<28) |
387 | #define NV_TX2_TSO_SHIFT 14 |
388 | #define NV_TX2_TSO_MAX_SHIFT 14 |
389 | #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT) |
390 | #define NV_TX2_CHECKSUM_L3 (1<<27) |
391 | #define NV_TX2_CHECKSUM_L4 (1<<26) |
392 | |
393 | #define NV_TX3_VLAN_TAG_PRESENT (1<<18) |
394 | |
395 | #define NV_RX_DESCRIPTORVALID (1<<16) |
396 | #define NV_RX_MISSEDFRAME (1<<17) |
397 | #define NV_RX_SUBTRACT1 (1<<18) |
398 | #define NV_RX_ERROR1 (1<<23) |
399 | #define NV_RX_ERROR2 (1<<24) |
400 | #define NV_RX_ERROR3 (1<<25) |
401 | #define NV_RX_ERROR4 (1<<26) |
402 | #define NV_RX_CRCERR (1<<27) |
403 | #define NV_RX_OVERFLOW (1<<28) |
404 | #define NV_RX_FRAMINGERR (1<<29) |
405 | #define NV_RX_ERROR (1<<30) |
406 | #define NV_RX_AVAIL (1<<31) |
407 | #define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR) |
408 | |
409 | #define NV_RX2_CHECKSUMMASK (0x1C000000) |
410 | #define NV_RX2_CHECKSUM_IP (0x10000000) |
411 | #define NV_RX2_CHECKSUM_IP_TCP (0x14000000) |
412 | #define NV_RX2_CHECKSUM_IP_UDP (0x18000000) |
413 | #define NV_RX2_DESCRIPTORVALID (1<<29) |
414 | #define NV_RX2_SUBTRACT1 (1<<25) |
415 | #define NV_RX2_ERROR1 (1<<18) |
416 | #define NV_RX2_ERROR2 (1<<19) |
417 | #define NV_RX2_ERROR3 (1<<20) |
418 | #define NV_RX2_ERROR4 (1<<21) |
419 | #define NV_RX2_CRCERR (1<<22) |
420 | #define NV_RX2_OVERFLOW (1<<23) |
421 | #define NV_RX2_FRAMINGERR (1<<24) |
422 | /* error and avail are the same for both */ |
423 | #define NV_RX2_ERROR (1<<30) |
424 | #define NV_RX2_AVAIL (1<<31) |
425 | #define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR) |
426 | |
427 | #define NV_RX3_VLAN_TAG_PRESENT (1<<16) |
428 | #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) |
429 | |
430 | /* Miscellaneous hardware related defines: */ |
431 | #define NV_PCI_REGSZ_VER1 0x270 |
432 | #define NV_PCI_REGSZ_VER2 0x2d4 |
433 | #define NV_PCI_REGSZ_VER3 0x604 |
434 | #define NV_PCI_REGSZ_MAX 0x604 |
435 | |
436 | /* various timeout delays: all in usec */ |
437 | #define NV_TXRX_RESET_DELAY 4 |
438 | #define NV_TXSTOP_DELAY1 10 |
439 | #define NV_TXSTOP_DELAY1MAX 500000 |
440 | #define NV_TXSTOP_DELAY2 100 |
441 | #define NV_RXSTOP_DELAY1 10 |
442 | #define NV_RXSTOP_DELAY1MAX 500000 |
443 | #define NV_RXSTOP_DELAY2 100 |
444 | #define NV_SETUP5_DELAY 5 |
445 | #define NV_SETUP5_DELAYMAX 50000 |
446 | #define NV_POWERUP_DELAY 5 |
447 | #define NV_POWERUP_DELAYMAX 5000 |
448 | #define NV_MIIBUSY_DELAY 50 |
449 | #define NV_MIIPHY_DELAY 10 |
450 | #define NV_MIIPHY_DELAYMAX 10000 |
451 | #define NV_MAC_RESET_DELAY 64 |
452 | |
453 | #define NV_WAKEUPPATTERNS 5 |
454 | #define NV_WAKEUPMASKENTRIES 4 |
455 | |
456 | /* General driver defaults */ |
457 | #define NV_WATCHDOG_TIMEO (5*HZ) |
458 | |
459 | #define RX_RING_DEFAULT 512 |
460 | #define TX_RING_DEFAULT 256 |
461 | #define RX_RING_MIN 128 |
462 | #define TX_RING_MIN 64 |
463 | #define RING_MAX_DESC_VER_1 1024 |
464 | #define RING_MAX_DESC_VER_2_3 16384 |
465 | |
/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
468 | /* even more slack. */ |
469 | #define NV_RX_ALLOC_PAD (64) |
470 | |
471 | /* maximum mtu size */ |
472 | #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */ |
473 | #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */ |
474 | |
475 | #define OOM_REFILL (1+HZ/20) |
476 | #define POLL_WAIT (1+HZ/100) |
477 | #define LINK_TIMEOUT (3*HZ) |
478 | #define STATS_INTERVAL (10*HZ) |
479 | |
480 | /* |
481 | * desc_ver values: |
482 | * The nic supports three different descriptor types: |
483 | * - DESC_VER_1: Original |
484 | * - DESC_VER_2: support for jumbo frames. |
485 | * - DESC_VER_3: 64-bit format. |
486 | */ |
487 | #define DESC_VER_1 1 |
488 | #define DESC_VER_2 2 |
489 | #define DESC_VER_3 3 |
490 | |
491 | /* PHY defines */ |
492 | #define PHY_OUI_MARVELL 0x5043 |
493 | #define PHY_OUI_CICADA 0x03f1 |
494 | #define PHY_OUI_VITESSE 0x01c1 |
495 | #define PHY_OUI_REALTEK 0x0732 |
496 | #define PHY_OUI_REALTEK2 0x0020 |
497 | #define PHYID1_OUI_MASK 0x03ff |
498 | #define PHYID1_OUI_SHFT 6 |
499 | #define PHYID2_OUI_MASK 0xfc00 |
500 | #define PHYID2_OUI_SHFT 10 |
501 | #define PHYID2_MODEL_MASK 0x03f0 |
502 | #define PHY_MODEL_REALTEK_8211 0x0110 |
503 | #define PHY_REV_MASK 0x0001 |
504 | #define PHY_REV_REALTEK_8211B 0x0000 |
505 | #define PHY_REV_REALTEK_8211C 0x0001 |
506 | #define PHY_MODEL_REALTEK_8201 0x0200 |
507 | #define PHY_MODEL_MARVELL_E3016 0x0220 |
508 | #define PHY_MARVELL_E3016_INITMASK 0x0300 |
509 | #define PHY_CICADA_INIT1 0x0f000 |
510 | #define PHY_CICADA_INIT2 0x0e00 |
511 | #define PHY_CICADA_INIT3 0x01000 |
512 | #define PHY_CICADA_INIT4 0x0200 |
513 | #define PHY_CICADA_INIT5 0x0004 |
514 | #define PHY_CICADA_INIT6 0x02000 |
515 | #define PHY_VITESSE_INIT_REG1 0x1f |
516 | #define PHY_VITESSE_INIT_REG2 0x10 |
517 | #define PHY_VITESSE_INIT_REG3 0x11 |
518 | #define PHY_VITESSE_INIT_REG4 0x12 |
519 | #define PHY_VITESSE_INIT_MSK1 0xc |
520 | #define PHY_VITESSE_INIT_MSK2 0x0180 |
521 | #define PHY_VITESSE_INIT1 0x52b5 |
522 | #define PHY_VITESSE_INIT2 0xaf8a |
523 | #define PHY_VITESSE_INIT3 0x8 |
524 | #define PHY_VITESSE_INIT4 0x8f8a |
525 | #define PHY_VITESSE_INIT5 0xaf86 |
526 | #define PHY_VITESSE_INIT6 0x8f86 |
527 | #define PHY_VITESSE_INIT7 0xaf82 |
528 | #define PHY_VITESSE_INIT8 0x0100 |
529 | #define PHY_VITESSE_INIT9 0x8f82 |
530 | #define PHY_VITESSE_INIT10 0x0 |
531 | #define PHY_REALTEK_INIT_REG1 0x1f |
532 | #define PHY_REALTEK_INIT_REG2 0x19 |
533 | #define PHY_REALTEK_INIT_REG3 0x13 |
534 | #define PHY_REALTEK_INIT_REG4 0x14 |
535 | #define PHY_REALTEK_INIT_REG5 0x18 |
536 | #define PHY_REALTEK_INIT_REG6 0x11 |
537 | #define PHY_REALTEK_INIT_REG7 0x01 |
538 | #define PHY_REALTEK_INIT1 0x0000 |
539 | #define PHY_REALTEK_INIT2 0x8e00 |
540 | #define PHY_REALTEK_INIT3 0x0001 |
541 | #define PHY_REALTEK_INIT4 0xad17 |
542 | #define PHY_REALTEK_INIT5 0xfb54 |
543 | #define PHY_REALTEK_INIT6 0xf5c7 |
544 | #define PHY_REALTEK_INIT7 0x1000 |
545 | #define PHY_REALTEK_INIT8 0x0003 |
546 | #define PHY_REALTEK_INIT9 0x0008 |
547 | #define PHY_REALTEK_INIT10 0x0005 |
548 | #define PHY_REALTEK_INIT11 0x0200 |
549 | #define PHY_REALTEK_INIT_MSK1 0x0003 |
550 | |
551 | #define PHY_GIGABIT 0x0100 |
552 | |
553 | #define PHY_TIMEOUT 0x1 |
554 | #define PHY_ERROR 0x2 |
555 | |
556 | #define PHY_100 0x1 |
557 | #define PHY_1000 0x2 |
558 | #define PHY_HALF 0x100 |
559 | |
560 | #define NV_PAUSEFRAME_RX_CAPABLE 0x0001 |
561 | #define NV_PAUSEFRAME_TX_CAPABLE 0x0002 |
562 | #define NV_PAUSEFRAME_RX_ENABLE 0x0004 |
563 | #define NV_PAUSEFRAME_TX_ENABLE 0x0008 |
564 | #define NV_PAUSEFRAME_RX_REQ 0x0010 |
565 | #define NV_PAUSEFRAME_TX_REQ 0x0020 |
566 | #define NV_PAUSEFRAME_AUTONEG 0x0040 |
567 | |
568 | /* MSI/MSI-X defines */ |
569 | #define NV_MSI_X_MAX_VECTORS 8 |
570 | #define NV_MSI_X_VECTORS_MASK 0x000f |
571 | #define NV_MSI_CAPABLE 0x0010 |
572 | #define NV_MSI_X_CAPABLE 0x0020 |
573 | #define NV_MSI_ENABLED 0x0040 |
574 | #define NV_MSI_X_ENABLED 0x0080 |
575 | |
576 | #define NV_MSI_X_VECTOR_ALL 0x0 |
577 | #define NV_MSI_X_VECTOR_RX 0x0 |
578 | #define NV_MSI_X_VECTOR_TX 0x1 |
579 | #define NV_MSI_X_VECTOR_OTHER 0x2 |
580 | |
581 | #define NV_MSI_PRIV_OFFSET 0x68 |
582 | #define NV_MSI_PRIV_VALUE 0xffffffff |
583 | |
584 | #define NV_RESTART_TX 0x1 |
585 | #define NV_RESTART_RX 0x2 |
586 | |
587 | #define NV_TX_LIMIT_COUNT 16 |
588 | |
589 | #define NV_DYNAMIC_THRESHOLD 4 |
590 | #define NV_DYNAMIC_MAX_QUIET_COUNT 2048 |
591 | |
592 | /* statistics */ |
593 | struct nv_ethtool_str { |
594 | char name[ETH_GSTRING_LEN]; |
595 | }; |
596 | |
597 | static const struct nv_ethtool_str nv_estats_str[] = { |
598 | { "tx_bytes" }, /* includes Ethernet FCS CRC */ |
599 | { "tx_zero_rexmt" }, |
600 | { "tx_one_rexmt" }, |
601 | { "tx_many_rexmt" }, |
602 | { "tx_late_collision" }, |
603 | { "tx_fifo_errors" }, |
604 | { "tx_carrier_errors" }, |
605 | { "tx_excess_deferral" }, |
606 | { "tx_retry_error" }, |
607 | { "rx_frame_error" }, |
608 | { "rx_extra_byte" }, |
609 | { "rx_late_collision" }, |
610 | { "rx_runt" }, |
611 | { "rx_frame_too_long" }, |
612 | { "rx_over_errors" }, |
613 | { "rx_crc_errors" }, |
614 | { "rx_frame_align_error" }, |
615 | { "rx_length_error" }, |
616 | { "rx_unicast" }, |
617 | { "rx_multicast" }, |
618 | { "rx_broadcast" }, |
619 | { "rx_packets" }, |
620 | { "rx_errors_total" }, |
621 | { "tx_errors_total" }, |
622 | |
623 | /* version 2 stats */ |
624 | { "tx_deferral" }, |
625 | { "tx_packets" }, |
626 | { "rx_bytes" }, /* includes Ethernet FCS CRC */ |
627 | { "tx_pause" }, |
628 | { "rx_pause" }, |
629 | { "rx_drop_frame" }, |
630 | |
631 | /* version 3 stats */ |
632 | { "tx_unicast" }, |
633 | { "tx_multicast" }, |
634 | { "tx_broadcast" } |
635 | }; |
636 | |
637 | struct nv_ethtool_stats { |
638 | u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */ |
639 | u64 tx_zero_rexmt; |
640 | u64 tx_one_rexmt; |
641 | u64 tx_many_rexmt; |
642 | u64 tx_late_collision; |
643 | u64 tx_fifo_errors; |
644 | u64 tx_carrier_errors; |
645 | u64 tx_excess_deferral; |
646 | u64 tx_retry_error; |
647 | u64 rx_frame_error; |
	u64 rx_extra_byte;
649 | u64 rx_late_collision; |
650 | u64 rx_runt; |
651 | u64 rx_frame_too_long; |
652 | u64 rx_over_errors; |
653 | u64 rx_crc_errors; |
654 | u64 rx_frame_align_error; |
655 | u64 rx_length_error; |
656 | u64 rx_unicast; |
657 | u64 rx_multicast; |
658 | u64 rx_broadcast; |
659 | u64 rx_packets; /* should be ifconfig->rx_packets */ |
660 | u64 rx_errors_total; |
661 | u64 tx_errors_total; |
662 | |
663 | /* version 2 stats */ |
664 | u64 tx_deferral; |
665 | u64 tx_packets; /* should be ifconfig->tx_packets */ |
666 | u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */ |
667 | u64 tx_pause; |
668 | u64 rx_pause; |
669 | u64 rx_drop_frame; |
670 | |
671 | /* version 3 stats */ |
672 | u64 tx_unicast; |
673 | u64 tx_multicast; |
674 | u64 tx_broadcast; |
675 | }; |
676 | |
677 | #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) |
678 | #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3) |
679 | #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) |
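/*
 * With the all-u64 struct above (no padding), this works out to
 * 33 counters for V3, 33 - 3 = 30 for V2 and 30 - 6 = 24 for V1.
 */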
680 | |
681 | /* diagnostics */ |
682 | #define NV_TEST_COUNT_BASE 3 |
683 | #define NV_TEST_COUNT_EXTENDED 4 |
684 | |
685 | static const struct nv_ethtool_str nv_etests_str[] = { |
686 | { "link (online/offline)" }, |
687 | { "register (offline) " }, |
688 | { "interrupt (offline) " }, |
689 | { "loopback (offline) " } |
690 | }; |
691 | |
692 | struct register_test { |
693 | __u32 reg; |
694 | __u32 mask; |
695 | }; |
696 | |
697 | static const struct register_test nv_registers_test[] = { |
698 | { NvRegUnknownSetupReg6, 0x01 }, |
699 | { NvRegMisc1, 0x03c }, |
700 | { NvRegOffloadConfig, 0x03ff }, |
701 | { NvRegMulticastAddrA, 0xffffffff }, |
702 | { NvRegTxWatermark, 0x0ff }, |
703 | { NvRegWakeUpFlags, 0x07777 }, |
704 | { 0, 0 } |
705 | }; |
706 | |
707 | struct nv_skb_map { |
708 | struct sk_buff *skb; |
709 | dma_addr_t dma; |
710 | unsigned int dma_len:31; |
711 | unsigned int dma_single:1; |
712 | struct ring_desc_ex *first_tx_desc; |
713 | struct nv_skb_map *next_tx_ctx; |
714 | }; |
715 | |
716 | struct nv_txrx_stats { |
717 | u64 stat_rx_packets; |
718 | u64 stat_rx_bytes; /* not always available in HW */ |
719 | u64 stat_rx_missed_errors; |
720 | u64 stat_rx_dropped; |
721 | u64 stat_tx_packets; /* not always available in HW */ |
722 | u64 stat_tx_bytes; |
723 | u64 stat_tx_dropped; |
724 | }; |
725 | |
726 | #define nv_txrx_stats_inc(member) \ |
727 | __this_cpu_inc(np->txrx_stats->member) |
728 | #define nv_txrx_stats_add(member, count) \ |
729 | __this_cpu_add(np->txrx_stats->member, (count)) |
730 | |
731 | /* |
732 | * SMP locking: |
733 | * All hardware access under netdev_priv(dev)->lock, except the performance |
734 | * critical parts: |
735 | * - rx is (pseudo-) lockless: it relies on the single-threading provided |
736 | * by the arch code for interrupts. |
737 | * - tx setup is lockless: it relies on netif_tx_lock. Actual submission |
738 | * needs netdev_priv(dev)->lock :-( |
739 | * - set_multicast_list: preparation lockless, relies on netif_tx_lock. |
740 | * |
741 | * Hardware stats updates are protected by hwstats_lock: |
742 | * - updated by nv_do_stats_poll (timer). This is meant to avoid |
743 | * integer wraparound in the NIC stats registers, at low frequency |
744 | * (0.1 Hz) |
745 | * - updated by nv_get_ethtool_stats + nv_get_stats64 |
746 | * |
747 | * Software stats are accessed only through 64b synchronization points |
748 | * and are not subject to other synchronization techniques (single |
749 | * update thread on the TX or RX paths). |
750 | */ |
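/*
 * Illustrative reader sketch for those 64b sync points (a sketch only,
 * not necessarily this driver's exact code): each per-cpu counter is
 * sampled under a begin/retry pair so that 64-bit loads stay consistent
 * even on 32-bit machines:
 *
 *	unsigned int start;
 *	u64 packets;
 *	do {
 *		start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
 *		packets = per_cpu_ptr(np->txrx_stats, cpu)->stat_rx_packets;
 *	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, start));
 */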
751 | |
752 | /* in dev: base, irq */ |
753 | struct fe_priv { |
754 | spinlock_t lock; |
755 | |
756 | struct net_device *dev; |
757 | struct napi_struct napi; |
758 | |
759 | /* hardware stats are updated in syscall and timer */ |
760 | spinlock_t hwstats_lock; |
761 | struct nv_ethtool_stats estats; |
762 | |
763 | int in_shutdown; |
764 | u32 linkspeed; |
765 | int duplex; |
766 | int autoneg; |
767 | int fixed_mode; |
768 | int phyaddr; |
769 | int wolenabled; |
770 | unsigned int phy_oui; |
771 | unsigned int phy_model; |
772 | unsigned int phy_rev; |
773 | u16 gigabit; |
774 | int intr_test; |
775 | int recover_error; |
776 | int quiet_count; |
777 | |
778 | /* General data: RO fields */ |
779 | dma_addr_t ring_addr; |
780 | struct pci_dev *pci_dev; |
781 | u32 orig_mac[2]; |
782 | u32 events; |
783 | u32 irqmask; |
784 | u32 desc_ver; |
785 | u32 txrxctl_bits; |
786 | u32 vlanctl_bits; |
787 | u32 driver_data; |
788 | u32 device_id; |
789 | u32 register_size; |
790 | u32 mac_in_use; |
791 | int mgmt_version; |
792 | int mgmt_sema; |
793 | |
794 | void __iomem *base; |
795 | |
796 | /* rx specific fields. |
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
798 | */ |
799 | union ring_type get_rx, put_rx, last_rx; |
800 | struct nv_skb_map *get_rx_ctx, *put_rx_ctx; |
801 | struct nv_skb_map *last_rx_ctx; |
802 | struct nv_skb_map *rx_skb; |
803 | |
804 | union ring_type rx_ring; |
805 | unsigned int rx_buf_sz; |
806 | unsigned int pkt_limit; |
807 | struct timer_list oom_kick; |
808 | struct timer_list nic_poll; |
809 | struct timer_list stats_poll; |
810 | u32 nic_poll_irq; |
811 | int rx_ring_size; |
812 | |
813 | /* RX software stats */ |
814 | struct u64_stats_sync swstats_rx_syncp; |
815 | struct nv_txrx_stats __percpu *txrx_stats; |
816 | |
817 | /* media detection workaround. |
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
819 | */ |
820 | int need_linktimer; |
821 | unsigned long link_timeout; |
822 | /* |
823 | * tx specific fields. |
824 | */ |
825 | union ring_type get_tx, put_tx, last_tx; |
826 | struct nv_skb_map *get_tx_ctx, *put_tx_ctx; |
827 | struct nv_skb_map *last_tx_ctx; |
828 | struct nv_skb_map *tx_skb; |
829 | |
830 | union ring_type tx_ring; |
831 | u32 tx_flags; |
832 | int tx_ring_size; |
833 | int tx_limit; |
834 | u32 tx_pkts_in_progress; |
835 | struct nv_skb_map *tx_change_owner; |
836 | struct nv_skb_map *tx_end_flip; |
837 | int tx_stop; |
838 | |
839 | /* TX software stats */ |
840 | struct u64_stats_sync swstats_tx_syncp; |
841 | |
842 | /* msi/msi-x fields */ |
843 | u32 msi_flags; |
844 | struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; |
845 | |
846 | /* flow control */ |
847 | u32 pause_flags; |
848 | |
849 | /* power saved state */ |
850 | u32 saved_config_space[NV_PCI_REGSZ_MAX/4]; |
851 | |
852 | /* for different msi-x irq type */ |
853 | char name_rx[IFNAMSIZ + 3]; /* -rx */ |
854 | char name_tx[IFNAMSIZ + 3]; /* -tx */ |
855 | char name_other[IFNAMSIZ + 6]; /* -other */ |
856 | }; |
857 | |
858 | /* |
859 | * Maximum number of loops until we assume that a bit in the irq mask |
860 | * is stuck. Overridable with module param. |
861 | */ |
862 | static int max_interrupt_work = 4; |
863 | |
864 | /* |
 * Optimization can be either throughput mode or cpu mode
866 | * |
867 | * Throughput Mode: Every tx and rx packet will generate an interrupt. |
868 | * CPU Mode: Interrupts are controlled by a timer. |
869 | */ |
870 | enum { |
871 | NV_OPTIMIZATION_MODE_THROUGHPUT, |
872 | NV_OPTIMIZATION_MODE_CPU, |
873 | NV_OPTIMIZATION_MODE_DYNAMIC |
874 | }; |
875 | static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC; |
876 | |
877 | /* |
878 | * Poll interval for timer irq |
879 | * |
880 | * This interval determines how frequent an interrupt is generated. |
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
882 | * Min = 0, and Max = 65535 |
883 | */ |
884 | static int poll_interval = -1; |
885 | |
886 | /* |
887 | * MSI interrupts |
888 | */ |
889 | enum { |
890 | NV_MSI_INT_DISABLED, |
891 | NV_MSI_INT_ENABLED |
892 | }; |
893 | static int msi = NV_MSI_INT_ENABLED; |
894 | |
895 | /* |
896 | * MSIX interrupts |
897 | */ |
898 | enum { |
899 | NV_MSIX_INT_DISABLED, |
900 | NV_MSIX_INT_ENABLED |
901 | }; |
902 | static int msix = NV_MSIX_INT_ENABLED; |
903 | |
904 | /* |
905 | * DMA 64bit |
906 | */ |
907 | enum { |
908 | NV_DMA_64BIT_DISABLED, |
909 | NV_DMA_64BIT_ENABLED |
910 | }; |
911 | static int dma_64bit = NV_DMA_64BIT_ENABLED; |
912 | |
913 | /* |
914 | * Debug output control for tx_timeout |
915 | */ |
916 | static bool debug_tx_timeout = false; |
917 | |
918 | /* |
919 | * Crossover Detection |
920 | * Realtek 8201 phy + some OEM boards do not work properly. |
921 | */ |
922 | enum { |
923 | NV_CROSSOVER_DETECTION_DISABLED, |
924 | NV_CROSSOVER_DETECTION_ENABLED |
925 | }; |
926 | static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; |
927 | |
928 | /* |
929 | * Power down phy when interface is down (persists through reboot; |
930 | * older Linux and other OSes may not power it up again) |
931 | */ |
932 | static int phy_power_down; |
933 | |
934 | static inline struct fe_priv *get_nvpriv(struct net_device *dev) |
935 | { |
936 | return netdev_priv(dev); |
937 | } |
938 | |
939 | static inline u8 __iomem *get_hwbase(struct net_device *dev) |
940 | { |
941 | return ((struct fe_priv *)netdev_priv(dev))->base; |
942 | } |
943 | |
944 | static inline void pci_push(u8 __iomem *base) |
945 | { |
946 | /* force out pending posted writes */ |
	readl(base);
948 | } |
949 | |
950 | static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) |
951 | { |
952 | return le32_to_cpu(prd->flaglen) |
953 | & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); |
954 | } |
955 | |
956 | static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) |
957 | { |
958 | return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; |
959 | } |
960 | |
961 | static bool nv_optimized(struct fe_priv *np) |
962 | { |
963 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
964 | return false; |
965 | return true; |
966 | } |
967 | |
968 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, |
969 | int delay, int delaymax) |
970 | { |
971 | u8 __iomem *base = get_hwbase(dev); |
972 | |
973 | pci_push(base); |
974 | do { |
975 | udelay(delay); |
976 | delaymax -= delay; |
977 | if (delaymax < 0) |
978 | return 1; |
	} while ((readl(base + offset) & mask) != target);
980 | return 0; |
981 | } |
982 | |
983 | #define NV_SETUP_RX_RING 0x01 |
984 | #define NV_SETUP_TX_RING 0x02 |
985 | |
986 | static inline u32 dma_low(dma_addr_t addr) |
987 | { |
988 | return addr; |
989 | } |
990 | |
991 | static inline u32 dma_high(dma_addr_t addr) |
992 | { |
993 | return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */ |
994 | } |
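/*
 * Note on dma_high(): a single ">> 32" would be undefined behaviour
 * when dma_addr_t is only 32 bits wide, whereas ">> 31 >> 1" is always
 * well-defined and simply yields 0 in that case.
 */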
995 | |
996 | static void setup_hw_rings(struct net_device *dev, int rxtx_flags) |
997 | { |
998 | struct fe_priv *np = get_nvpriv(dev); |
999 | u8 __iomem *base = get_hwbase(dev); |
1000 | |
	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
1016 | } |
1017 | |
1018 | static void free_rings(struct net_device *dev) |
1019 | { |
1020 | struct fe_priv *np = get_nvpriv(dev); |
1021 | |
	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc) *
					  (np->rx_ring_size +
					   np->tx_ring_size),
					  np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			dma_free_coherent(&np->pci_dev->dev,
					  sizeof(struct ring_desc_ex) *
					  (np->rx_ring_size +
					   np->tx_ring_size),
					  np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
1039 | } |
1040 | |
1041 | static int using_multi_irqs(struct net_device *dev) |
1042 | { |
1043 | struct fe_priv *np = get_nvpriv(dev); |
1044 | |
1045 | if (!(np->msi_flags & NV_MSI_X_ENABLED) || |
1046 | ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)) |
1047 | return 0; |
1048 | else |
1049 | return 1; |
1050 | } |
1051 | |
1052 | static void nv_txrx_gate(struct net_device *dev, bool gate) |
1053 | { |
1054 | struct fe_priv *np = get_nvpriv(dev); |
1055 | u8 __iomem *base = get_hwbase(dev); |
1056 | u32 powerstate; |
1057 | |
1058 | if (!np->mac_in_use && |
1059 | (np->driver_data & DEV_HAS_POWER_CNTRL)) { |
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
1066 | } |
1067 | } |
1068 | |
1069 | static void nv_enable_irq(struct net_device *dev) |
1070 | { |
1071 | struct fe_priv *np = get_nvpriv(dev); |
1072 | |
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
1083 | } |
1084 | |
1085 | static void nv_disable_irq(struct net_device *dev) |
1086 | { |
1087 | struct fe_priv *np = get_nvpriv(dev); |
1088 | |
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
1099 | } |
1100 | |
1101 | /* In MSIX mode, a write to irqmask behaves as XOR */ |
1102 | static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) |
1103 | { |
1104 | u8 __iomem *base = get_hwbase(dev); |
1105 | |
	writel(mask, base + NvRegIrqMask);
1107 | } |
1108 | |
1109 | static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) |
1110 | { |
1111 | struct fe_priv *np = get_nvpriv(dev); |
1112 | u8 __iomem *base = get_hwbase(dev); |
1113 | |
	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
1121 | } |
1122 | |
1123 | static void nv_napi_enable(struct net_device *dev) |
1124 | { |
1125 | struct fe_priv *np = get_nvpriv(dev); |
1126 | |
	napi_enable(&np->napi);
1128 | } |
1129 | |
1130 | static void nv_napi_disable(struct net_device *dev) |
1131 | { |
1132 | struct fe_priv *np = get_nvpriv(dev); |
1133 | |
	napi_disable(&np->napi);
1135 | } |
1136 | |
1137 | #define MII_READ (-1) |
1138 | /* mii_rw: read/write a register on the PHY. |
1139 | * |
1140 | * Caller must guarantee serialization |
1141 | */ |
1142 | static int mii_rw(struct net_device *dev, int addr, int miireg, int value) |
1143 | { |
1144 | u8 __iomem *base = get_hwbase(dev); |
1145 | u32 reg; |
1146 | int retval; |
1147 | |
	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}
1174 | |
1175 | return retval; |
1176 | } |
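/*
 * Usage sketch: a read is mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)
 * and returns the register value (or -1 on error); a write passes the
 * value in place of MII_READ and returns 0 on success, -1 on timeout.
 */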
1177 | |
1178 | static int phy_reset(struct net_device *dev, u32 bmcr_setup) |
1179 | { |
1180 | struct fe_priv *np = netdev_priv(dev); |
1181 | u32 miicontrol; |
1182 | unsigned int tries = 0; |
1183 | |
1184 | miicontrol = BMCR_RESET | bmcr_setup; |
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
1198 | } |
1199 | return 0; |
1200 | } |
1201 | |
1202 | static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) |
1203 | { |
1204 | static const struct { |
1205 | int reg; |
1206 | int init; |
1207 | } ri[] = { |
1208 | { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, |
1209 | { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 }, |
1210 | { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 }, |
1211 | { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 }, |
1212 | { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 }, |
1213 | { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 }, |
1214 | { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, |
1215 | }; |
1216 | int i; |
1217 | |
1218 | for (i = 0; i < ARRAY_SIZE(ri); i++) { |
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1220 | return PHY_ERROR; |
1221 | } |
1222 | |
1223 | return 0; |
1224 | } |
1225 | |
1226 | static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) |
1227 | { |
1228 | u32 reg; |
1229 | u8 __iomem *base = get_hwbase(dev); |
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;
1257 | |
1258 | return 0; |
1259 | } |
1260 | |
1261 | static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) |
1262 | { |
1263 | u32 phy_reserved; |
1264 | |
1265 | if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { |
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
1272 | } |
1273 | |
1274 | return 0; |
1275 | } |
1276 | |
1277 | static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) |
1278 | { |
1279 | u32 phy_reserved; |
1280 | |
1281 | if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { |
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
1295 | } |
1296 | |
1297 | return 0; |
1298 | } |
1299 | |
1300 | static int init_cicada(struct net_device *dev, struct fe_priv *np, |
1301 | u32 phyinterface) |
1302 | { |
1303 | u32 phy_reserved; |
1304 | |
1305 | if (phyinterface & PHY_RGMII) { |
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;
1320 | |
1321 | return 0; |
1322 | } |
1323 | |
1324 | static int init_vitesse(struct net_device *dev, struct fe_priv *np) |
1325 | { |
1326 | u32 phy_reserved; |
1327 | |
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;
1382 | |
1383 | return 0; |
1384 | } |
1385 | |
1386 | static int phy_init(struct net_device *dev) |
1387 | { |
1388 | struct fe_priv *np = get_nvpriv(dev); |
1389 | u8 __iomem *base = get_hwbase(dev); |
1390 | u32 phyinterface; |
1391 | u32 mii_status, mii_control, mii_control_1000, reg; |
1392 | |
1393 | /* phy errata for E3016 phy */ |
1394 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1526 | return PHY_ERROR; |
1527 | |
1528 | return 0; |
1529 | } |
1530 | |
1531 | static void nv_start_rx(struct net_device *dev) |
1532 | { |
1533 | struct fe_priv *np = netdev_priv(dev); |
1534 | u8 __iomem *base = get_hwbase(dev); |
1535 | u32 rx_ctrl = readl(addr: base + NvRegReceiverControl); |
1536 | |
1537 | /* Already running? Stop it. */ |
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
1541 | pci_push(base); |
1542 | } |
	writel(np->linkspeed, base + NvRegLinkSpeed);
1544 | pci_push(base); |
1545 | rx_ctrl |= NVREG_RCVCTL_START; |
1546 | if (np->mac_in_use) |
1547 | rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; |
	writel(rx_ctrl, base + NvRegReceiverControl);
1549 | pci_push(base); |
1550 | } |
1551 | |
1552 | static void nv_stop_rx(struct net_device *dev) |
1553 | { |
1554 | struct fe_priv *np = netdev_priv(dev); |
1555 | u8 __iomem *base = get_hwbase(dev); |
	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1557 | |
1558 | if (!np->mac_in_use) |
1559 | rx_ctrl &= ~NVREG_RCVCTL_START; |
1560 | else |
1561 | rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; |
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1566 | __func__); |
1567 | |
1568 | udelay(NV_RXSTOP_DELAY2); |
1569 | if (!np->mac_in_use) |
		writel(0, base + NvRegLinkSpeed);
1571 | } |
1572 | |
1573 | static void nv_start_tx(struct net_device *dev) |
1574 | { |
1575 | struct fe_priv *np = netdev_priv(dev); |
1576 | u8 __iomem *base = get_hwbase(dev); |
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578 | |
1579 | tx_ctrl |= NVREG_XMITCTL_START; |
1580 | if (np->mac_in_use) |
1581 | tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; |
	writel(tx_ctrl, base + NvRegTransmitterControl);
1583 | pci_push(base); |
1584 | } |
1585 | |
1586 | static void nv_stop_tx(struct net_device *dev) |
1587 | { |
1588 | struct fe_priv *np = netdev_priv(dev); |
1589 | u8 __iomem *base = get_hwbase(dev); |
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1591 | |
1592 | if (!np->mac_in_use) |
1593 | tx_ctrl &= ~NVREG_XMITCTL_START; |
1594 | else |
1595 | tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; |
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1600 | __func__); |
1601 | |
1602 | udelay(NV_TXSTOP_DELAY2); |
1603 | if (!np->mac_in_use) |
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
1606 | } |
1607 | |
1608 | static void nv_start_rxtx(struct net_device *dev) |
1609 | { |
1610 | nv_start_rx(dev); |
1611 | nv_start_tx(dev); |
1612 | } |
1613 | |
1614 | static void nv_stop_rxtx(struct net_device *dev) |
1615 | { |
1616 | nv_stop_rx(dev); |
1617 | nv_stop_tx(dev); |
1618 | } |
1619 | |
1620 | static void nv_txrx_reset(struct net_device *dev) |
1621 | { |
1622 | struct fe_priv *np = netdev_priv(dev); |
1623 | u8 __iomem *base = get_hwbase(dev); |
1624 | |
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1626 | pci_push(base); |
1627 | udelay(NV_TXRX_RESET_DELAY); |
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1629 | pci_push(base); |
1630 | } |
1631 | |
1632 | static void nv_mac_reset(struct net_device *dev) |
1633 | { |
1634 | struct fe_priv *np = netdev_priv(dev); |
1635 | u8 __iomem *base = get_hwbase(dev); |
1636 | u32 temp1, temp2, temp3; |
1637 | |
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1639 | pci_push(base); |
1640 | |
1641 | /* save registers since they will be cleared on reset */ |
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);
1645 | |
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1647 | pci_push(base); |
1648 | udelay(NV_MAC_RESET_DELAY); |
	writel(0, base + NvRegMacReset);
1650 | pci_push(base); |
1651 | udelay(NV_MAC_RESET_DELAY); |
1652 | |
1653 | /* restore saved registers */ |
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);
1657 | |
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1659 | pci_push(base); |
1660 | } |
1661 | |
1662 | /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */ |
1663 | static void nv_update_stats(struct net_device *dev) |
1664 | { |
1665 | struct fe_priv *np = netdev_priv(dev); |
1666 | u8 __iomem *base = get_hwbase(dev); |
1667 | |
1668 | lockdep_assert_held(&np->hwstats_lock); |
1669 | |
1670 | /* query hardware */ |
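	/*
	 * The counters appear to clear on read, so each readl() below
	 * returns the delta since the last poll; the driver accumulates
	 * with += and relies on frequent polling to avoid wraparound.
	 */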
	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1692 | np->estats.rx_packets = |
1693 | np->estats.rx_unicast + |
1694 | np->estats.rx_multicast + |
1695 | np->estats.rx_broadcast; |
1696 | np->estats.rx_errors_total = |
1697 | np->estats.rx_crc_errors + |
1698 | np->estats.rx_over_errors + |
1699 | np->estats.rx_frame_error + |
1700 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + |
1701 | np->estats.rx_late_collision + |
1702 | np->estats.rx_runt + |
1703 | np->estats.rx_frame_too_long; |
1704 | np->estats.tx_errors_total = |
1705 | np->estats.tx_late_collision + |
1706 | np->estats.tx_fifo_errors + |
1707 | np->estats.tx_carrier_errors + |
1708 | np->estats.tx_excess_deferral + |
1709 | np->estats.tx_retry_error; |
1710 | |
1711 | if (np->driver_data & DEV_HAS_STATISTICS_V2) { |
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1718 | np->estats.rx_errors_total += np->estats.rx_drop_frame; |
1719 | } |
1720 | |
1721 | if (np->driver_data & DEV_HAS_STATISTICS_V3) { |
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1725 | } |
1726 | } |
1727 | |
1728 | static void nv_get_stats(int cpu, struct fe_priv *np, |
1729 | struct rtnl_link_stats64 *storage) |
1730 | { |
1731 | struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); |
1732 | unsigned int syncp_start; |
1733 | u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors; |
1734 | u64 tx_packets, tx_bytes, tx_dropped; |
1735 | |
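	/* u64_stats seqcount retry loop: if a writer updated the per-cpu
	 * counters while we were copying them, re-read the snapshot so
	 * the rx values stay mutually consistent.
	 */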
1736 | do { |
		syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
1738 | rx_packets = src->stat_rx_packets; |
1739 | rx_bytes = src->stat_rx_bytes; |
1740 | rx_dropped = src->stat_rx_dropped; |
1741 | rx_missed_errors = src->stat_rx_missed_errors; |
	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
1743 | |
1744 | storage->rx_packets += rx_packets; |
1745 | storage->rx_bytes += rx_bytes; |
1746 | storage->rx_dropped += rx_dropped; |
1747 | storage->rx_missed_errors += rx_missed_errors; |
1748 | |
1749 | do { |
		syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
1751 | tx_packets = src->stat_tx_packets; |
1752 | tx_bytes = src->stat_tx_bytes; |
1753 | tx_dropped = src->stat_tx_dropped; |
	} while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
1755 | |
1756 | storage->tx_packets += tx_packets; |
1757 | storage->tx_bytes += tx_bytes; |
1758 | storage->tx_dropped += tx_dropped; |
1759 | } |
1760 | |
1761 | /* |
1762 | * nv_get_stats64: dev->ndo_get_stats64 function |
1763 | * Get latest stats value from the nic. |
1764 | * Called with read_lock(&dev_base_lock) held for read - |
1765 | * only synchronized against unregister_netdevice. |
1766 | */ |
1767 | static void |
1768 | nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) |
1769 | __acquires(&netdev_priv(dev)->hwstats_lock) |
1770 | __releases(&netdev_priv(dev)->hwstats_lock) |
1771 | { |
1772 | struct fe_priv *np = netdev_priv(dev); |
1773 | int cpu; |
1774 | |
1775 | /* |
1776 | * Note: because HW stats are not always available and for |
1777 | * consistency reasons, the following ifconfig stats are |
1778 | * managed by software: rx_bytes, tx_bytes, rx_packets and |
1779 | * tx_packets. The related hardware stats reported by ethtool |
1780 | * should be equivalent to these ifconfig stats, with 4 |
1781 | * additional bytes per packet (Ethernet FCS CRC), except for |
1782 | * tx_packets when TSO kicks in. |
1783 | */ |
1784 | |
1785 | /* software stats */ |
1786 | for_each_online_cpu(cpu) |
1787 | nv_get_stats(cpu, np, storage); |
1788 | |
1789 | /* If the nic supports hw counters then retrieve latest values */ |
1790 | if (np->driver_data & DEV_HAS_STATISTICS_V123) { |
		spin_lock_bh(&np->hwstats_lock);
1792 | |
1793 | nv_update_stats(dev); |
1794 | |
1795 | /* generic stats */ |
1796 | storage->rx_errors = np->estats.rx_errors_total; |
1797 | storage->tx_errors = np->estats.tx_errors_total; |
1798 | |
1799 | /* meaningful only when NIC supports stats v3 */ |
1800 | storage->multicast = np->estats.rx_multicast; |
1801 | |
1802 | /* detailed rx_errors */ |
1803 | storage->rx_length_errors = np->estats.rx_length_error; |
1804 | storage->rx_over_errors = np->estats.rx_over_errors; |
1805 | storage->rx_crc_errors = np->estats.rx_crc_errors; |
1806 | storage->rx_frame_errors = np->estats.rx_frame_align_error; |
1807 | storage->rx_fifo_errors = np->estats.rx_drop_frame; |
1808 | |
1809 | /* detailed tx_errors */ |
1810 | storage->tx_carrier_errors = np->estats.tx_carrier_errors; |
1811 | storage->tx_fifo_errors = np->estats.tx_fifo_errors; |
1812 | |
		spin_unlock_bh(&np->hwstats_lock);
1814 | } |
1815 | } |
1816 | |
1817 | /* |
1818 | * nv_alloc_rx: fill rx ring entries. |
1819 | * Return 1 if the allocations for the skbs failed and the |
 * rx engine is without available descriptors.
1821 | */ |
1822 | static int nv_alloc_rx(struct net_device *dev) |
1823 | { |
1824 | struct fe_priv *np = netdev_priv(dev); |
1825 | struct ring_desc *less_rx; |
1826 | |
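	/* Refill to one slot behind get_rx: keeping a single descriptor
	 * permanently unused lets put == get unambiguously mean "empty".
	 */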
1827 | less_rx = np->get_rx.orig; |
1828 | if (less_rx-- == np->rx_ring.orig) |
1829 | less_rx = np->last_rx.orig; |
1830 | |
1831 | while (np->put_rx.orig != less_rx) { |
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1833 | if (likely(skb)) { |
1834 | np->put_rx_ctx->skb = skb; |
1835 | np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, |
1836 | skb->data, |
1837 | skb_tailroom(skb), |
1838 | DMA_FROM_DEVICE); |
1839 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
1840 | np->put_rx_ctx->dma))) { |
1841 | kfree_skb(skb); |
1842 | goto packet_dropped; |
1843 | } |
1844 | np->put_rx_ctx->dma_len = skb_tailroom(skb); |
1845 | np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); |
1846 | wmb(); |
1847 | np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); |
1848 | if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) |
1849 | np->put_rx.orig = np->rx_ring.orig; |
1850 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) |
1851 | np->put_rx_ctx = np->rx_skb; |
1852 | } else { |
1853 | packet_dropped: |
			u64_stats_update_begin(&np->swstats_rx_syncp);
			nv_txrx_stats_inc(stat_rx_dropped);
			u64_stats_update_end(&np->swstats_rx_syncp);
1857 | return 1; |
1858 | } |
1859 | } |
1860 | return 0; |
1861 | } |
1862 | |
1863 | static int nv_alloc_rx_optimized(struct net_device *dev) |
1864 | { |
1865 | struct fe_priv *np = netdev_priv(dev); |
1866 | struct ring_desc_ex *less_rx; |
1867 | |
1868 | less_rx = np->get_rx.ex; |
1869 | if (less_rx-- == np->rx_ring.ex) |
1870 | less_rx = np->last_rx.ex; |
1871 | |
1872 | while (np->put_rx.ex != less_rx) { |
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1874 | if (likely(skb)) { |
1875 | np->put_rx_ctx->skb = skb; |
1876 | np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, |
1877 | skb->data, |
1878 | skb_tailroom(skb), |
1879 | DMA_FROM_DEVICE); |
1880 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
1881 | np->put_rx_ctx->dma))) { |
1882 | kfree_skb(skb); |
1883 | goto packet_dropped; |
1884 | } |
1885 | np->put_rx_ctx->dma_len = skb_tailroom(skb); |
1886 | np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); |
1887 | np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); |
1888 | wmb(); |
1889 | np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); |
1890 | if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) |
1891 | np->put_rx.ex = np->rx_ring.ex; |
1892 | if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) |
1893 | np->put_rx_ctx = np->rx_skb; |
1894 | } else { |
1895 | packet_dropped: |
			u64_stats_update_begin(&np->swstats_rx_syncp);
			nv_txrx_stats_inc(stat_rx_dropped);
			u64_stats_update_end(&np->swstats_rx_syncp);
1899 | return 1; |
1900 | } |
1901 | } |
1902 | return 0; |
1903 | } |
1904 | |
/* If rx buffers are exhausted, this timer fires after 50 ms to retry the refill */
1906 | static void nv_do_rx_refill(struct timer_list *t) |
1907 | { |
1908 | struct fe_priv *np = from_timer(np, t, oom_kick); |
1909 | |
1910 | /* Just reschedule NAPI rx processing */ |
	napi_schedule(&np->napi);
1912 | } |
1913 | |
1914 | static void nv_init_rx(struct net_device *dev) |
1915 | { |
1916 | struct fe_priv *np = netdev_priv(dev); |
1917 | int i; |
1918 | |
1919 | np->get_rx = np->rx_ring; |
1920 | np->put_rx = np->rx_ring; |
1921 | |
1922 | if (!nv_optimized(np)) |
1923 | np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; |
1924 | else |
1925 | np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; |
1926 | np->get_rx_ctx = np->rx_skb; |
1927 | np->put_rx_ctx = np->rx_skb; |
1928 | np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; |
1929 | |
1930 | for (i = 0; i < np->rx_ring_size; i++) { |
1931 | if (!nv_optimized(np)) { |
1932 | np->rx_ring.orig[i].flaglen = 0; |
1933 | np->rx_ring.orig[i].buf = 0; |
1934 | } else { |
1935 | np->rx_ring.ex[i].flaglen = 0; |
1936 | np->rx_ring.ex[i].txvlan = 0; |
1937 | np->rx_ring.ex[i].bufhigh = 0; |
1938 | np->rx_ring.ex[i].buflow = 0; |
1939 | } |
1940 | np->rx_skb[i].skb = NULL; |
1941 | np->rx_skb[i].dma = 0; |
1942 | } |
1943 | } |
1944 | |
1945 | static void nv_init_tx(struct net_device *dev) |
1946 | { |
1947 | struct fe_priv *np = netdev_priv(dev); |
1948 | int i; |
1949 | |
1950 | np->get_tx = np->tx_ring; |
1951 | np->put_tx = np->tx_ring; |
1952 | |
1953 | if (!nv_optimized(np)) |
1954 | np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; |
1955 | else |
1956 | np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; |
1957 | np->get_tx_ctx = np->tx_skb; |
1958 | np->put_tx_ctx = np->tx_skb; |
1959 | np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; |
	netdev_reset_queue(np->dev);
1961 | np->tx_pkts_in_progress = 0; |
1962 | np->tx_change_owner = NULL; |
1963 | np->tx_end_flip = NULL; |
1964 | np->tx_stop = 0; |
1965 | |
1966 | for (i = 0; i < np->tx_ring_size; i++) { |
1967 | if (!nv_optimized(np)) { |
1968 | np->tx_ring.orig[i].flaglen = 0; |
1969 | np->tx_ring.orig[i].buf = 0; |
1970 | } else { |
1971 | np->tx_ring.ex[i].flaglen = 0; |
1972 | np->tx_ring.ex[i].txvlan = 0; |
1973 | np->tx_ring.ex[i].bufhigh = 0; |
1974 | np->tx_ring.ex[i].buflow = 0; |
1975 | } |
1976 | np->tx_skb[i].skb = NULL; |
1977 | np->tx_skb[i].dma = 0; |
1978 | np->tx_skb[i].dma_len = 0; |
1979 | np->tx_skb[i].dma_single = 0; |
1980 | np->tx_skb[i].first_tx_desc = NULL; |
1981 | np->tx_skb[i].next_tx_ctx = NULL; |
1982 | } |
1983 | } |
1984 | |
1985 | static int nv_init_ring(struct net_device *dev) |
1986 | { |
1987 | struct fe_priv *np = netdev_priv(dev); |
1988 | |
1989 | nv_init_tx(dev); |
1990 | nv_init_rx(dev); |
1991 | |
1992 | if (!nv_optimized(np)) |
1993 | return nv_alloc_rx(dev); |
1994 | else |
1995 | return nv_alloc_rx_optimized(dev); |
1996 | } |
1997 | |
1998 | static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) |
1999 | { |
2000 | if (tx_skb->dma) { |
2001 | if (tx_skb->dma_single) |
2002 | dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, |
2003 | tx_skb->dma_len, |
2004 | DMA_TO_DEVICE); |
2005 | else |
2006 | dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, |
2007 | tx_skb->dma_len, |
2008 | DMA_TO_DEVICE); |
2009 | tx_skb->dma = 0; |
2010 | } |
2011 | } |
2012 | |
2013 | static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) |
2014 | { |
2015 | nv_unmap_txskb(np, tx_skb); |
2016 | if (tx_skb->skb) { |
		dev_kfree_skb_any(tx_skb->skb);
2018 | tx_skb->skb = NULL; |
2019 | return 1; |
2020 | } |
2021 | return 0; |
2022 | } |
2023 | |
2024 | static void nv_drain_tx(struct net_device *dev) |
2025 | { |
2026 | struct fe_priv *np = netdev_priv(dev); |
2027 | unsigned int i; |
2028 | |
2029 | for (i = 0; i < np->tx_ring_size; i++) { |
2030 | if (!nv_optimized(np)) { |
2031 | np->tx_ring.orig[i].flaglen = 0; |
2032 | np->tx_ring.orig[i].buf = 0; |
2033 | } else { |
2034 | np->tx_ring.ex[i].flaglen = 0; |
2035 | np->tx_ring.ex[i].txvlan = 0; |
2036 | np->tx_ring.ex[i].bufhigh = 0; |
2037 | np->tx_ring.ex[i].buflow = 0; |
2038 | } |
		if (nv_release_txskb(np, &np->tx_skb[i])) {
			u64_stats_update_begin(&np->swstats_tx_syncp);
			nv_txrx_stats_inc(stat_tx_dropped);
			u64_stats_update_end(&np->swstats_tx_syncp);
2043 | } |
2044 | np->tx_skb[i].dma = 0; |
2045 | np->tx_skb[i].dma_len = 0; |
2046 | np->tx_skb[i].dma_single = 0; |
2047 | np->tx_skb[i].first_tx_desc = NULL; |
2048 | np->tx_skb[i].next_tx_ctx = NULL; |
2049 | } |
2050 | np->tx_pkts_in_progress = 0; |
2051 | np->tx_change_owner = NULL; |
2052 | np->tx_end_flip = NULL; |
2053 | } |
2054 | |
2055 | static void nv_drain_rx(struct net_device *dev) |
2056 | { |
2057 | struct fe_priv *np = netdev_priv(dev); |
2058 | int i; |
2059 | |
2060 | for (i = 0; i < np->rx_ring_size; i++) { |
2061 | if (!nv_optimized(np)) { |
2062 | np->rx_ring.orig[i].flaglen = 0; |
2063 | np->rx_ring.orig[i].buf = 0; |
2064 | } else { |
2065 | np->rx_ring.ex[i].flaglen = 0; |
2066 | np->rx_ring.ex[i].txvlan = 0; |
2067 | np->rx_ring.ex[i].bufhigh = 0; |
2068 | np->rx_ring.ex[i].buflow = 0; |
2069 | } |
2070 | wmb(); |
2071 | if (np->rx_skb[i].skb) { |
2072 | dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, |
2073 | (skb_end_pointer(np->rx_skb[i].skb) - |
2074 | np->rx_skb[i].skb->data), |
2075 | DMA_FROM_DEVICE); |
2076 | dev_kfree_skb(np->rx_skb[i].skb); |
2077 | np->rx_skb[i].skb = NULL; |
2078 | } |
2079 | } |
2080 | } |
2081 | |
2082 | static void nv_drain_rxtx(struct net_device *dev) |
2083 | { |
2084 | nv_drain_tx(dev); |
2085 | nv_drain_rx(dev); |
2086 | } |
2087 | |
2088 | static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) |
2089 | { |
2090 | return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); |
2091 | } |
2092 | |
2093 | static void nv_legacybackoff_reseed(struct net_device *dev) |
2094 | { |
2095 | u8 __iomem *base = get_hwbase(dev); |
2096 | u32 reg; |
2097 | u32 low; |
2098 | int tx_status = 0; |
2099 | |
	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
	get_random_bytes(&low, sizeof(low));
2102 | reg |= low & NVREG_SLOTTIME_MASK; |
2103 | |
2104 | /* Need to stop tx before change takes effect. |
2105 | * Caller has already gained np->lock. |
2106 | */ |
	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2108 | if (tx_status) |
2109 | nv_stop_tx(dev); |
2110 | nv_stop_rx(dev); |
	writel(reg, base + NvRegSlotTime);
2112 | if (tx_status) |
2113 | nv_start_tx(dev); |
2114 | nv_start_rx(dev); |
2115 | } |
2116 | |
2117 | /* Gear Backoff Seeds */ |
2118 | #define BACKOFF_SEEDSET_ROWS 8 |
2119 | #define BACKOFF_SEEDSET_LFSRS 15 |
2120 | |
2121 | /* Known Good seed sets */ |
2122 | static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { |
2123 | {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, |
2124 | {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, |
2125 | {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, |
2126 | {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, |
2127 | {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, |
2128 | {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, |
2129 | {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, |
2130 | {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} }; |
2131 | |
2132 | static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { |
2133 | {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, |
2134 | {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, |
2135 | {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, |
2136 | {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, |
2137 | {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, |
2138 | {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, |
2139 | {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, |
2140 | {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} }; |
2141 | |
2142 | static void nv_gear_backoff_reseed(struct net_device *dev) |
2143 | { |
2144 | u8 __iomem *base = get_hwbase(dev); |
2145 | u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; |
2146 | u32 temp, seedset, combinedSeed; |
2147 | int i; |
2148 | |
2149 | /* Setup seed for free running LFSR */ |
	/* We draw three random 12-bit values and swizzle their bits
	 * around to increase randomness.
	 */
	get_random_bytes(&miniseed1, sizeof(miniseed1));
2153 | miniseed1 &= 0x0fff; |
2154 | if (miniseed1 == 0) |
2155 | miniseed1 = 0xabc; |
2156 | |
	get_random_bytes(&miniseed2, sizeof(miniseed2));
2158 | miniseed2 &= 0x0fff; |
2159 | if (miniseed2 == 0) |
2160 | miniseed2 = 0xabc; |
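	/* reverse the 12-bit miniseed by swapping its top and bottom
	 * nibbles while leaving the middle nibble in place
	 */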
2161 | miniseed2_reversed = |
2162 | ((miniseed2 & 0xF00) >> 8) | |
2163 | (miniseed2 & 0x0F0) | |
2164 | ((miniseed2 & 0x00F) << 8); |
2165 | |
	get_random_bytes(&miniseed3, sizeof(miniseed3));
2167 | miniseed3 &= 0x0fff; |
2168 | if (miniseed3 == 0) |
2169 | miniseed3 = 0xabc; |
2170 | miniseed3_reversed = |
2171 | ((miniseed3 & 0xF00) >> 8) | |
2172 | (miniseed3 & 0x0F0) | |
2173 | ((miniseed3 & 0x00F) << 8); |
2174 | |
2175 | combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | |
2176 | (miniseed2 ^ miniseed3_reversed); |
2177 | |
2178 | /* Seeds can not be zero */ |
2179 | if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) |
2180 | combinedSeed |= 0x08; |
2181 | if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) |
2182 | combinedSeed |= 0x8000; |
2183 | |
2184 | /* No need to disable tx here */ |
2185 | temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); |
2186 | temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; |
2187 | temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; |
	writel(temp, base + NvRegBackOffControl);
2189 | |
2190 | /* Setup seeds for all gear LFSRs. */ |
	get_random_bytes(&seedset, sizeof(seedset));
2192 | seedset = seedset % BACKOFF_SEEDSET_ROWS; |
2193 | for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) { |
2194 | temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); |
2195 | temp |= main_seedset[seedset][i-1] & 0x3ff; |
2196 | temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); |
		writel(temp, base + NvRegBackOffControl);
2198 | } |
2199 | } |
2200 | |
2201 | /* |
2202 | * nv_start_xmit: dev->hard_start_xmit function |
2203 | * Called with netif_tx_lock held. |
2204 | */ |
2205 | static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2206 | { |
2207 | struct fe_priv *np = netdev_priv(dev); |
2208 | u32 tx_flags = 0; |
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2210 | unsigned int fragments = skb_shinfo(skb)->nr_frags; |
2211 | unsigned int i; |
2212 | u32 offset = 0; |
2213 | u32 bcnt; |
2214 | u32 size = skb_headlen(skb); |
2215 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2216 | u32 empty_slots; |
2217 | struct ring_desc *put_tx; |
2218 | struct ring_desc *start_tx; |
2219 | struct ring_desc *prev_tx; |
2220 | struct nv_skb_map *prev_tx_ctx; |
2221 | struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; |
2222 | unsigned long flags; |
2223 | netdev_tx_t ret = NETDEV_TX_OK; |
2224 | |
2225 | /* add fragments to entries count */ |
2226 | for (i = 0; i < fragments; i++) { |
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2228 | |
2229 | entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + |
2230 | ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2231 | } |
2232 | |
2233 | spin_lock_irqsave(&np->lock, flags); |
2234 | empty_slots = nv_get_empty_tx_slots(np); |
2235 | if (unlikely(empty_slots <= entries)) { |
2236 | netif_stop_queue(dev); |
2237 | np->tx_stop = 1; |
		spin_unlock_irqrestore(&np->lock, flags);
2239 | |
		/* The ring is full, but descriptors queued earlier
		 * (including xmit_more batches) still need the NIC tx
		 * kick register written, so fall through to the kick.
		 */
2243 | ret = NETDEV_TX_BUSY; |
2244 | goto txkick; |
2245 | } |
	spin_unlock_irqrestore(&np->lock, flags);
2247 | |
2248 | start_tx = put_tx = np->put_tx.orig; |
2249 | |
2250 | /* setup the header buffer */ |
2251 | do { |
2252 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
2253 | np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, |
2254 | skb->data + offset, bcnt, |
2255 | DMA_TO_DEVICE); |
2256 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
2257 | np->put_tx_ctx->dma))) { |
2258 | /* on DMA mapping error - drop the packet */ |
2259 | dev_kfree_skb_any(skb); |
			u64_stats_update_begin(&np->swstats_tx_syncp);
			nv_txrx_stats_inc(stat_tx_dropped);
			u64_stats_update_end(&np->swstats_tx_syncp);
2263 | |
2264 | ret = NETDEV_TX_OK; |
2265 | |
2266 | goto dma_error; |
2267 | } |
2268 | np->put_tx_ctx->dma_len = bcnt; |
2269 | np->put_tx_ctx->dma_single = 1; |
2270 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); |
2271 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2272 | |
2273 | tx_flags = np->tx_flags; |
2274 | offset += bcnt; |
2275 | size -= bcnt; |
2276 | if (unlikely(put_tx++ == np->last_tx.orig)) |
2277 | put_tx = np->tx_ring.orig; |
2278 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2279 | np->put_tx_ctx = np->tx_skb; |
2280 | } while (size); |
2281 | |
2282 | /* setup the fragments */ |
2283 | for (i = 0; i < fragments; i++) { |
2284 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2285 | u32 frag_size = skb_frag_size(frag); |
2286 | offset = 0; |
2287 | |
2288 | do { |
2289 | if (!start_tx_ctx) |
2290 | start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; |
2291 | |
2292 | bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; |
			np->put_tx_ctx->dma = skb_frag_dma_map(
							&np->pci_dev->dev,
							frag, offset,
							bcnt,
							DMA_TO_DEVICE);
2298 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
2299 | np->put_tx_ctx->dma))) { |
2300 | |
2301 | /* Unwind the mapped fragments */ |
2302 | do { |
					nv_unmap_txskb(np, start_tx_ctx);
2304 | if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) |
2305 | tmp_tx_ctx = np->tx_skb; |
2306 | } while (tmp_tx_ctx != np->put_tx_ctx); |
2307 | dev_kfree_skb_any(skb); |
2308 | np->put_tx_ctx = start_tx_ctx; |
				u64_stats_update_begin(&np->swstats_tx_syncp);
				nv_txrx_stats_inc(stat_tx_dropped);
				u64_stats_update_end(&np->swstats_tx_syncp);
2312 | |
2313 | ret = NETDEV_TX_OK; |
2314 | |
2315 | goto dma_error; |
2316 | } |
2317 | |
2318 | np->put_tx_ctx->dma_len = bcnt; |
2319 | np->put_tx_ctx->dma_single = 0; |
2320 | put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); |
2321 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2322 | |
2323 | offset += bcnt; |
2324 | frag_size -= bcnt; |
2325 | if (unlikely(put_tx++ == np->last_tx.orig)) |
2326 | put_tx = np->tx_ring.orig; |
2327 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2328 | np->put_tx_ctx = np->tx_skb; |
2329 | } while (frag_size); |
2330 | } |
2331 | |
2332 | if (unlikely(put_tx == np->tx_ring.orig)) |
2333 | prev_tx = np->last_tx.orig; |
2334 | else |
2335 | prev_tx = put_tx - 1; |
2336 | |
2337 | if (unlikely(np->put_tx_ctx == np->tx_skb)) |
2338 | prev_tx_ctx = np->last_tx_ctx; |
2339 | else |
2340 | prev_tx_ctx = np->put_tx_ctx - 1; |
2341 | |
2342 | /* set last fragment flag */ |
2343 | prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); |
2344 | |
2345 | /* save skb in this slot's context area */ |
2346 | prev_tx_ctx->skb = skb; |
2347 | |
2348 | if (skb_is_gso(skb)) |
2349 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
2350 | else |
2351 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? |
2352 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; |
2353 | |
2354 | spin_lock_irqsave(&np->lock, flags); |
2355 | |
2356 | /* set tx flags */ |
2357 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
2358 | |
	netdev_sent_queue(np->dev, skb->len);
2360 | |
2361 | skb_tx_timestamp(skb); |
2362 | |
2363 | np->put_tx.orig = put_tx; |
2364 | |
	spin_unlock_irqrestore(&np->lock, flags);
2366 | |
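	/* Note: the dma_error path jumps straight to the kick below,
	 * bypassing the xmit_more check, so descriptors queued before the
	 * failed packet are still handed to the hardware.
	 */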
2367 | txkick: |
2368 | if (netif_queue_stopped(dev) || !netdev_xmit_more()) { |
2369 | u32 txrxctl_kick; |
2370 | dma_error: |
2371 | txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; |
		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2373 | } |
2374 | |
2375 | return ret; |
2376 | } |
2377 | |
2378 | static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, |
2379 | struct net_device *dev) |
2380 | { |
2381 | struct fe_priv *np = netdev_priv(dev); |
2382 | u32 tx_flags = 0; |
	u32 tx_flags_extra;
2384 | unsigned int fragments = skb_shinfo(skb)->nr_frags; |
2385 | unsigned int i; |
2386 | u32 offset = 0; |
2387 | u32 bcnt; |
2388 | u32 size = skb_headlen(skb); |
2389 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2390 | u32 empty_slots; |
2391 | struct ring_desc_ex *put_tx; |
2392 | struct ring_desc_ex *start_tx; |
2393 | struct ring_desc_ex *prev_tx; |
2394 | struct nv_skb_map *prev_tx_ctx; |
2395 | struct nv_skb_map *start_tx_ctx = NULL; |
2396 | struct nv_skb_map *tmp_tx_ctx = NULL; |
2397 | unsigned long flags; |
2398 | netdev_tx_t ret = NETDEV_TX_OK; |
2399 | |
2400 | /* add fragments to entries count */ |
2401 | for (i = 0; i < fragments; i++) { |
		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2403 | |
2404 | entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + |
2405 | ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2406 | } |
2407 | |
2408 | spin_lock_irqsave(&np->lock, flags); |
2409 | empty_slots = nv_get_empty_tx_slots(np); |
2410 | if (unlikely(empty_slots <= entries)) { |
2411 | netif_stop_queue(dev); |
2412 | np->tx_stop = 1; |
		spin_unlock_irqrestore(&np->lock, flags);
2414 | |
		/* The ring is full, but descriptors queued earlier
		 * (including xmit_more batches) still need the NIC tx
		 * kick register written, so fall through to the kick.
		 */
2418 | ret = NETDEV_TX_BUSY; |
2419 | |
2420 | goto txkick; |
2421 | } |
	spin_unlock_irqrestore(&np->lock, flags);
2423 | |
2424 | start_tx = put_tx = np->put_tx.ex; |
2425 | start_tx_ctx = np->put_tx_ctx; |
2426 | |
2427 | /* setup the header buffer */ |
2428 | do { |
2429 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; |
2430 | np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, |
2431 | skb->data + offset, bcnt, |
2432 | DMA_TO_DEVICE); |
2433 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
2434 | np->put_tx_ctx->dma))) { |
2435 | /* on DMA mapping error - drop the packet */ |
2436 | dev_kfree_skb_any(skb); |
			u64_stats_update_begin(&np->swstats_tx_syncp);
			nv_txrx_stats_inc(stat_tx_dropped);
			u64_stats_update_end(&np->swstats_tx_syncp);
2440 | |
2441 | ret = NETDEV_TX_OK; |
2442 | |
2443 | goto dma_error; |
2444 | } |
2445 | np->put_tx_ctx->dma_len = bcnt; |
2446 | np->put_tx_ctx->dma_single = 1; |
2447 | put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); |
2448 | put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); |
2449 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2450 | |
2451 | tx_flags = NV_TX2_VALID; |
2452 | offset += bcnt; |
2453 | size -= bcnt; |
2454 | if (unlikely(put_tx++ == np->last_tx.ex)) |
2455 | put_tx = np->tx_ring.ex; |
2456 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2457 | np->put_tx_ctx = np->tx_skb; |
2458 | } while (size); |
2459 | |
2460 | /* setup the fragments */ |
2461 | for (i = 0; i < fragments; i++) { |
2462 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2463 | u32 frag_size = skb_frag_size(frag); |
2464 | offset = 0; |
2465 | |
2466 | do { |
2467 | bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; |
2468 | if (!start_tx_ctx) |
2469 | start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; |
			np->put_tx_ctx->dma = skb_frag_dma_map(
							&np->pci_dev->dev,
							frag, offset,
							bcnt,
							DMA_TO_DEVICE);
2475 | |
2476 | if (unlikely(dma_mapping_error(&np->pci_dev->dev, |
2477 | np->put_tx_ctx->dma))) { |
2478 | |
2479 | /* Unwind the mapped fragments */ |
2480 | do { |
					nv_unmap_txskb(np, start_tx_ctx);
2482 | if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) |
2483 | tmp_tx_ctx = np->tx_skb; |
2484 | } while (tmp_tx_ctx != np->put_tx_ctx); |
2485 | dev_kfree_skb_any(skb); |
2486 | np->put_tx_ctx = start_tx_ctx; |
				u64_stats_update_begin(&np->swstats_tx_syncp);
				nv_txrx_stats_inc(stat_tx_dropped);
				u64_stats_update_end(&np->swstats_tx_syncp);
2490 | |
2491 | ret = NETDEV_TX_OK; |
2492 | |
2493 | goto dma_error; |
2494 | } |
2495 | np->put_tx_ctx->dma_len = bcnt; |
2496 | np->put_tx_ctx->dma_single = 0; |
2497 | put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); |
2498 | put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); |
2499 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2500 | |
2501 | offset += bcnt; |
2502 | frag_size -= bcnt; |
2503 | if (unlikely(put_tx++ == np->last_tx.ex)) |
2504 | put_tx = np->tx_ring.ex; |
2505 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2506 | np->put_tx_ctx = np->tx_skb; |
2507 | } while (frag_size); |
2508 | } |
2509 | |
2510 | if (unlikely(put_tx == np->tx_ring.ex)) |
2511 | prev_tx = np->last_tx.ex; |
2512 | else |
2513 | prev_tx = put_tx - 1; |
2514 | |
2515 | if (unlikely(np->put_tx_ctx == np->tx_skb)) |
2516 | prev_tx_ctx = np->last_tx_ctx; |
2517 | else |
2518 | prev_tx_ctx = np->put_tx_ctx - 1; |
2519 | |
2520 | /* set last fragment flag */ |
2521 | prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); |
2522 | |
2523 | /* save skb in this slot's context area */ |
2524 | prev_tx_ctx->skb = skb; |
2525 | |
2526 | if (skb_is_gso(skb)) |
2527 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
2528 | else |
2529 | tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? |
2530 | NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; |
2531 | |
2532 | /* vlan tag */ |
2533 | if (skb_vlan_tag_present(skb)) |
2534 | start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | |
2535 | skb_vlan_tag_get(skb)); |
2536 | else |
2537 | start_tx->txvlan = 0; |
2538 | |
2539 | spin_lock_irqsave(&np->lock, flags); |
2540 | |
2541 | if (np->tx_limit) { |
		/* Limit the number of outstanding tx. Set up all fragments,
		 * but do not set the VALID bit on the first descriptor.
		 * Save pointers to that descriptor and to the next skb_map
		 * element.
		 */
2546 | |
2547 | if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { |
2548 | if (!np->tx_change_owner) |
2549 | np->tx_change_owner = start_tx_ctx; |
2550 | |
2551 | /* remove VALID bit */ |
2552 | tx_flags &= ~NV_TX2_VALID; |
2553 | start_tx_ctx->first_tx_desc = start_tx; |
2554 | start_tx_ctx->next_tx_ctx = np->put_tx_ctx; |
2555 | np->tx_end_flip = np->put_tx_ctx; |
2556 | } else { |
2557 | np->tx_pkts_in_progress++; |
2558 | } |
2559 | } |
2560 | |
2561 | /* set tx flags */ |
2562 | start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
2563 | |
	netdev_sent_queue(np->dev, skb->len);
2565 | |
2566 | skb_tx_timestamp(skb); |
2567 | |
2568 | np->put_tx.ex = put_tx; |
2569 | |
	spin_unlock_irqrestore(&np->lock, flags);
2571 | |
2572 | txkick: |
2573 | if (netif_queue_stopped(dev) || !netdev_xmit_more()) { |
2574 | u32 txrxctl_kick; |
2575 | dma_error: |
2576 | txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; |
		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2578 | } |
2579 | |
2580 | return ret; |
2581 | } |
2582 | |
2583 | static inline void nv_tx_flip_ownership(struct net_device *dev) |
2584 | { |
2585 | struct fe_priv *np = netdev_priv(dev); |
2586 | |
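	/* A completed packet frees one in-flight slot. If earlier packets
	 * were queued without the VALID bit (tx_limit throttling), hand
	 * the oldest of them to the hardware now by setting VALID and
	 * kicking the transmitter.
	 */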
2587 | np->tx_pkts_in_progress--; |
2588 | if (np->tx_change_owner) { |
2589 | np->tx_change_owner->first_tx_desc->flaglen |= |
2590 | cpu_to_le32(NV_TX2_VALID); |
2591 | np->tx_pkts_in_progress++; |
2592 | |
2593 | np->tx_change_owner = np->tx_change_owner->next_tx_ctx; |
2594 | if (np->tx_change_owner == np->tx_end_flip) |
2595 | np->tx_change_owner = NULL; |
2596 | |
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2598 | } |
2599 | } |
2600 | |
2601 | /* |
2602 | * nv_tx_done: check for completed packets, release the skbs. |
2603 | * |
2604 | * Caller must own np->lock. |
2605 | */ |
2606 | static int nv_tx_done(struct net_device *dev, int limit) |
2607 | { |
2608 | struct fe_priv *np = netdev_priv(dev); |
2609 | u32 flags; |
2610 | int tx_work = 0; |
2611 | struct ring_desc *orig_get_tx = np->get_tx.orig; |
2612 | unsigned int bytes_compl = 0; |
2613 | |
2614 | while ((np->get_tx.orig != np->put_tx.orig) && |
2615 | !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && |
2616 | (tx_work < limit)) { |
2617 | |
		nv_unmap_txskb(np, np->get_tx_ctx);
2619 | |
2620 | if (np->desc_ver == DESC_VER_1) { |
2621 | if (flags & NV_TX_LASTPACKET) { |
2622 | if (unlikely(flags & NV_TX_ERROR)) { |
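				/* a retry error with a zero retry count
				 * suggests the backoff LFSR got stuck;
				 * reseed it
				 */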
2623 | if ((flags & NV_TX_RETRYERROR) |
2624 | && !(flags & NV_TX_RETRYCOUNT_MASK)) |
2625 | nv_legacybackoff_reseed(dev); |
2626 | } else { |
2627 | unsigned int len; |
2628 | |
					u64_stats_update_begin(&np->swstats_tx_syncp);
					nv_txrx_stats_inc(stat_tx_packets);
					len = np->get_tx_ctx->skb->len;
					nv_txrx_stats_add(stat_tx_bytes, len);
					u64_stats_update_end(&np->swstats_tx_syncp);
2634 | } |
2635 | bytes_compl += np->get_tx_ctx->skb->len; |
				dev_kfree_skb_any(np->get_tx_ctx->skb);
2637 | np->get_tx_ctx->skb = NULL; |
2638 | tx_work++; |
2639 | } |
2640 | } else { |
2641 | if (flags & NV_TX2_LASTPACKET) { |
2642 | if (unlikely(flags & NV_TX2_ERROR)) { |
2643 | if ((flags & NV_TX2_RETRYERROR) |
2644 | && !(flags & NV_TX2_RETRYCOUNT_MASK)) |
2645 | nv_legacybackoff_reseed(dev); |
2646 | } else { |
2647 | unsigned int len; |
2648 | |
					u64_stats_update_begin(&np->swstats_tx_syncp);
					nv_txrx_stats_inc(stat_tx_packets);
					len = np->get_tx_ctx->skb->len;
					nv_txrx_stats_add(stat_tx_bytes, len);
					u64_stats_update_end(&np->swstats_tx_syncp);
2654 | } |
2655 | bytes_compl += np->get_tx_ctx->skb->len; |
				dev_kfree_skb_any(np->get_tx_ctx->skb);
2657 | np->get_tx_ctx->skb = NULL; |
2658 | tx_work++; |
2659 | } |
2660 | } |
2661 | if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) |
2662 | np->get_tx.orig = np->tx_ring.orig; |
2663 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) |
2664 | np->get_tx_ctx = np->tx_skb; |
2665 | } |
2666 | |
	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2668 | |
2669 | if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { |
2670 | np->tx_stop = 0; |
2671 | netif_wake_queue(dev); |
2672 | } |
2673 | return tx_work; |
2674 | } |
2675 | |
2676 | static int nv_tx_done_optimized(struct net_device *dev, int limit) |
2677 | { |
2678 | struct fe_priv *np = netdev_priv(dev); |
2679 | u32 flags; |
2680 | int tx_work = 0; |
2681 | struct ring_desc_ex *orig_get_tx = np->get_tx.ex; |
2682 | unsigned long bytes_cleaned = 0; |
2683 | |
2684 | while ((np->get_tx.ex != np->put_tx.ex) && |
2685 | !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && |
2686 | (tx_work < limit)) { |
2687 | |
		nv_unmap_txskb(np, np->get_tx_ctx);
2689 | |
2690 | if (flags & NV_TX2_LASTPACKET) { |
2691 | if (unlikely(flags & NV_TX2_ERROR)) { |
2692 | if ((flags & NV_TX2_RETRYERROR) |
2693 | && !(flags & NV_TX2_RETRYCOUNT_MASK)) { |
2694 | if (np->driver_data & DEV_HAS_GEAR_MODE) |
2695 | nv_gear_backoff_reseed(dev); |
2696 | else |
2697 | nv_legacybackoff_reseed(dev); |
2698 | } |
2699 | } else { |
2700 | unsigned int len; |
2701 | |
				u64_stats_update_begin(&np->swstats_tx_syncp);
				nv_txrx_stats_inc(stat_tx_packets);
				len = np->get_tx_ctx->skb->len;
				nv_txrx_stats_add(stat_tx_bytes, len);
				u64_stats_update_end(&np->swstats_tx_syncp);
2707 | } |
2708 | |
2709 | bytes_cleaned += np->get_tx_ctx->skb->len; |
			dev_kfree_skb_any(np->get_tx_ctx->skb);
2711 | np->get_tx_ctx->skb = NULL; |
2712 | tx_work++; |
2713 | |
2714 | if (np->tx_limit) |
2715 | nv_tx_flip_ownership(dev); |
2716 | } |
2717 | |
2718 | if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) |
2719 | np->get_tx.ex = np->tx_ring.ex; |
2720 | if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) |
2721 | np->get_tx_ctx = np->tx_skb; |
2722 | } |
2723 | |
	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2725 | |
2726 | if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { |
2727 | np->tx_stop = 0; |
2728 | netif_wake_queue(dev); |
2729 | } |
2730 | return tx_work; |
2731 | } |
2732 | |
2733 | /* |
2734 | * nv_tx_timeout: dev->tx_timeout function |
2735 | * Called with netif_tx_lock held. |
2736 | */ |
2737 | static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue) |
2738 | { |
2739 | struct fe_priv *np = netdev_priv(dev); |
2740 | u8 __iomem *base = get_hwbase(dev); |
2741 | u32 status; |
2742 | union ring_type put_tx; |
2743 | int saved_tx_limit; |
2744 | |
2745 | if (np->msi_flags & NV_MSI_X_ENABLED) |
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2751 | |
2752 | if (unlikely(debug_tx_timeout)) { |
2753 | int i; |
2754 | |
		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
		netdev_info(dev, "Dumping tx registers\n");
		for (i = 0; i <= np->register_size; i += 32) {
			netdev_info(dev,
				    "%3x: %08x %08x %08x %08x "
				    "%08x %08x %08x %08x\n",
				    i,
				    readl(base + i + 0), readl(base + i + 4),
				    readl(base + i + 8), readl(base + i + 12),
				    readl(base + i + 16), readl(base + i + 20),
				    readl(base + i + 24), readl(base + i + 28));
2766 | } |
		netdev_info(dev, "Dumping tx ring\n");
2768 | for (i = 0; i < np->tx_ring_size; i += 4) { |
2769 | if (!nv_optimized(np)) { |
2770 | netdev_info(dev, |
					    "%03x: %08x %08x // %08x %08x "
					    "// %08x %08x // %08x %08x\n",
2773 | i, |
2774 | le32_to_cpu(np->tx_ring.orig[i].buf), |
2775 | le32_to_cpu(np->tx_ring.orig[i].flaglen), |
2776 | le32_to_cpu(np->tx_ring.orig[i+1].buf), |
2777 | le32_to_cpu(np->tx_ring.orig[i+1].flaglen), |
2778 | le32_to_cpu(np->tx_ring.orig[i+2].buf), |
2779 | le32_to_cpu(np->tx_ring.orig[i+2].flaglen), |
2780 | le32_to_cpu(np->tx_ring.orig[i+3].buf), |
2781 | le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); |
2782 | } else { |
2783 | netdev_info(dev, |
					    "%03x: %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x "
					    "// %08x %08x %08x\n",
2788 | i, |
2789 | le32_to_cpu(np->tx_ring.ex[i].bufhigh), |
2790 | le32_to_cpu(np->tx_ring.ex[i].buflow), |
2791 | le32_to_cpu(np->tx_ring.ex[i].flaglen), |
2792 | le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), |
2793 | le32_to_cpu(np->tx_ring.ex[i+1].buflow), |
2794 | le32_to_cpu(np->tx_ring.ex[i+1].flaglen), |
2795 | le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), |
2796 | le32_to_cpu(np->tx_ring.ex[i+2].buflow), |
2797 | le32_to_cpu(np->tx_ring.ex[i+2].flaglen), |
2798 | le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), |
2799 | le32_to_cpu(np->tx_ring.ex[i+3].buflow), |
2800 | le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); |
2801 | } |
2802 | } |
2803 | } |
2804 | |
	spin_lock_irq(&np->lock);
2806 | |
2807 | /* 1) stop tx engine */ |
2808 | nv_stop_tx(dev); |
2809 | |
2810 | /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ |
2811 | saved_tx_limit = np->tx_limit; |
2812 | np->tx_limit = 0; /* prevent giving HW any limited pkts */ |
2813 | np->tx_stop = 0; /* prevent waking tx queue */ |
2814 | if (!nv_optimized(np)) |
		nv_tx_done(dev, np->tx_ring_size);
2816 | else |
		nv_tx_done_optimized(dev, np->tx_ring_size);
2818 | |
2819 | /* save current HW position */ |
2820 | if (np->tx_change_owner) |
2821 | put_tx.ex = np->tx_change_owner->first_tx_desc; |
2822 | else |
2823 | put_tx = np->put_tx; |
2824 | |
2825 | /* 3) clear all tx state */ |
2826 | nv_drain_tx(dev); |
2827 | nv_init_tx(dev); |
2828 | |
2829 | /* 4) restore state to current HW position */ |
2830 | np->get_tx = np->put_tx = put_tx; |
2831 | np->tx_limit = saved_tx_limit; |
2832 | |
2833 | /* 5) restart tx engine */ |
2834 | nv_start_tx(dev); |
2835 | netif_wake_queue(dev); |
	spin_unlock_irq(&np->lock);
2837 | } |
2838 | |
2839 | /* |
2840 | * Called when the nic notices a mismatch between the actual data len on the |
2841 | * wire and the len indicated in the 802 header |
2842 | */ |
2843 | static int nv_getlen(struct net_device *dev, void *packet, int datalen) |
2844 | { |
2845 | int hdrlen; /* length of the 802 header */ |
2846 | int protolen; /* length as stored in the proto field */ |
2847 | |
2848 | /* 1) calculate len according to header */ |
2849 | if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { |
2850 | protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); |
2851 | hdrlen = VLAN_HLEN; |
2852 | } else { |
2853 | protolen = ntohs(((struct ethhdr *)packet)->h_proto); |
2854 | hdrlen = ETH_HLEN; |
2855 | } |
2856 | if (protolen > ETH_DATA_LEN) |
2857 | return datalen; /* Value in proto field not a len, no checks possible */ |
2858 | |
2859 | protolen += hdrlen; |
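	/*
	 * Worked example: a 64-byte frame whose proto field claims 40 bytes
	 * of payload gives protolen = 40 + 14 = 54; since 64 >= 54, the
	 * trailing 10 bytes are treated as padding and trimmed below.
	 */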
2860 | /* consistency checks: */ |
2861 | if (datalen > ETH_ZLEN) { |
2862 | if (datalen >= protolen) { |
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
2866 | return protolen; |
2867 | } else { |
2868 | /* less data on wire than mentioned in header. |
2869 | * Discard the packet. |
2870 | */ |
2871 | return -1; |
2872 | } |
2873 | } else { |
2874 | /* short packet. Accept only if 802 values are also short */ |
2875 | if (protolen > ETH_ZLEN) { |
2876 | return -1; |
2877 | } |
2878 | return datalen; |
2879 | } |
2880 | } |
2881 | |
2882 | static void rx_missing_handler(u32 flags, struct fe_priv *np) |
2883 | { |
2884 | if (flags & NV_RX_MISSEDFRAME) { |
		u64_stats_update_begin(&np->swstats_rx_syncp);
		nv_txrx_stats_inc(stat_rx_missed_errors);
		u64_stats_update_end(&np->swstats_rx_syncp);
2888 | } |
2889 | } |
2890 | |
2891 | static int nv_rx_process(struct net_device *dev, int limit) |
2892 | { |
2893 | struct fe_priv *np = netdev_priv(dev); |
2894 | u32 flags; |
2895 | int rx_work = 0; |
2896 | struct sk_buff *skb; |
2897 | int len; |
2898 | |
2899 | while ((np->get_rx.orig != np->put_rx.orig) && |
2900 | !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && |
2901 | (rx_work < limit)) { |
2902 | |
2903 | /* |
2904 | * the packet is for us - immediately tear down the pci mapping. |
2905 | * TODO: check if a prefetch of the first cacheline improves |
2906 | * the performance. |
2907 | */ |
2908 | dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, |
2909 | np->get_rx_ctx->dma_len, |
2910 | DMA_FROM_DEVICE); |
2911 | skb = np->get_rx_ctx->skb; |
2912 | np->get_rx_ctx->skb = NULL; |
2913 | |
2914 | /* look at what we actually got: */ |
2915 | if (np->desc_ver == DESC_VER_1) { |
2916 | if (likely(flags & NV_RX_DESCRIPTORVALID)) { |
2917 | len = flags & LEN_MASK_V1; |
2918 | if (unlikely(flags & NV_RX_ERROR)) { |
2919 | if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { |
						len = nv_getlen(dev, skb->data, len);
2921 | if (len < 0) { |
2922 | dev_kfree_skb(skb); |
2923 | goto next_pkt; |
2924 | } |
2925 | } |
2926 | /* framing errors are soft errors */ |
2927 | else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { |
2928 | if (flags & NV_RX_SUBTRACT1) |
2929 | len--; |
2930 | } |
2931 | /* the rest are hard errors */ |
2932 | else { |
2933 | rx_missing_handler(flags, np); |
2934 | dev_kfree_skb(skb); |
2935 | goto next_pkt; |
2936 | } |
2937 | } |
2938 | } else { |
2939 | dev_kfree_skb(skb); |
2940 | goto next_pkt; |
2941 | } |
2942 | } else { |
2943 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { |
2944 | len = flags & LEN_MASK_V2; |
2945 | if (unlikely(flags & NV_RX2_ERROR)) { |
2946 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { |
						len = nv_getlen(dev, skb->data, len);
2948 | if (len < 0) { |
2949 | dev_kfree_skb(skb); |
2950 | goto next_pkt; |
2951 | } |
2952 | } |
2953 | /* framing errors are soft errors */ |
2954 | else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { |
2955 | if (flags & NV_RX2_SUBTRACT1) |
2956 | len--; |
2957 | } |
2958 | /* the rest are hard errors */ |
2959 | else { |
2960 | dev_kfree_skb(skb); |
2961 | goto next_pkt; |
2962 | } |
2963 | } |
2964 | if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ |
2965 | ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ |
2966 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2967 | } else { |
2968 | dev_kfree_skb(skb); |
2969 | goto next_pkt; |
2970 | } |
2971 | } |
2972 | /* got a valid packet - forward it to the network core */ |
2973 | skb_put(skb, len); |
2974 | skb->protocol = eth_type_trans(skb, dev); |
		napi_gro_receive(&np->napi, skb);
		u64_stats_update_begin(&np->swstats_rx_syncp);
		nv_txrx_stats_inc(stat_rx_packets);
		nv_txrx_stats_add(stat_rx_bytes, len);
		u64_stats_update_end(&np->swstats_rx_syncp);
2980 | next_pkt: |
2981 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
2982 | np->get_rx.orig = np->rx_ring.orig; |
2983 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) |
2984 | np->get_rx_ctx = np->rx_skb; |
2985 | |
2986 | rx_work++; |
2987 | } |
2988 | |
2989 | return rx_work; |
2990 | } |
2991 | |
2992 | static int nv_rx_process_optimized(struct net_device *dev, int limit) |
2993 | { |
2994 | struct fe_priv *np = netdev_priv(dev); |
2995 | u32 flags; |
2996 | u32 vlanflags = 0; |
2997 | int rx_work = 0; |
2998 | struct sk_buff *skb; |
2999 | int len; |
3000 | |
3001 | while ((np->get_rx.ex != np->put_rx.ex) && |
3002 | !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && |
3003 | (rx_work < limit)) { |
3004 | |
3005 | /* |
3006 | * the packet is for us - immediately tear down the pci mapping. |
3007 | * TODO: check if a prefetch of the first cacheline improves |
3008 | * the performance. |
3009 | */ |
3010 | dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, |
3011 | np->get_rx_ctx->dma_len, |
3012 | DMA_FROM_DEVICE); |
3013 | skb = np->get_rx_ctx->skb; |
3014 | np->get_rx_ctx->skb = NULL; |
3015 | |
3016 | /* look at what we actually got: */ |
3017 | if (likely(flags & NV_RX2_DESCRIPTORVALID)) { |
3018 | len = flags & LEN_MASK_V2; |
3019 | if (unlikely(flags & NV_RX2_ERROR)) { |
3020 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { |
					len = nv_getlen(dev, skb->data, len);
3022 | if (len < 0) { |
3023 | dev_kfree_skb(skb); |
3024 | goto next_pkt; |
3025 | } |
3026 | } |
3027 | /* framing errors are soft errors */ |
3028 | else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { |
3029 | if (flags & NV_RX2_SUBTRACT1) |
3030 | len--; |
3031 | } |
3032 | /* the rest are hard errors */ |
3033 | else { |
3034 | dev_kfree_skb(skb); |
3035 | goto next_pkt; |
3036 | } |
3037 | } |
3038 | |
3039 | if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ |
3040 | ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ |
3041 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3042 | |
3043 | /* got a valid packet - forward it to the network core */ |
3044 | skb_put(skb, len); |
3045 | skb->protocol = eth_type_trans(skb, dev); |
3046 | prefetch(skb->data); |
3047 | |
3048 | vlanflags = le32_to_cpu(np->get_rx.ex->buflow); |
3049 | |
3050 | /* |
3051 | * There's need to check for NETIF_F_HW_VLAN_CTAG_RX |
3052 | * here. Even if vlan rx accel is disabled, |
3053 | * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. |
3054 | */ |
3055 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && |
3056 | vlanflags & NV_RX3_VLAN_TAG_PRESENT) { |
3057 | u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; |
3058 | |
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3060 | } |
			napi_gro_receive(&np->napi, skb);
			u64_stats_update_begin(&np->swstats_rx_syncp);
			nv_txrx_stats_inc(stat_rx_packets);
			nv_txrx_stats_add(stat_rx_bytes, len);
			u64_stats_update_end(&np->swstats_rx_syncp);
3066 | } else { |
3067 | dev_kfree_skb(skb); |
3068 | } |
3069 | next_pkt: |
3070 | if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) |
3071 | np->get_rx.ex = np->rx_ring.ex; |
3072 | if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) |
3073 | np->get_rx_ctx = np->rx_skb; |
3074 | |
3075 | rx_work++; |
3076 | } |
3077 | |
3078 | return rx_work; |
3079 | } |
3080 | |
3081 | static void set_bufsize(struct net_device *dev) |
3082 | { |
3083 | struct fe_priv *np = netdev_priv(dev); |
3084 | |
3085 | if (dev->mtu <= ETH_DATA_LEN) |
3086 | np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; |
3087 | else |
3088 | np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; |
3089 | } |
3090 | |
3091 | /* |
3092 | * nv_change_mtu: dev->change_mtu function |
3093 | * Called with dev_base_lock held for read. |
3094 | */ |
3095 | static int nv_change_mtu(struct net_device *dev, int new_mtu) |
3096 | { |
3097 | struct fe_priv *np = netdev_priv(dev); |
3098 | int old_mtu; |
3099 | |
3100 | old_mtu = dev->mtu; |
3101 | dev->mtu = new_mtu; |
3102 | |
3103 | /* return early if the buffer sizes will not change */ |
3104 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) |
3105 | return 0; |
3106 | |
3107 | /* synchronized against open : rtnl_lock() held by caller */ |
3108 | if (netif_running(dev)) { |
3109 | u8 __iomem *base = get_hwbase(dev); |
3110 | /* |
3111 | * It seems that the nic preloads valid ring entries into an |
3112 | * internal buffer. The procedure for flushing everything is |
3113 | * guessed, there is probably a simpler approach. |
3114 | * Changing the MTU is a rare event, it shouldn't matter. |
3115 | */ |
3116 | nv_disable_irq(dev); |
3117 | nv_napi_disable(dev); |
3118 | netif_tx_lock_bh(dev); |
3119 | netif_addr_lock(dev); |
spin_lock(&np->lock);
3121 | /* stop engines */ |
3122 | nv_stop_rxtx(dev); |
3123 | nv_txrx_reset(dev); |
3124 | /* drain rx queue */ |
3125 | nv_drain_rxtx(dev); |
3126 | /* reinit driver view of the rx queue */ |
3127 | set_bufsize(dev); |
3128 | if (nv_init_ring(dev)) { |
3129 | if (!np->in_shutdown) |
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3131 | } |
3132 | /* reinit nic view of the rx queue */ |
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3139 | pci_push(base); |
3140 | |
3141 | /* restart rx engine */ |
3142 | nv_start_rxtx(dev); |
spin_unlock(&np->lock);
3144 | netif_addr_unlock(dev); |
3145 | netif_tx_unlock_bh(dev); |
3146 | nv_napi_enable(dev); |
3147 | nv_enable_irq(dev); |
3148 | } |
3149 | return 0; |
3150 | } |
3151 | |
3152 | static void nv_copy_mac_to_hw(struct net_device *dev) |
3153 | { |
3154 | u8 __iomem *base = get_hwbase(dev); |
3155 | u32 mac[2]; |
3156 | |
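/* pack the six address octets little-endian into the two MAC
 * address registers: bytes 0-3 go into NvRegMacAddrA, bytes 4-5
 * into NvRegMacAddrB
 */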
3157 | mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + |
3158 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); |
3159 | mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); |
3160 | |
writel(mac[0], base + NvRegMacAddrA);
writel(mac[1], base + NvRegMacAddrB);
3163 | } |
3164 | |
3165 | /* |
3166 | * nv_set_mac_address: dev->set_mac_address function |
3167 | * Called with rtnl_lock() held. |
3168 | */ |
3169 | static int nv_set_mac_address(struct net_device *dev, void *addr) |
3170 | { |
3171 | struct fe_priv *np = netdev_priv(dev); |
3172 | struct sockaddr *macaddr = (struct sockaddr *)addr; |
3173 | |
if (!is_valid_ether_addr(macaddr->sa_data))
3175 | return -EADDRNOTAVAIL; |
3176 | |
3177 | /* synchronized against open : rtnl_lock() held by caller */ |
eth_hw_addr_set(dev, macaddr->sa_data);
3179 | |
3180 | if (netif_running(dev)) { |
3181 | netif_tx_lock_bh(dev); |
3182 | netif_addr_lock(dev); |
spin_lock_irq(&np->lock);
3184 | |
3185 | /* stop rx engine */ |
3186 | nv_stop_rx(dev); |
3187 | |
3188 | /* set mac address */ |
3189 | nv_copy_mac_to_hw(dev); |
3190 | |
3191 | /* restart rx engine */ |
3192 | nv_start_rx(dev); |
spin_unlock_irq(&np->lock);
3194 | netif_addr_unlock(dev); |
3195 | netif_tx_unlock_bh(dev); |
3196 | } else { |
3197 | nv_copy_mac_to_hw(dev); |
3198 | } |
3199 | return 0; |
3200 | } |
3201 | |
3202 | /* |
3203 | * nv_set_multicast: dev->set_multicast function |
3204 | * Called with netif_tx_lock held. |
3205 | */ |
3206 | static void nv_set_multicast(struct net_device *dev) |
3207 | { |
3208 | struct fe_priv *np = netdev_priv(dev); |
3209 | u8 __iomem *base = get_hwbase(dev); |
3210 | u32 addr[2]; |
3211 | u32 mask[2]; |
u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3213 | |
3214 | memset(addr, 0, sizeof(addr)); |
3215 | memset(mask, 0, sizeof(mask)); |
3216 | |
3217 | if (dev->flags & IFF_PROMISC) { |
3218 | pff |= NVREG_PFF_PROMISC; |
3219 | } else { |
3220 | pff |= NVREG_PFF_MYADDR; |
3221 | |
3222 | if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { |
3223 | u32 alwaysOff[2]; |
3224 | u32 alwaysOn[2]; |
3225 | |
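/* Sketch of the filter computed below: alwaysOn accumulates the
 * bits that are 1 in every multicast address, alwaysOff the bits
 * that are 0 in every one. addr then carries the common value and
 * mask the bit positions shared by all addresses, so the hardware
 * filter is presumed to match at least every subscribed address.
 */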
3226 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; |
3227 | if (dev->flags & IFF_ALLMULTI) { |
3228 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; |
3229 | } else { |
3230 | struct netdev_hw_addr *ha; |
3231 | |
3232 | netdev_for_each_mc_addr(ha, dev) { |
3233 | unsigned char *hw_addr = ha->addr; |
3234 | u32 a, b; |
3235 | |
3236 | a = le32_to_cpu(*(__le32 *) hw_addr); |
3237 | b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); |
3238 | alwaysOn[0] &= a; |
3239 | alwaysOff[0] &= ~a; |
3240 | alwaysOn[1] &= b; |
3241 | alwaysOff[1] &= ~b; |
3242 | } |
3243 | } |
3244 | addr[0] = alwaysOn[0]; |
3245 | addr[1] = alwaysOn[1]; |
3246 | mask[0] = alwaysOn[0] | alwaysOff[0]; |
3247 | mask[1] = alwaysOn[1] | alwaysOff[1]; |
3248 | } else { |
3249 | mask[0] = NVREG_MCASTMASKA_NONE; |
3250 | mask[1] = NVREG_MCASTMASKB_NONE; |
3251 | } |
3252 | } |
3253 | addr[0] |= NVREG_MCASTADDRA_FORCE; |
3254 | pff |= NVREG_PFF_ALWAYS; |
spin_lock_irq(&np->lock);
nv_stop_rx(dev);
writel(addr[0], base + NvRegMulticastAddrA);
writel(addr[1], base + NvRegMulticastAddrB);
writel(mask[0], base + NvRegMulticastMaskA);
writel(mask[1], base + NvRegMulticastMaskB);
writel(pff, base + NvRegPacketFilterFlags);
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
3264 | } |
3265 | |
3266 | static void nv_update_pause(struct net_device *dev, u32 pause_flags) |
3267 | { |
3268 | struct fe_priv *np = netdev_priv(dev); |
3269 | u8 __iomem *base = get_hwbase(dev); |
3270 | |
3271 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); |
3272 | |
3273 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { |
u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
} else {
writel(pff, base + NvRegPacketFilterFlags);
3280 | } |
3281 | } |
3282 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { |
u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3284 | if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { |
3285 | u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; |
3286 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) |
3287 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; |
3288 | if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { |
3289 | pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; |
3290 | /* limit the number of tx pause frames to a default of 8 */ |
writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
}
writel(pause_enable, base + NvRegTxPauseFrame);
writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
} else {
writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
writel(regmisc, base + NvRegMisc1);
3299 | } |
3300 | } |
3301 | } |
3302 | |
3303 | static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) |
3304 | { |
3305 | struct fe_priv *np = netdev_priv(dev); |
3306 | u8 __iomem *base = get_hwbase(dev); |
3307 | u32 phyreg, txreg; |
3308 | int mii_status; |
3309 | |
3310 | np->linkspeed = NVREG_LINKSPEED_FORCE|speed; |
3311 | np->duplex = duplex; |
3312 | |
3313 | /* see if gigabit phy */ |
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3315 | if (mii_status & PHY_GIGABIT) { |
3316 | np->gigabit = PHY_GIGABIT; |
phyreg = readl(base + NvRegSlotTime);
3318 | phyreg &= ~(0x3FF00); |
3319 | if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) |
3320 | phyreg |= NVREG_SLOTTIME_10_100_FULL; |
3321 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) |
3322 | phyreg |= NVREG_SLOTTIME_10_100_FULL; |
3323 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) |
3324 | phyreg |= NVREG_SLOTTIME_1000_FULL; |
writel(phyreg, base + NvRegSlotTime);
3326 | } |
3327 | |
phyreg = readl(base + NvRegPhyInterface);
3329 | phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); |
3330 | if (np->duplex == 0) |
3331 | phyreg |= PHY_HALF; |
3332 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) |
3333 | phyreg |= PHY_100; |
3334 | else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == |
3335 | NVREG_LINKSPEED_1000) |
3336 | phyreg |= PHY_1000; |
writel(phyreg, base + NvRegPhyInterface);
3338 | |
3339 | if (phyreg & PHY_RGMII) { |
3340 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == |
3341 | NVREG_LINKSPEED_1000) |
3342 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; |
3343 | else |
3344 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; |
3345 | } else { |
3346 | txreg = NVREG_TX_DEFERRAL_DEFAULT; |
3347 | } |
writel(txreg, base + NvRegTxDeferral);
3349 | |
3350 | if (np->desc_ver == DESC_VER_1) { |
3351 | txreg = NVREG_TX_WM_DESC1_DEFAULT; |
3352 | } else { |
3353 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == |
3354 | NVREG_LINKSPEED_1000) |
3355 | txreg = NVREG_TX_WM_DESC2_3_1000; |
3356 | else |
3357 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; |
3358 | } |
writel(txreg, base + NvRegTxWatermark);
3360 | |
writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
3366 | } |
3367 | |
3368 | /** |
3369 | * nv_update_linkspeed - Setup the MAC according to the link partner |
3370 | * @dev: Network device to be configured |
3371 | * |
3372 | * The function queries the PHY and checks if there is a link partner. |
3373 | * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is |
3374 | * set to 10 MBit HD. |
3375 | * |
3376 | * The function returns 0 if there is no link partner and 1 if there is |
3377 | * a good link partner. |
3378 | */ |
3379 | static int nv_update_linkspeed(struct net_device *dev) |
3380 | { |
3381 | struct fe_priv *np = netdev_priv(dev); |
3382 | u8 __iomem *base = get_hwbase(dev); |
3383 | int adv = 0; |
3384 | int lpa = 0; |
3385 | int adv_lpa, adv_pause, lpa_pause; |
3386 | int newls = np->linkspeed; |
3387 | int newdup = np->duplex; |
3388 | int mii_status; |
3389 | u32 bmcr; |
3390 | int retval = 0; |
3391 | u32 control_1000, status_1000, phyreg, pause_flags, txreg; |
3392 | u32 txrxFlags = 0; |
3393 | u32 phy_exp; |
3394 | |
3395 | /* If device loopback is enabled, set carrier on and enable max link |
3396 | * speed. |
3397 | */ |
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3399 | if (bmcr & BMCR_LOOPBACK) { |
3400 | if (netif_running(dev)) { |
nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3402 | if (!netif_carrier_ok(dev)) |
3403 | netif_carrier_on(dev); |
3404 | } |
3405 | return 1; |
3406 | } |
3407 | |
3408 | /* BMSR_LSTATUS is latched, read it twice: |
3409 | * we want the current value. |
3410 | */ |
mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3413 | |
3414 | if (!(mii_status & BMSR_LSTATUS)) { |
3415 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3416 | newdup = 0; |
3417 | retval = 0; |
3418 | goto set_speed; |
3419 | } |
3420 | |
3421 | if (np->autoneg == 0) { |
3422 | if (np->fixed_mode & LPA_100FULL) { |
3423 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
3424 | newdup = 1; |
3425 | } else if (np->fixed_mode & LPA_100HALF) { |
3426 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
3427 | newdup = 0; |
3428 | } else if (np->fixed_mode & LPA_10FULL) { |
3429 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3430 | newdup = 1; |
3431 | } else { |
3432 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3433 | newdup = 0; |
3434 | } |
3435 | retval = 1; |
3436 | goto set_speed; |
3437 | } |
3438 | /* check auto negotiation is complete */ |
3439 | if (!(mii_status & BMSR_ANEGCOMPLETE)) { |
3440 | /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ |
3441 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3442 | newdup = 0; |
3443 | retval = 0; |
3444 | goto set_speed; |
3445 | } |
3446 | |
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3449 | |
3450 | retval = 1; |
3451 | if (np->gigabit == PHY_GIGABIT) { |
control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3454 | |
3455 | if ((control_1000 & ADVERTISE_1000FULL) && |
3456 | (status_1000 & LPA_1000FULL)) { |
3457 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; |
3458 | newdup = 1; |
3459 | goto set_speed; |
3460 | } |
3461 | } |
3462 | |
3463 | /* FIXME: handle parallel detection properly */ |
3464 | adv_lpa = lpa & adv; |
3465 | if (adv_lpa & LPA_100FULL) { |
3466 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
3467 | newdup = 1; |
3468 | } else if (adv_lpa & LPA_100HALF) { |
3469 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
3470 | newdup = 0; |
3471 | } else if (adv_lpa & LPA_10FULL) { |
3472 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3473 | newdup = 1; |
3474 | } else if (adv_lpa & LPA_10HALF) { |
3475 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3476 | newdup = 0; |
3477 | } else { |
3478 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
3479 | newdup = 0; |
3480 | } |
3481 | |
3482 | set_speed: |
3483 | if (np->duplex == newdup && np->linkspeed == newls) |
3484 | return retval; |
3485 | |
3486 | np->duplex = newdup; |
3487 | np->linkspeed = newls; |
3488 | |
/* The transmitter and receiver must be restarted for a safe update */
if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3491 | txrxFlags |= NV_RESTART_TX; |
3492 | nv_stop_tx(dev); |
3493 | } |
if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3495 | txrxFlags |= NV_RESTART_RX; |
3496 | nv_stop_rx(dev); |
3497 | } |
3498 | |
3499 | if (np->gigabit == PHY_GIGABIT) { |
phyreg = readl(base + NvRegSlotTime);
3501 | phyreg &= ~(0x3FF00); |
3502 | if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || |
3503 | ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) |
3504 | phyreg |= NVREG_SLOTTIME_10_100_FULL; |
3505 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) |
3506 | phyreg |= NVREG_SLOTTIME_1000_FULL; |
writel(phyreg, base + NvRegSlotTime);
3508 | } |
3509 | |
phyreg = readl(base + NvRegPhyInterface);
3511 | phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); |
3512 | if (np->duplex == 0) |
3513 | phyreg |= PHY_HALF; |
3514 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) |
3515 | phyreg |= PHY_100; |
3516 | else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) |
3517 | phyreg |= PHY_1000; |
writel(phyreg, base + NvRegPhyInterface);
3519 | |
phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3521 | if (phyreg & PHY_RGMII) { |
3522 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { |
3523 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; |
3524 | } else { |
3525 | if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { |
3526 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) |
3527 | txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; |
3528 | else |
3529 | txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; |
3530 | } else { |
3531 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; |
3532 | } |
3533 | } |
3534 | } else { |
3535 | if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) |
3536 | txreg = NVREG_TX_DEFERRAL_MII_STRETCH; |
3537 | else |
3538 | txreg = NVREG_TX_DEFERRAL_DEFAULT; |
3539 | } |
writel(txreg, base + NvRegTxDeferral);
3541 | |
3542 | if (np->desc_ver == DESC_VER_1) { |
3543 | txreg = NVREG_TX_WM_DESC1_DEFAULT; |
3544 | } else { |
3545 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) |
3546 | txreg = NVREG_TX_WM_DESC2_3_1000; |
3547 | else |
3548 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; |
3549 | } |
writel(txreg, base + NvRegTxWatermark);
3551 | |
writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
3557 | |
3558 | pause_flags = 0; |
3559 | /* setup pause frame */ |
3560 | if (netif_running(dev) && (np->duplex != 0)) { |
3561 | if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { |
3562 | adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
3563 | lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); |
3564 | |
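/* Resolve pause usage from our advertisement and the link partner
 * ability; the cases below follow the symmetric/asymmetric pause
 * resolution of IEEE 802.3 Annex 28B.
 */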
3565 | switch (adv_pause) { |
3566 | case ADVERTISE_PAUSE_CAP: |
3567 | if (lpa_pause & LPA_PAUSE_CAP) { |
3568 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
3569 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) |
3570 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
3571 | } |
3572 | break; |
3573 | case ADVERTISE_PAUSE_ASYM: |
3574 | if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) |
3575 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
3576 | break; |
3577 | case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM: |
3578 | if (lpa_pause & LPA_PAUSE_CAP) { |
3579 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
3580 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) |
3581 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
3582 | } |
3583 | if (lpa_pause == LPA_PAUSE_ASYM) |
3584 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
3585 | break; |
3586 | } |
3587 | } else { |
3588 | pause_flags = np->pause_flags; |
3589 | } |
3590 | } |
3591 | nv_update_pause(dev, pause_flags); |
3592 | |
3593 | if (txrxFlags & NV_RESTART_TX) |
3594 | nv_start_tx(dev); |
3595 | if (txrxFlags & NV_RESTART_RX) |
3596 | nv_start_rx(dev); |
3597 | |
3598 | return retval; |
3599 | } |
3600 | |
3601 | static void nv_linkchange(struct net_device *dev) |
3602 | { |
3603 | if (nv_update_linkspeed(dev)) { |
3604 | if (!netif_carrier_ok(dev)) { |
3605 | netif_carrier_on(dev); |
netdev_info(dev, "link up\n");
nv_txrx_gate(dev, false);
nv_start_rx(dev);
}
} else {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
netdev_info(dev, "link down\n");
nv_txrx_gate(dev, true);
nv_stop_rx(dev);
3616 | } |
3617 | } |
3618 | } |
3619 | |
3620 | static void nv_link_irq(struct net_device *dev) |
3621 | { |
3622 | u8 __iomem *base = get_hwbase(dev); |
3623 | u32 miistat; |
3624 | |
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3627 | |
3628 | if (miistat & (NVREG_MIISTAT_LINKCHANGE)) |
3629 | nv_linkchange(dev); |
3630 | } |
3631 | |
3632 | static void nv_msi_workaround(struct fe_priv *np) |
3633 | { |
3634 | |
/* Need to toggle the msi irq mask within the ethernet device;
 * otherwise future interrupts will not be detected.
 */
if (np->msi_flags & NV_MSI_ENABLED) {
u8 __iomem *base = np->base;

writel(0, base + NvRegMSIIrqMask);
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3643 | } |
3644 | } |
3645 | |
3646 | static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) |
3647 | { |
3648 | struct fe_priv *np = netdev_priv(dev); |
3649 | |
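/* Dynamic interrupt moderation: a burst of work switches the nic
 * to timer/poll based interrupts at once, while a sustained quiet
 * period (NV_DYNAMIC_MAX_QUIET_COUNT polls, tracked in quiet_count
 * as hysteresis) switches back to per-packet interrupts. Returns 1
 * when np->irqmask was changed.
 */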
3650 | if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { |
3651 | if (total_work > NV_DYNAMIC_THRESHOLD) { |
3652 | /* transition to poll based interrupts */ |
3653 | np->quiet_count = 0; |
3654 | if (np->irqmask != NVREG_IRQMASK_CPU) { |
3655 | np->irqmask = NVREG_IRQMASK_CPU; |
3656 | return 1; |
3657 | } |
3658 | } else { |
3659 | if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { |
3660 | np->quiet_count++; |
3661 | } else { |
3662 | /* reached a period of low activity, switch |
3663 | to per tx/rx packet interrupts */ |
3664 | if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { |
3665 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
3666 | return 1; |
3667 | } |
3668 | } |
3669 | } |
3670 | } |
3671 | return 0; |
3672 | } |
3673 | |
3674 | static irqreturn_t nv_nic_irq(int foo, void *data) |
3675 | { |
3676 | struct net_device *dev = (struct net_device *) data; |
3677 | struct fe_priv *np = netdev_priv(dev); |
3678 | u8 __iomem *base = get_hwbase(dev); |
3679 | |
3680 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
} else {
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
3686 | } |
3687 | if (!(np->events & np->irqmask)) |
3688 | return IRQ_NONE; |
3689 | |
3690 | nv_msi_workaround(np); |
3691 | |
if (napi_schedule_prep(&np->napi)) {
/*
 * Disable further irqs (msix not enabled with napi)
 */
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
3698 | } |
3699 | |
3700 | return IRQ_HANDLED; |
3701 | } |
3702 | |
/* All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
3707 | static irqreturn_t nv_nic_irq_optimized(int foo, void *data) |
3708 | { |
3709 | struct net_device *dev = (struct net_device *) data; |
3710 | struct fe_priv *np = netdev_priv(dev); |
3711 | u8 __iomem *base = get_hwbase(dev); |
3712 | |
3713 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
} else {
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
3719 | } |
3720 | if (!(np->events & np->irqmask)) |
3721 | return IRQ_NONE; |
3722 | |
3723 | nv_msi_workaround(np); |
3724 | |
if (napi_schedule_prep(&np->napi)) {
/*
 * Disable further irqs (msix not enabled with napi)
 */
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
3731 | } |
3732 | |
3733 | return IRQ_HANDLED; |
3734 | } |
3735 | |
3736 | static irqreturn_t nv_nic_irq_tx(int foo, void *data) |
3737 | { |
3738 | struct net_device *dev = (struct net_device *) data; |
3739 | struct fe_priv *np = netdev_priv(dev); |
3740 | u8 __iomem *base = get_hwbase(dev); |
3741 | u32 events; |
3742 | int i; |
3743 | unsigned long flags; |
3744 | |
3745 | for (i = 0;; i++) { |
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "tx irq events: %08x\n", events);
3749 | if (!(events & np->irqmask)) |
3750 | break; |
3751 | |
3752 | spin_lock_irqsave(&np->lock, flags); |
3753 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
spin_unlock_irqrestore(&np->lock, flags);
3755 | |
3756 | if (unlikely(i > max_interrupt_work)) { |
3757 | spin_lock_irqsave(&np->lock, flags); |
3758 | /* disable interrupts on the nic */ |
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3760 | pci_push(base); |
3761 | |
3762 | if (!np->in_shutdown) { |
3763 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; |
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3765 | } |
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
3769 | break; |
3770 | } |
3771 | |
3772 | } |
3773 | |
3774 | return IRQ_RETVAL(i); |
3775 | } |
3776 | |
3777 | static int nv_napi_poll(struct napi_struct *napi, int budget) |
3778 | { |
3779 | struct fe_priv *np = container_of(napi, struct fe_priv, napi); |
3780 | struct net_device *dev = np->dev; |
3781 | u8 __iomem *base = get_hwbase(dev); |
3782 | unsigned long flags; |
3783 | int retcode; |
3784 | int rx_count, tx_work = 0, rx_work = 0; |
3785 | |
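/* Poll loop sketch: reap completed tx work under the lock, process
 * rx packets against the remaining budget and refill the rx ring;
 * loop while the refill succeeds, rx made progress and budget
 * remains.
 */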
3786 | do { |
3787 | if (!nv_optimized(np)) { |
3788 | spin_lock_irqsave(&np->lock, flags); |
tx_work += nv_tx_done(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);

rx_count = nv_rx_process(dev, budget - rx_work);
retcode = nv_alloc_rx(dev);
} else {
spin_lock_irqsave(&np->lock, flags);
tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);

rx_count = nv_rx_process_optimized(dev,
budget - rx_work);
retcode = nv_alloc_rx_optimized(dev);
3802 | } |
3803 | } while (retcode == 0 && |
3804 | rx_count > 0 && (rx_work += rx_count) < budget); |
3805 | |
3806 | if (retcode) { |
3807 | spin_lock_irqsave(&np->lock, flags); |
3808 | if (!np->in_shutdown) |
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irqrestore(&np->lock, flags);
3811 | } |
3812 | |
nv_change_interrupt_mode(dev, tx_work + rx_work);
3814 | |
3815 | if (unlikely(np->events & NVREG_IRQ_LINK)) { |
3816 | spin_lock_irqsave(&np->lock, flags); |
3817 | nv_link_irq(dev); |
spin_unlock_irqrestore(&np->lock, flags);
3819 | } |
3820 | if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { |
3821 | spin_lock_irqsave(&np->lock, flags); |
3822 | nv_linkchange(dev); |
spin_unlock_irqrestore(&np->lock, flags);
3824 | np->link_timeout = jiffies + LINK_TIMEOUT; |
3825 | } |
3826 | if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { |
3827 | spin_lock_irqsave(&np->lock, flags); |
3828 | if (!np->in_shutdown) { |
3829 | np->nic_poll_irq = np->irqmask; |
3830 | np->recover_error = 1; |
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
napi_complete(napi);
3835 | return rx_work; |
3836 | } |
3837 | |
3838 | if (rx_work < budget) { |
/* re-enable interrupts (msix not enabled in napi) */
napi_complete_done(napi, rx_work);

writel(np->irqmask, base + NvRegIrqMask);
3844 | } |
3845 | return rx_work; |
3846 | } |
3847 | |
3848 | static irqreturn_t nv_nic_irq_rx(int foo, void *data) |
3849 | { |
3850 | struct net_device *dev = (struct net_device *) data; |
3851 | struct fe_priv *np = netdev_priv(dev); |
3852 | u8 __iomem *base = get_hwbase(dev); |
3853 | u32 events; |
3854 | int i; |
3855 | unsigned long flags; |
3856 | |
3857 | for (i = 0;; i++) { |
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "rx irq events: %08x\n", events);
3861 | if (!(events & np->irqmask)) |
3862 | break; |
3863 | |
3864 | if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { |
3865 | if (unlikely(nv_alloc_rx_optimized(dev))) { |
3866 | spin_lock_irqsave(&np->lock, flags); |
3867 | if (!np->in_shutdown) |
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irqrestore(&np->lock, flags);
3870 | } |
3871 | } |
3872 | |
3873 | if (unlikely(i > max_interrupt_work)) { |
3874 | spin_lock_irqsave(&np->lock, flags); |
3875 | /* disable interrupts on the nic */ |
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3877 | pci_push(base); |
3878 | |
3879 | if (!np->in_shutdown) { |
3880 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; |
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
3886 | break; |
3887 | } |
3888 | } |
3889 | |
3890 | return IRQ_RETVAL(i); |
3891 | } |
3892 | |
3893 | static irqreturn_t nv_nic_irq_other(int foo, void *data) |
3894 | { |
3895 | struct net_device *dev = (struct net_device *) data; |
3896 | struct fe_priv *np = netdev_priv(dev); |
3897 | u8 __iomem *base = get_hwbase(dev); |
3898 | u32 events; |
3899 | int i; |
3900 | unsigned long flags; |
3901 | |
3902 | for (i = 0;; i++) { |
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
writel(events, base + NvRegMSIXIrqStatus);
netdev_dbg(dev, "irq events: %08x\n", events);
3906 | if (!(events & np->irqmask)) |
3907 | break; |
3908 | |
3909 | /* check tx in case we reached max loop limit in tx isr */ |
3910 | spin_lock_irqsave(&np->lock, flags); |
3911 | nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); |
spin_unlock_irqrestore(&np->lock, flags);
3913 | |
3914 | if (events & NVREG_IRQ_LINK) { |
3915 | spin_lock_irqsave(&np->lock, flags); |
3916 | nv_link_irq(dev); |
spin_unlock_irqrestore(&np->lock, flags);
3918 | } |
3919 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { |
3920 | spin_lock_irqsave(&np->lock, flags); |
3921 | nv_linkchange(dev); |
spin_unlock_irqrestore(&np->lock, flags);
3923 | np->link_timeout = jiffies + LINK_TIMEOUT; |
3924 | } |
3925 | if (events & NVREG_IRQ_RECOVER_ERROR) { |
3926 | spin_lock_irqsave(&np->lock, flags); |
3927 | /* disable interrupts on the nic */ |
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3929 | pci_push(base); |
3930 | |
3931 | if (!np->in_shutdown) { |
3932 | np->nic_poll_irq |= NVREG_IRQ_OTHER; |
3933 | np->recover_error = 1; |
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
3937 | break; |
3938 | } |
3939 | if (unlikely(i > max_interrupt_work)) { |
3940 | spin_lock_irqsave(&np->lock, flags); |
3941 | /* disable interrupts on the nic */ |
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3943 | pci_push(base); |
3944 | |
3945 | if (!np->in_shutdown) { |
3946 | np->nic_poll_irq |= NVREG_IRQ_OTHER; |
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
netdev_dbg(dev, "%s: too many iterations (%d)\n",
__func__, i);
3952 | break; |
3953 | } |
3954 | |
3955 | } |
3956 | |
3957 | return IRQ_RETVAL(i); |
3958 | } |
3959 | |
3960 | static irqreturn_t nv_nic_irq_test(int foo, void *data) |
3961 | { |
3962 | struct net_device *dev = (struct net_device *) data; |
3963 | struct fe_priv *np = netdev_priv(dev); |
3964 | u8 __iomem *base = get_hwbase(dev); |
3965 | u32 events; |
3966 | |
3967 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
} else {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3973 | } |
3974 | pci_push(base); |
3975 | if (!(events & NVREG_IRQ_TIMER)) |
3976 | return IRQ_RETVAL(0); |
3977 | |
3978 | nv_msi_workaround(np); |
3979 | |
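/* record that the test interrupt arrived; np->intr_test is
 * assumed to be checked by the ethtool self-test path to verify
 * interrupt delivery
 */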
spin_lock(&np->lock);
np->intr_test = 1;
spin_unlock(&np->lock);
3983 | |
3984 | return IRQ_RETVAL(1); |
3985 | } |
3986 | |
3987 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) |
3988 | { |
3989 | u8 __iomem *base = get_hwbase(dev); |
3990 | int i; |
3991 | u32 msixmap = 0; |
3992 | |
3993 | /* Each interrupt bit can be mapped to a MSIX vector (4 bits). |
3994 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents |
3995 | * the remaining 8 interrupts. |
3996 | */ |
3997 | for (i = 0; i < 8; i++) { |
3998 | if ((irqmask >> i) & 0x1) |
3999 | msixmap |= vector << (i << 2); |
4000 | } |
writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
4002 | |
4003 | msixmap = 0; |
4004 | for (i = 0; i < 8; i++) { |
4005 | if ((irqmask >> (i + 8)) & 0x1) |
4006 | msixmap |= vector << (i << 2); |
4007 | } |
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
4009 | } |
4010 | |
4011 | static int nv_request_irq(struct net_device *dev, int intr_test) |
4012 | { |
4013 | struct fe_priv *np = get_nvpriv(dev); |
4014 | u8 __iomem *base = get_hwbase(dev); |
4015 | int ret; |
4016 | int i; |
4017 | irqreturn_t (*handler)(int foo, void *data); |
4018 | |
4019 | if (intr_test) { |
4020 | handler = nv_nic_irq_test; |
4021 | } else { |
4022 | if (nv_optimized(np)) |
4023 | handler = nv_nic_irq_optimized; |
4024 | else |
4025 | handler = nv_nic_irq; |
4026 | } |
4027 | |
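/* Interrupt setup falls back in this order: MSI-X (with separate
 * rx/tx/other vectors in throughput mode), then MSI, then a legacy
 * shared INTx line.
 */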
4028 | if (np->msi_flags & NV_MSI_X_CAPABLE) { |
4029 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) |
4030 | np->msi_x_entry[i].entry = i; |
ret = pci_enable_msix_range(np->pci_dev,
np->msi_x_entry,
np->msi_flags & NV_MSI_X_VECTORS_MASK,
np->msi_flags & NV_MSI_X_VECTORS_MASK);
4035 | if (ret > 0) { |
4036 | np->msi_flags |= NV_MSI_X_ENABLED; |
4037 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { |
4038 | /* Request irq for rx handling */ |
sprintf(np->name_rx, "%s-rx", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
4048 | goto out_err; |
4049 | } |
4050 | /* Request irq for tx handling */ |
sprintf(np->name_tx, "%s-tx", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
4060 | goto out_free_rx; |
4061 | } |
4062 | /* Request irq for link and timer handling */ |
sprintf(np->name_other, "%s-other", dev->name);
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
4072 | goto out_free_tx; |
4073 | } |
4074 | /* map interrupts to their respective vector */ |
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
4077 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); |
4078 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); |
4079 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); |
4080 | } else { |
4081 | /* Request irq for all interrupts */ |
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
handler, IRQF_SHARED, dev->name, dev);
if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
4090 | goto out_err; |
4091 | } |
4092 | |
4093 | /* map interrupts to vector 0 */ |
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
4098 | return 0; |
4099 | } |
4100 | } |
4101 | if (np->msi_flags & NV_MSI_CAPABLE) { |
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
4111 | goto out_err; |
4112 | } |
4113 | |
4114 | /* map interrupts to vector 0 */ |
writel(0, base + NvRegMSIMap0);
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
4120 | return 0; |
4121 | } |
4122 | } |
4123 | |
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4125 | goto out_err; |
4126 | |
4127 | return 0; |
4128 | out_free_tx: |
4129 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); |
4130 | out_free_rx: |
4131 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); |
4132 | out_err: |
4133 | return 1; |
4134 | } |
4135 | |
4136 | static void nv_free_irq(struct net_device *dev) |
4137 | { |
4138 | struct fe_priv *np = get_nvpriv(dev); |
4139 | int i; |
4140 | |
4141 | if (np->msi_flags & NV_MSI_X_ENABLED) { |
4142 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) |
4143 | free_irq(np->msi_x_entry[i].vector, dev); |
pci_disable_msix(np->pci_dev);
4145 | np->msi_flags &= ~NV_MSI_X_ENABLED; |
4146 | } else { |
4147 | free_irq(np->pci_dev->irq, dev); |
4148 | if (np->msi_flags & NV_MSI_ENABLED) { |
pci_disable_msi(np->pci_dev);
4150 | np->msi_flags &= ~NV_MSI_ENABLED; |
4151 | } |
4152 | } |
4153 | } |
4154 | |
4155 | static void nv_do_nic_poll(struct timer_list *t) |
4156 | { |
4157 | struct fe_priv *np = from_timer(np, t, nic_poll); |
4158 | struct net_device *dev = np->dev; |
4159 | u8 __iomem *base = get_hwbase(dev); |
4160 | u32 mask = 0; |
4161 | unsigned long flags; |
4162 | unsigned int irq = 0; |
4163 | |
4164 | /* |
4165 | * First disable irq(s) and then |
4166 | * reenable interrupts on the nic, we have to do this before calling |
4167 | * nv_nic_irq because that may decide to do otherwise |
4168 | */ |
4169 | |
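/* Work out which irq line to quiesce and which nic mask bits to
 * restore: a single shared vector covers the whole irqmask, while
 * in multi-vector MSI-X mode only the sources flagged in
 * nic_poll_irq are serviced here.
 */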
4170 | if (!using_multi_irqs(dev)) { |
4171 | if (np->msi_flags & NV_MSI_X_ENABLED) |
4172 | irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; |
4173 | else |
4174 | irq = np->pci_dev->irq; |
4175 | mask = np->irqmask; |
4176 | } else { |
4177 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { |
4178 | irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; |
4179 | mask |= NVREG_IRQ_RX_ALL; |
4180 | } |
4181 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { |
4182 | irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; |
4183 | mask |= NVREG_IRQ_TX_ALL; |
4184 | } |
4185 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { |
4186 | irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; |
4187 | mask |= NVREG_IRQ_OTHER; |
4188 | } |
4189 | } |
4190 | |
disable_irq_nosync_lockdep_irqsave(irq, &flags);
4192 | synchronize_irq(irq); |
4193 | |
4194 | if (np->recover_error) { |
4195 | np->recover_error = 0; |
netdev_info(dev, "MAC in recoverable error state\n");
4197 | if (netif_running(dev)) { |
4198 | netif_tx_lock_bh(dev); |
4199 | netif_addr_lock(dev); |
spin_lock(&np->lock);
4201 | /* stop engines */ |
4202 | nv_stop_rxtx(dev); |
4203 | if (np->driver_data & DEV_HAS_POWER_CNTRL) |
4204 | nv_mac_reset(dev); |
4205 | nv_txrx_reset(dev); |
4206 | /* drain rx queue */ |
4207 | nv_drain_rxtx(dev); |
4208 | /* reinit driver view of the rx queue */ |
4209 | set_bufsize(dev); |
4210 | if (nv_init_ring(dev)) { |
4211 | if (!np->in_shutdown) |
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4213 | } |
4214 | /* reinit nic view of the rx queue */ |
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4221 | pci_push(base); |
4222 | /* clear interrupts */ |
4223 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4227 | |
4228 | /* restart rx engine */ |
4229 | nv_start_rxtx(dev); |
spin_unlock(&np->lock);
4231 | netif_addr_unlock(dev); |
4232 | netif_tx_unlock_bh(dev); |
4233 | } |
4234 | } |
4235 | |
writel(mask, base + NvRegIrqMask);
4237 | pci_push(base); |
4238 | |
4239 | if (!using_multi_irqs(dev)) { |
4240 | np->nic_poll_irq = 0; |
4241 | if (nv_optimized(np)) |
nv_nic_irq_optimized(0, dev);
else
nv_nic_irq(0, dev);
4245 | } else { |
4246 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { |
4247 | np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; |
nv_nic_irq_rx(0, dev);
4249 | } |
4250 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { |
4251 | np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; |
nv_nic_irq_tx(0, dev);
4253 | } |
4254 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { |
4255 | np->nic_poll_irq &= ~NVREG_IRQ_OTHER; |
nv_nic_irq_other(0, dev);
4257 | } |
4258 | } |
4259 | |
enable_irq_lockdep_irqrestore(irq, &flags);
4261 | } |
4262 | |
4263 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4264 | static void nv_poll_controller(struct net_device *dev) |
4265 | { |
4266 | struct fe_priv *np = netdev_priv(dev); |
4267 | |
nv_do_nic_poll(&np->nic_poll);
4269 | } |
4270 | #endif |
4271 | |
4272 | static void nv_do_stats_poll(struct timer_list *t) |
4273 | __acquires(&netdev_priv(dev)->hwstats_lock) |
4274 | __releases(&netdev_priv(dev)->hwstats_lock) |
4275 | { |
4276 | struct fe_priv *np = from_timer(np, t, stats_poll); |
4277 | struct net_device *dev = np->dev; |
4278 | |
/* If the lock is currently held, the stats are being refreshed
 * and are hence fresh enough */
if (spin_trylock(&np->hwstats_lock)) {
nv_update_stats(dev);
spin_unlock(&np->hwstats_lock);
4284 | } |
4285 | |
4286 | if (!np->in_shutdown) |
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
4289 | } |
4290 | |
4291 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
4292 | { |
4293 | struct fe_priv *np = netdev_priv(dev); |
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4297 | } |
4298 | |
4299 | static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) |
4300 | { |
4301 | struct fe_priv *np = netdev_priv(dev); |
4302 | wolinfo->supported = WAKE_MAGIC; |
4303 | |
spin_lock_irq(&np->lock);
4305 | if (np->wolenabled) |
4306 | wolinfo->wolopts = WAKE_MAGIC; |
spin_unlock_irq(&np->lock);
4308 | } |
4309 | |
4310 | static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) |
4311 | { |
4312 | struct fe_priv *np = netdev_priv(dev); |
4313 | u8 __iomem *base = get_hwbase(dev); |
4314 | u32 flags = 0; |
4315 | |
4316 | if (wolinfo->wolopts == 0) { |
4317 | np->wolenabled = 0; |
4318 | } else if (wolinfo->wolopts & WAKE_MAGIC) { |
4319 | np->wolenabled = 1; |
4320 | flags = NVREG_WAKEUPFLAGS_ENABLE; |
4321 | } |
4322 | if (netif_running(dev)) { |
spin_lock_irq(&np->lock);
writel(flags, base + NvRegWakeUpFlags);
spin_unlock_irq(&np->lock);
}
device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4328 | return 0; |
4329 | } |
4330 | |
4331 | static int nv_get_link_ksettings(struct net_device *dev, |
4332 | struct ethtool_link_ksettings *cmd) |
4333 | { |
4334 | struct fe_priv *np = netdev_priv(dev); |
4335 | u32 speed, supported, advertising; |
4336 | int adv; |
4337 | |
spin_lock_irq(&np->lock);
4339 | cmd->base.port = PORT_MII; |
4340 | if (!netif_running(dev)) { |
4341 | /* We do not track link speed / duplex setting if the |
4342 | * interface is disabled. Force a link check */ |
4343 | if (nv_update_linkspeed(dev)) { |
4344 | netif_carrier_on(dev); |
4345 | } else { |
4346 | netif_carrier_off(dev); |
4347 | } |
4348 | } |
4349 | |
4350 | if (netif_carrier_ok(dev)) { |
4351 | switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { |
4352 | case NVREG_LINKSPEED_10: |
4353 | speed = SPEED_10; |
4354 | break; |
4355 | case NVREG_LINKSPEED_100: |
4356 | speed = SPEED_100; |
4357 | break; |
4358 | case NVREG_LINKSPEED_1000: |
4359 | speed = SPEED_1000; |
4360 | break; |
4361 | default: |
4362 | speed = -1; |
4363 | break; |
4364 | } |
4365 | cmd->base.duplex = DUPLEX_HALF; |
4366 | if (np->duplex) |
4367 | cmd->base.duplex = DUPLEX_FULL; |
4368 | } else { |
4369 | speed = SPEED_UNKNOWN; |
4370 | cmd->base.duplex = DUPLEX_UNKNOWN; |
4371 | } |
4372 | cmd->base.speed = speed; |
4373 | cmd->base.autoneg = np->autoneg; |
4374 | |
4375 | advertising = ADVERTISED_MII; |
4376 | if (np->autoneg) { |
4377 | advertising |= ADVERTISED_Autoneg; |
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4379 | if (adv & ADVERTISE_10HALF) |
4380 | advertising |= ADVERTISED_10baseT_Half; |
4381 | if (adv & ADVERTISE_10FULL) |
4382 | advertising |= ADVERTISED_10baseT_Full; |
4383 | if (adv & ADVERTISE_100HALF) |
4384 | advertising |= ADVERTISED_100baseT_Half; |
4385 | if (adv & ADVERTISE_100FULL) |
4386 | advertising |= ADVERTISED_100baseT_Full; |
4387 | if (np->gigabit == PHY_GIGABIT) { |
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4389 | if (adv & ADVERTISE_1000FULL) |
4390 | advertising |= ADVERTISED_1000baseT_Full; |
4391 | } |
4392 | } |
4393 | supported = (SUPPORTED_Autoneg | |
4394 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | |
4395 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | |
4396 | SUPPORTED_MII); |
4397 | if (np->gigabit == PHY_GIGABIT) |
4398 | supported |= SUPPORTED_1000baseT_Full; |
4399 | |
4400 | cmd->base.phy_address = np->phyaddr; |
4401 | |
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
4406 | |
4407 | /* ignore maxtxpkt, maxrxpkt for now */ |
spin_unlock_irq(&np->lock);
4409 | return 0; |
4410 | } |
4411 | |
4412 | static int nv_set_link_ksettings(struct net_device *dev, |
4413 | const struct ethtool_link_ksettings *cmd) |
4414 | { |
4415 | struct fe_priv *np = netdev_priv(dev); |
4416 | u32 speed = cmd->base.speed; |
4417 | u32 advertising; |
4418 | |
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
4421 | |
4422 | if (cmd->base.port != PORT_MII) |
4423 | return -EINVAL; |
4424 | if (cmd->base.phy_address != np->phyaddr) { |
4425 | /* TODO: support switching between multiple phys. Should be |
4426 | * trivial, but not enabled due to lack of test hardware. */ |
4427 | return -EINVAL; |
4428 | } |
4429 | if (cmd->base.autoneg == AUTONEG_ENABLE) { |
4430 | u32 mask; |
4431 | |
4432 | mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | |
4433 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; |
4434 | if (np->gigabit == PHY_GIGABIT) |
4435 | mask |= ADVERTISED_1000baseT_Full; |
4436 | |
4437 | if ((advertising & mask) == 0) |
4438 | return -EINVAL; |
4439 | |
4440 | } else if (cmd->base.autoneg == AUTONEG_DISABLE) { |
4441 | /* Note: autonegotiation disable, speed 1000 intentionally |
4442 | * forbidden - no one should need that. */ |
4443 | |
4444 | if (speed != SPEED_10 && speed != SPEED_100) |
4445 | return -EINVAL; |
4446 | if (cmd->base.duplex != DUPLEX_HALF && |
4447 | cmd->base.duplex != DUPLEX_FULL) |
4448 | return -EINVAL; |
4449 | } else { |
4450 | return -EINVAL; |
4451 | } |
4452 | |
4453 | netif_carrier_off(dev); |
4454 | if (netif_running(dev)) { |
4455 | unsigned long flags; |
4456 | |
4457 | nv_disable_irq(dev); |
4458 | netif_tx_lock_bh(dev); |
4459 | netif_addr_lock(dev); |
/* lockdep complains if we take a plain spin_lock here */
4461 | spin_lock_irqsave(&np->lock, flags); |
4462 | /* stop engines */ |
4463 | /* FIXME: |
4464 | * this can take some time, and interrupts are disabled |
4465 | * due to spin_lock_irqsave, but let's hope no daemon |
4466 | * is going to change the settings very often... |
4467 | * Worst case: |
4468 | * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX |
4469 | * + some minor delays, which is up to a second approximately |
4470 | */ |
4471 | nv_stop_rxtx(dev); |
spin_unlock_irqrestore(&np->lock, flags);
4473 | netif_addr_unlock(dev); |
4474 | netif_tx_unlock_bh(dev); |
4475 | } |
4476 | |
4477 | if (cmd->base.autoneg == AUTONEG_ENABLE) { |
4478 | int adv, bmcr; |
4479 | |
4480 | np->autoneg = 1; |
4481 | |
4482 | /* advertise only what has been requested */ |
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4484 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
4485 | if (advertising & ADVERTISED_10baseT_Half) |
4486 | adv |= ADVERTISE_10HALF; |
4487 | if (advertising & ADVERTISED_10baseT_Full) |
4488 | adv |= ADVERTISE_10FULL; |
4489 | if (advertising & ADVERTISED_100baseT_Half) |
4490 | adv |= ADVERTISE_100HALF; |
4491 | if (advertising & ADVERTISED_100baseT_Full) |
4492 | adv |= ADVERTISE_100FULL; |
4493 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ |
4494 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
4495 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) |
4496 | adv |= ADVERTISE_PAUSE_ASYM; |
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4498 | |
4499 | if (np->gigabit == PHY_GIGABIT) { |
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4501 | adv &= ~ADVERTISE_1000FULL; |
4502 | if (advertising & ADVERTISED_1000baseT_Full) |
4503 | adv |= ADVERTISE_1000FULL; |
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4505 | } |
4506 | |
4507 | if (netif_running(dev)) |
netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4510 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
4511 | bmcr |= BMCR_ANENABLE; |
4512 | /* reset the phy in order for settings to stick, |
4513 | * and cause autoneg to start */ |
if (phy_reset(dev, bmcr)) {
netdev_info(dev, "phy reset failed\n");
4516 | return -EINVAL; |
4517 | } |
4518 | } else { |
4519 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4521 | } |
4522 | } else { |
4523 | int adv, bmcr; |
4524 | |
4525 | np->autoneg = 0; |
4526 | |
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4528 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
4529 | if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) |
4530 | adv |= ADVERTISE_10HALF; |
4531 | if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) |
4532 | adv |= ADVERTISE_10FULL; |
4533 | if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) |
4534 | adv |= ADVERTISE_100HALF; |
4535 | if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) |
4536 | adv |= ADVERTISE_100FULL; |
4537 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); |
4538 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ |
4539 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
4540 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
4541 | } |
4542 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { |
4543 | adv |= ADVERTISE_PAUSE_ASYM; |
4544 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
4545 | } |
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4547 | np->fixed_mode = adv; |
4548 | |
4549 | if (np->gigabit == PHY_GIGABIT) { |
adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4553 | } |
4554 | |
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4556 | bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); |
4557 | if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) |
4558 | bmcr |= BMCR_FULLDPLX; |
4559 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) |
4560 | bmcr |= BMCR_SPEED100; |
4561 | if (np->phy_oui == PHY_OUI_MARVELL) { |
4562 | /* reset the phy in order for forced mode settings to stick */ |
if (phy_reset(dev, bmcr)) {
netdev_info(dev, "phy reset failed\n");
4565 | return -EINVAL; |
4566 | } |
4567 | } else { |
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4569 | if (netif_running(dev)) { |
4570 | /* Wait a bit and then reconfigure the nic. */ |
4571 | udelay(10); |
4572 | nv_linkchange(dev); |
4573 | } |
4574 | } |
4575 | } |
4576 | |
4577 | if (netif_running(dev)) { |
4578 | nv_start_rxtx(dev); |
4579 | nv_enable_irq(dev); |
4580 | } |
4581 | |
4582 | return 0; |
4583 | } |
4584 | |
4585 | #define FORCEDETH_REGS_VER 1 |
4586 | |
4587 | static int nv_get_regs_len(struct net_device *dev) |
4588 | { |
4589 | struct fe_priv *np = netdev_priv(dev); |
4590 | return np->register_size; |
4591 | } |
4592 | |
4593 | static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) |
4594 | { |
4595 | struct fe_priv *np = netdev_priv(dev); |
4596 | u8 __iomem *base = get_hwbase(dev); |
4597 | u32 *rbuf = buf; |
4598 | int i; |
4599 | |
4600 | regs->version = FORCEDETH_REGS_VER; |
spin_lock_irq(&np->lock);
for (i = 0; i < np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
4605 | } |
4606 | |
4607 | static int nv_nway_reset(struct net_device *dev) |
4608 | { |
4609 | struct fe_priv *np = netdev_priv(dev); |
4610 | int ret; |
4611 | |
4612 | if (np->autoneg) { |
4613 | int bmcr; |
4614 | |
4615 | netif_carrier_off(dev); |
4616 | if (netif_running(dev)) { |
4617 | nv_disable_irq(dev); |
4618 | netif_tx_lock_bh(dev); |
4619 | netif_addr_lock(dev); |
spin_lock(&np->lock);
4621 | /* stop engines */ |
4622 | nv_stop_rxtx(dev); |
spin_unlock(&np->lock);
4624 | netif_addr_unlock(dev); |
4625 | netif_tx_unlock_bh(dev); |
netdev_info(dev, "link down\n");
4627 | } |
4628 | |
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4630 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
4631 | bmcr |= BMCR_ANENABLE; |
4632 | /* reset the phy in order for settings to stick*/ |
4633 | if (phy_reset(dev, bmcr_setup: bmcr)) { |
4634 | netdev_info(dev, format: "phy reset failed\n" ); |
4635 | return -EINVAL; |
4636 | } |
4637 | } else { |
4638 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
4639 | mii_rw(dev, addr: np->phyaddr, MII_BMCR, value: bmcr); |
4640 | } |
4641 | |
4642 | if (netif_running(dev)) { |
4643 | nv_start_rxtx(dev); |
4644 | nv_enable_irq(dev); |
4645 | } |
4646 | ret = 0; |
4647 | } else { |
4648 | ret = -EINVAL; |
4649 | } |
4650 | |
4651 | return ret; |
4652 | } |
4653 | |
4654 | static void nv_get_ringparam(struct net_device *dev, |
4655 | struct ethtool_ringparam *ring, |
4656 | struct kernel_ethtool_ringparam *kernel_ring, |
4657 | struct netlink_ext_ack *extack) |
4658 | { |
4659 | struct fe_priv *np = netdev_priv(dev); |
4660 | |
4661 | ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; |
4662 | ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; |
4663 | |
4664 | ring->rx_pending = np->rx_ring_size; |
4665 | ring->tx_pending = np->tx_ring_size; |
4666 | } |
4667 | |
4668 | static int nv_set_ringparam(struct net_device *dev, |
4669 | struct ethtool_ringparam *ring, |
4670 | struct kernel_ethtool_ringparam *kernel_ring, |
4671 | struct netlink_ext_ack *extack) |
4672 | { |
4673 | struct fe_priv *np = netdev_priv(dev); |
4674 | u8 __iomem *base = get_hwbase(dev); |
4675 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; |
4676 | dma_addr_t ring_addr; |
4677 | |
4678 | if (ring->rx_pending < RX_RING_MIN || |
4679 | ring->tx_pending < TX_RING_MIN || |
4680 | ring->rx_mini_pending != 0 || |
4681 | ring->rx_jumbo_pending != 0 || |
4682 | (np->desc_ver == DESC_VER_1 && |
4683 | (ring->rx_pending > RING_MAX_DESC_VER_1 || |
4684 | ring->tx_pending > RING_MAX_DESC_VER_1)) || |
4685 | (np->desc_ver != DESC_VER_1 && |
4686 | (ring->rx_pending > RING_MAX_DESC_VER_2_3 || |
4687 | ring->tx_pending > RING_MAX_DESC_VER_2_3))) { |
4688 | return -EINVAL; |
4689 | } |
4690 | |
4691 | /* allocate new rings */ |
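	/* Allocate the replacements before tearing anything down: if an
	 * allocation fails we jump to "exit" below with the old rings
	 * still installed and the device untouched.
	 */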
4692 | if (!nv_optimized(np)) { |
		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
					       sizeof(struct ring_desc) *
					       (ring->rx_pending +
						ring->tx_pending),
					       &ring_addr, GFP_ATOMIC);
	} else {
		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
					       sizeof(struct ring_desc_ex) *
					       (ring->rx_pending +
						ring->tx_pending),
					       &ring_addr, GFP_ATOMIC);
	}
	rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
				  GFP_KERNEL);
	tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
				  GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (!nv_optimized(np)) {
			if (rxtx_ring)
				dma_free_coherent(&np->pci_dev->dev,
						  sizeof(struct ring_desc) *
						  (ring->rx_pending +
						   ring->tx_pending),
						  rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				dma_free_coherent(&np->pci_dev->dev,
						  sizeof(struct ring_desc_ex) *
						  (ring->rx_pending +
						   ring->tx_pending),
						  rxtx_ring, ring_addr);
		}

		kfree(rx_skbuff);
		kfree(tx_skbuff);
		goto exit;
4730 | } |
4731 | |
4732 | if (netif_running(dev)) { |
4733 | nv_disable_irq(dev); |
4734 | nv_napi_disable(dev); |
4735 | netif_tx_lock_bh(dev); |
4736 | netif_addr_lock(dev); |
		spin_lock(&np->lock);
4738 | /* stop engines */ |
4739 | nv_stop_rxtx(dev); |
4740 | nv_txrx_reset(dev); |
4741 | /* drain queues */ |
4742 | nv_drain_rxtx(dev); |
4743 | /* delete queues */ |
4744 | free_rings(dev); |
4745 | } |
4746 | |
4747 | /* set new values */ |
4748 | np->rx_ring_size = ring->rx_pending; |
4749 | np->tx_ring_size = ring->tx_pending; |
4750 | |
4751 | if (!nv_optimized(np)) { |
4752 | np->rx_ring.orig = (struct ring_desc *)rxtx_ring; |
4753 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; |
4754 | } else { |
4755 | np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; |
4756 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; |
4757 | } |
4758 | np->rx_skb = (struct nv_skb_map *)rx_skbuff; |
4759 | np->tx_skb = (struct nv_skb_map *)tx_skbuff; |
4760 | np->ring_addr = ring_addr; |
4761 | |
4762 | memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); |
4763 | memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); |
4764 | |
4765 | if (netif_running(dev)) { |
4766 | /* reinit driver view of the queues */ |
4767 | set_bufsize(dev); |
4768 | if (nv_init_ring(dev)) { |
4769 | if (!np->in_shutdown) |
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4771 | } |
4772 | |
4773 | /* reinit nic view of the queues */ |
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		       base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rxtx(dev);
		spin_unlock(&np->lock);
4785 | netif_addr_unlock(dev); |
4786 | netif_tx_unlock_bh(dev); |
4787 | nv_napi_enable(dev); |
4788 | nv_enable_irq(dev); |
4789 | } |
4790 | return 0; |
4791 | exit: |
4792 | return -ENOMEM; |
4793 | } |
4794 | |
4795 | static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) |
4796 | { |
4797 | struct fe_priv *np = netdev_priv(dev); |
4798 | |
4799 | pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; |
4800 | pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; |
4801 | pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; |
4802 | } |
4803 | |
4804 | static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) |
4805 | { |
4806 | struct fe_priv *np = netdev_priv(dev); |
4807 | int adv, bmcr; |
4808 | |
4809 | if ((!np->autoneg && np->duplex == 0) || |
4810 | (np->autoneg && !pause->autoneg && np->duplex == 0)) { |
		netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4812 | return -EINVAL; |
4813 | } |
4814 | if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { |
		netdev_info(dev, "hardware does not support tx pause frames\n");
4816 | return -EINVAL; |
4817 | } |
4818 | |
4819 | netif_carrier_off(dev); |
4820 | if (netif_running(dev)) { |
4821 | nv_disable_irq(dev); |
4822 | netif_tx_lock_bh(dev); |
4823 | netif_addr_lock(dev); |
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rxtx(dev);
		spin_unlock(&np->lock);
4828 | netif_addr_unlock(dev); |
4829 | netif_tx_unlock_bh(dev); |
4830 | } |
4831 | |
4832 | np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); |
4833 | if (pause->rx_pause) |
4834 | np->pause_flags |= NV_PAUSEFRAME_RX_REQ; |
4835 | if (pause->tx_pause) |
4836 | np->pause_flags |= NV_PAUSEFRAME_TX_REQ; |
4837 | |
4838 | if (np->autoneg && pause->autoneg) { |
4839 | np->pause_flags |= NV_PAUSEFRAME_AUTONEG; |
4840 | |
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			netdev_info(dev, "link down\n");
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4854 | } else { |
4855 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); |
4856 | if (pause->rx_pause) |
4857 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
4858 | if (pause->tx_pause) |
4859 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
4860 | |
4861 | if (!netif_running(dev)) |
4862 | nv_update_linkspeed(dev); |
4863 | else |
			nv_update_pause(dev, np->pause_flags);
4865 | } |
4866 | |
4867 | if (netif_running(dev)) { |
4868 | nv_start_rxtx(dev); |
4869 | nv_enable_irq(dev); |
4870 | } |
4871 | return 0; |
4872 | } |
4873 | |
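/* Toggle internal PHY loopback via BMCR_LOOPBACK. While looped back the
 * link is forced to 1000 Mbps full duplex, so transmitted frames are
 * reflected straight back to the receiver.
 */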
4874 | static int nv_set_loopback(struct net_device *dev, netdev_features_t features) |
4875 | { |
4876 | struct fe_priv *np = netdev_priv(dev); |
4877 | unsigned long flags; |
4878 | u32 miicontrol; |
4879 | int err, retval = 0; |
4880 | |
4881 | spin_lock_irqsave(&np->lock, flags); |
	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	if (features & NETIF_F_LOOPBACK) {
		if (miicontrol & BMCR_LOOPBACK) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already enabled\n");
			return 0;
		}
		nv_disable_irq(dev);
		/* Turn on loopback mode */
		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
		if (err) {
			retval = PHY_ERROR;
			spin_unlock_irqrestore(&np->lock, flags);
			phy_init(dev);
		} else {
			if (netif_running(dev)) {
				/* Force 1000 Mbps full-duplex */
				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
						   1);
				/* Force link up */
				netif_carrier_on(dev);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev,
				    "Internal PHY loopback mode enabled.\n");
		}
	} else {
		if (!(miicontrol & BMCR_LOOPBACK)) {
			spin_unlock_irqrestore(&np->lock, flags);
			netdev_info(dev, "Loopback already disabled\n");
			return 0;
		}
		nv_disable_irq(dev);
		/* Turn off loopback */
		spin_unlock_irqrestore(&np->lock, flags);
		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
		phy_init(dev);
	}
	msleep(500);
4922 | spin_lock_irqsave(&np->lock, flags); |
4923 | nv_enable_irq(dev); |
	spin_unlock_irqrestore(&np->lock, flags);
4925 | |
4926 | return retval; |
4927 | } |
4928 | |
4929 | static netdev_features_t nv_fix_features(struct net_device *dev, |
4930 | netdev_features_t features) |
4931 | { |
4932 | /* vlan is dependent on rx checksum offload */ |
4933 | if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) |
4934 | features |= NETIF_F_RXCSUM; |
4935 | |
4936 | return features; |
4937 | } |
4938 | |
4939 | static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) |
4940 | { |
4941 | struct fe_priv *np = get_nvpriv(dev); |
4942 | |
	spin_lock_irq(&np->lock);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
	else
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
	else
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
4958 | } |
4959 | |
4960 | static int nv_set_features(struct net_device *dev, netdev_features_t features) |
4961 | { |
4962 | struct fe_priv *np = netdev_priv(dev); |
4963 | u8 __iomem *base = get_hwbase(dev); |
4964 | netdev_features_t changed = dev->features ^ features; |
4965 | int retval; |
4966 | |
4967 | if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { |
4968 | retval = nv_set_loopback(dev, features); |
4969 | if (retval != 0) |
4970 | return retval; |
4971 | } |
4972 | |
4973 | if (changed & NETIF_F_RXCSUM) { |
		spin_lock_irq(&np->lock);

		if (features & NETIF_F_RXCSUM)
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		else
			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;

		if (netif_running(dev))
			writel(np->txrxctl_bits, base + NvRegTxRxControl);

		spin_unlock_irq(&np->lock);
4985 | } |
4986 | |
4987 | if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)) |
4988 | nv_vlan_mode(dev, features); |
4989 | |
4990 | return 0; |
4991 | } |
4992 | |
4993 | static int nv_get_sset_count(struct net_device *dev, int sset) |
4994 | { |
4995 | struct fe_priv *np = netdev_priv(dev); |
4996 | |
4997 | switch (sset) { |
4998 | case ETH_SS_TEST: |
4999 | if (np->driver_data & DEV_HAS_TEST_EXTENDED) |
5000 | return NV_TEST_COUNT_EXTENDED; |
5001 | else |
5002 | return NV_TEST_COUNT_BASE; |
5003 | case ETH_SS_STATS: |
5004 | if (np->driver_data & DEV_HAS_STATISTICS_V3) |
5005 | return NV_DEV_STATISTICS_V3_COUNT; |
5006 | else if (np->driver_data & DEV_HAS_STATISTICS_V2) |
5007 | return NV_DEV_STATISTICS_V2_COUNT; |
5008 | else if (np->driver_data & DEV_HAS_STATISTICS_V1) |
5009 | return NV_DEV_STATISTICS_V1_COUNT; |
5010 | else |
5011 | return 0; |
5012 | default: |
5013 | return -EOPNOTSUPP; |
5014 | } |
5015 | } |
5016 | |
5017 | static void nv_get_ethtool_stats(struct net_device *dev, |
5018 | struct ethtool_stats *estats, u64 *buffer) |
5019 | __acquires(&netdev_priv(dev)->hwstats_lock) |
5020 | __releases(&netdev_priv(dev)->hwstats_lock) |
5021 | { |
5022 | struct fe_priv *np = netdev_priv(dev); |
5023 | |
	spin_lock_bh(&np->hwstats_lock);
	nv_update_stats(dev);
	memcpy(buffer, &np->estats,
	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
	spin_unlock_bh(&np->hwstats_lock);
5029 | } |
5030 | |
5031 | static int nv_link_test(struct net_device *dev) |
5032 | { |
5033 | struct fe_priv *np = netdev_priv(dev); |
5034 | int mii_status; |
5035 | |
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5038 | |
5039 | /* check phy link status */ |
5040 | if (!(mii_status & BMSR_LSTATUS)) |
5041 | return 0; |
5042 | else |
5043 | return 1; |
5044 | } |
5045 | |
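/* Walk the nv_registers_test table: XOR each register with its mask,
 * write it back and verify the toggled bits read back as written, then
 * restore the original value. Returns 1 on pass, 0 on failure.
 */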
5046 | static int nv_register_test(struct net_device *dev) |
5047 | { |
5048 | u8 __iomem *base = get_hwbase(dev); |
5049 | int i = 0; |
5050 | u32 orig_read, new_read; |
5051 | |
5052 | do { |
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);
5068 | |
5069 | } while (nv_registers_test[++i].reg != 0); |
5070 | |
5071 | return 1; |
5072 | } |
5073 | |
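/* Verify the interrupt path end-to-end: grab a single test vector,
 * program the hardware timer interrupt and check that the ISR has set
 * np->intr_test after ~100ms. Returns 1 on pass, 2 if no interrupt was
 * seen, 0 on setup failure.
 */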
5074 | static int nv_interrupt_test(struct net_device *dev) |
5075 | { |
5076 | struct fe_priv *np = netdev_priv(dev); |
5077 | u8 __iomem *base = get_hwbase(dev); |
5078 | int ret = 1; |
5079 | int testcnt; |
5080 | u32 save_msi_flags, save_poll_interval = 0; |
5081 | |
5082 | if (netif_running(dev)) { |
5083 | /* free current irq */ |
5084 | nv_free_irq(dev); |
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}
5133 | |
5134 | return ret; |
5135 | } |
5136 | |
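/* MAC-level loopback self-test: enable the loopback packet filter, queue
 * one frame with a known byte pattern and verify the same bytes arrive
 * on the first rx descriptor. Returns 1 on pass, 0 on failure.
 */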
5137 | static int nv_loopback_test(struct net_device *dev) |
5138 | { |
5139 | struct fe_priv *np = netdev_priv(dev); |
5140 | u8 __iomem *base = get_hwbase(dev); |
5141 | struct sk_buff *tx_skb, *rx_skb; |
5142 | dma_addr_t test_dma_addr; |
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5144 | u32 flags; |
5145 | int len, i, pkt_len; |
5146 | u8 *pkt_data; |
5147 | u32 filter_flags = 0; |
5148 | u32 misc1_flags = 0; |
5149 | int ret = 1; |
5150 | |
5151 | if (netif_running(dev)) { |
5152 | nv_disable_irq(dev); |
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
5157 | } |
5158 | |
5159 | /* reinit driver view of the rx queue */ |
5160 | set_bufsize(dev); |
5161 | nv_init_ring(dev); |
5162 | |
5163 | /* setup hardware for loopback */ |
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);
5172 | pci_push(base); |
5173 | |
5174 | /* restart rx engine */ |
5175 | nv_start_rxtx(dev); |
5176 | |
5177 | /* setup packet for tx */ |
5178 | pkt_len = ETH_DATA_LEN; |
	tx_skb = netdev_alloc_skb(dev, pkt_len);
5180 | if (!tx_skb) { |
5181 | ret = 0; |
5182 | goto out; |
5183 | } |
5184 | test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, |
5185 | skb_tailroom(tx_skb), |
5186 | DMA_FROM_DEVICE); |
	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
				       test_dma_addr))) {
		dev_kfree_skb_any(tx_skb);
		ret = 0;
		goto out;
	}
	pkt_data = skb_put(tx_skb, pkt_len);
5193 | for (i = 0; i < pkt_len; i++) |
5194 | pkt_data[i] = (u8)(i & 0xff); |
5195 | |
5196 | if (!nv_optimized(np)) { |
5197 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
5198 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); |
5199 | } else { |
5200 | np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); |
5201 | np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); |
5202 | np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); |
5203 | } |
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);
5208 | |
5209 | /* check for rx of the packet */ |
5210 | if (!nv_optimized(np)) { |
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5217 | } |
5218 | |
5219 | if (flags & NV_RX_AVAIL) { |
5220 | ret = 0; |
5221 | } else if (np->desc_ver == DESC_VER_1) { |
5222 | if (flags & NV_RX_ERROR) |
5223 | ret = 0; |
5224 | } else { |
5225 | if (flags & NV_RX2_ERROR) |
5226 | ret = 0; |
5227 | } |
5228 | |
5229 | if (ret) { |
5230 | if (len != pkt_len) { |
5231 | ret = 0; |
5232 | } else { |
5233 | rx_skb = np->rx_skb[0].skb; |
5234 | for (i = 0; i < pkt_len; i++) { |
5235 | if (rx_skb->data[i] != (u8)(i & 0xff)) { |
5236 | ret = 0; |
5237 | break; |
5238 | } |
5239 | } |
5240 | } |
5241 | } |
5242 | |
5243 | dma_unmap_single(&np->pci_dev->dev, test_dma_addr, |
5244 | (skb_end_pointer(tx_skb) - tx_skb->data), |
5245 | DMA_TO_DEVICE); |
	dev_kfree_skb_any(tx_skb);
5247 | out: |
5248 | /* stop engines */ |
5249 | nv_stop_rxtx(dev); |
5250 | nv_txrx_reset(dev); |
5251 | /* drain rx queue */ |
5252 | nv_drain_rxtx(dev); |
5253 | |
5254 | if (netif_running(dev)) { |
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
5257 | nv_enable_irq(dev); |
5258 | } |
5259 | |
5260 | return ret; |
5261 | } |
5262 | |
5263 | static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) |
5264 | { |
5265 | struct fe_priv *np = netdev_priv(dev); |
5266 | u8 __iomem *base = get_hwbase(dev); |
5267 | int result, count; |
5268 | |
	count = nv_get_sset_count(dev, ETH_SS_TEST);
5270 | memset(buffer, 0, count * sizeof(u64)); |
5271 | |
5272 | if (!nv_link_test(dev)) { |
5273 | test->flags |= ETH_TEST_FL_FAILED; |
5274 | buffer[0] = 1; |
5275 | } |
5276 | |
5277 | if (test->flags & ETH_TEST_FL_OFFLINE) { |
5278 | if (netif_running(dev)) { |
5279 | netif_stop_queue(dev); |
5280 | nv_napi_disable(dev); |
5281 | netif_tx_lock_bh(dev); |
5282 | netif_addr_lock(dev); |
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			else
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			/* stop engines */
			nv_stop_rxtx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rxtx(dev);
			spin_unlock_irq(&np->lock);
5295 | netif_addr_unlock(dev); |
5296 | netif_tx_unlock_bh(dev); |
5297 | } |
5298 | |
5299 | if (!nv_register_test(dev)) { |
5300 | test->flags |= ETH_TEST_FL_FAILED; |
5301 | buffer[1] = 1; |
5302 | } |
5303 | |
5304 | result = nv_interrupt_test(dev); |
5305 | if (result != 1) { |
5306 | test->flags |= ETH_TEST_FL_FAILED; |
5307 | buffer[2] = 1; |
5308 | } |
5309 | if (result == 0) { |
5310 | /* bail out */ |
5311 | return; |
5312 | } |
5313 | |
5314 | if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { |
5315 | test->flags |= ETH_TEST_FL_FAILED; |
5316 | buffer[3] = 1; |
5317 | } |
5318 | |
5319 | if (netif_running(dev)) { |
5320 | /* reinit driver view of the rx queue */ |
5321 | set_bufsize(dev); |
5322 | if (nv_init_ring(dev)) { |
5323 | if (!np->in_shutdown) |
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5325 | } |
5326 | /* reinit nic view of the rx queue */ |
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			       base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5333 | pci_push(base); |
5334 | /* restart rx engine */ |
5335 | nv_start_rxtx(dev); |
5336 | netif_start_queue(dev); |
5337 | nv_napi_enable(dev); |
			nv_enable_hw_interrupts(dev, np->irqmask);
5339 | } |
5340 | } |
5341 | } |
5342 | |
5343 | static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) |
5344 | { |
5345 | switch (stringset) { |
5346 | case ETH_SS_STATS: |
5347 | memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); |
5348 | break; |
5349 | case ETH_SS_TEST: |
5350 | memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); |
5351 | break; |
5352 | } |
5353 | } |
5354 | |
5355 | static const struct ethtool_ops ops = { |
5356 | .get_drvinfo = nv_get_drvinfo, |
5357 | .get_link = ethtool_op_get_link, |
5358 | .get_wol = nv_get_wol, |
5359 | .set_wol = nv_set_wol, |
5360 | .get_regs_len = nv_get_regs_len, |
5361 | .get_regs = nv_get_regs, |
5362 | .nway_reset = nv_nway_reset, |
5363 | .get_ringparam = nv_get_ringparam, |
5364 | .set_ringparam = nv_set_ringparam, |
5365 | .get_pauseparam = nv_get_pauseparam, |
5366 | .set_pauseparam = nv_set_pauseparam, |
5367 | .get_strings = nv_get_strings, |
5368 | .get_ethtool_stats = nv_get_ethtool_stats, |
5369 | .get_sset_count = nv_get_sset_count, |
5370 | .self_test = nv_self_test, |
5371 | .get_ts_info = ethtool_op_get_ts_info, |
5372 | .get_link_ksettings = nv_get_link_ksettings, |
5373 | .set_link_ksettings = nv_set_link_ksettings, |
5374 | }; |
5375 | |
5376 | /* The mgmt unit and driver use a semaphore to access the phy during init */ |
5377 | static int nv_mgmt_acquire_sema(struct net_device *dev) |
5378 | { |
5379 | struct fe_priv *np = netdev_priv(dev); |
5380 | u8 __iomem *base = get_hwbase(dev); |
5381 | int i; |
5382 | u32 tx_ctrl, mgmt_sema; |
5383 | |
5384 | for (i = 0; i < 10; i++) { |
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
5401 | if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && |
5402 | ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { |
5403 | np->mgmt_sema = 1; |
5404 | return 1; |
5405 | } else |
5406 | udelay(50); |
5407 | } |
5408 | |
5409 | return 0; |
5410 | } |
5411 | |
5412 | static void nv_mgmt_release_sema(struct net_device *dev) |
5413 | { |
5414 | struct fe_priv *np = netdev_priv(dev); |
5415 | u8 __iomem *base = get_hwbase(dev); |
5416 | u32 tx_ctrl; |
5417 | |
5418 | if (np->driver_data & DEV_HAS_MGMT_UNIT) { |
5419 | if (np->mgmt_sema) { |
			tx_ctrl = readl(base + NvRegTransmitterControl);
			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
			writel(tx_ctrl, base + NvRegTransmitterControl);
5423 | } |
5424 | } |
5425 | } |
5426 | |
5427 | |
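/* Query the management unit firmware version: toggle the DATA_START bit
 * and poll up to five seconds for the DATA_READY bit to flip before
 * reading NvRegMgmtUnitVersion.
 */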
5428 | static int nv_mgmt_get_version(struct net_device *dev) |
5429 | { |
5430 | struct fe_priv *np = netdev_priv(dev); |
5431 | u8 __iomem *base = get_hwbase(dev); |
	u32 data_ready = readl(base + NvRegTransmitterControl);
5433 | u32 data_ready2 = 0; |
5434 | unsigned long start; |
5435 | int ready = 0; |
5436 | |
	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
	start = jiffies;
	while (time_before(jiffies, start + 5*HZ)) {
		data_ready2 = readl(base + NvRegTransmitterControl);
		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
			ready = 1;
			break;
		}
		schedule_timeout_uninterruptible(1);
	}
5447 | } |
5448 | |
5449 | if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) |
5450 | return 0; |
5451 | |
	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5453 | |
5454 | return 1; |
5455 | } |
5456 | |
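/* ndo_open: power up the PHY, erase stale MAC state, install the
 * descriptor rings, request the IRQ, and only then unmask interrupts
 * and run the initial link-speed detection.
 */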
5457 | static int nv_open(struct net_device *dev) |
5458 | { |
5459 | struct fe_priv *np = netdev_priv(dev); |
5460 | u8 __iomem *base = get_hwbase(dev); |
5461 | int ret = 1; |
5462 | int oom, i; |
5463 | u32 low; |
5464 | |
5465 | /* power up phy */ |
	mii_rw(dev, np->phyaddr, MII_BMCR,
	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);

	nv_txrx_gate(dev, false);
	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);
5495 | |
5496 | np->in_shutdown = 0; |
5497 | |
5498 | /* give hw rings */ |
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
	       base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	if (reg_delay(dev, NvRegUnknownSetupReg5,
		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
		netdev_info(dev,
			    "%s: SetupReg5, Bit 31 remained off\n", __func__);

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5526 | |
	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);

	get_random_bytes(&low, sizeof(low));
	low &= NVREG_SLOTTIME_MASK;
	if (np->desc_ver == DESC_VER_1) {
		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
	} else {
		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
			/* setup legacy backoff */
			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
		} else {
			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
			nv_gear_backoff_reseed(dev);
		}
	}
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
	       base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5558 | |
	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5590 | |
5591 | /* set linkspeed to invalid value, thus force nv_update_linkspeed |
5592 | * to init hw */ |
5593 | np->linkspeed = 0; |
5594 | ret = nv_update_linkspeed(dev); |
5595 | nv_start_rxtx(dev); |
5596 | netif_start_queue(dev); |
5597 | nv_napi_enable(dev); |
5598 | |
5599 | if (ret) { |
5600 | netif_carrier_on(dev); |
5601 | } else { |
		netdev_info(dev, "no link during initialization\n");
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));

	spin_unlock_irq(&np->lock);

	/* If the loopback feature was set while the device was down, make sure
	 * that it's set correctly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		nv_set_loopback(dev, dev->features);
5620 | |
5621 | return 0; |
5622 | out_drain: |
5623 | nv_drain_rxtx(dev); |
5624 | return ret; |
5625 | } |
5626 | |
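/* ndo_stop: mark the shutdown, quiesce NAPI and the timers, stop the
 * engines under np->lock, then free the IRQ and drain the rings. When
 * WoL is armed the receiver is deliberately left running.
 */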
5627 | static int nv_close(struct net_device *dev) |
5628 | { |
5629 | struct fe_priv *np = netdev_priv(dev); |
5630 | u8 __iomem *base; |
5631 | |
	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	nv_napi_disable(dev);
	synchronize_irq(np->pci_dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
	nv_stop_rxtx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	nv_drain_rxtx(dev);

	if (np->wolenabled || !phy_power_down) {
		nv_txrx_gate(dev, false);
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	} else {
		/* power down phy */
		mii_rw(dev, np->phyaddr, MII_BMCR,
		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
		nv_txrx_gate(dev, true);
	}
5668 | } |
5669 | |
5670 | /* FIXME: power down nic */ |
5671 | |
5672 | return 0; |
5673 | } |
5674 | |
5675 | static const struct net_device_ops nv_netdev_ops = { |
5676 | .ndo_open = nv_open, |
5677 | .ndo_stop = nv_close, |
5678 | .ndo_get_stats64 = nv_get_stats64, |
5679 | .ndo_start_xmit = nv_start_xmit, |
5680 | .ndo_tx_timeout = nv_tx_timeout, |
5681 | .ndo_change_mtu = nv_change_mtu, |
5682 | .ndo_fix_features = nv_fix_features, |
5683 | .ndo_set_features = nv_set_features, |
5684 | .ndo_validate_addr = eth_validate_addr, |
5685 | .ndo_set_mac_address = nv_set_mac_address, |
5686 | .ndo_set_rx_mode = nv_set_multicast, |
5687 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5688 | .ndo_poll_controller = nv_poll_controller, |
5689 | #endif |
5690 | }; |
5691 | |
5692 | static const struct net_device_ops nv_netdev_ops_optimized = { |
5693 | .ndo_open = nv_open, |
5694 | .ndo_stop = nv_close, |
5695 | .ndo_get_stats64 = nv_get_stats64, |
5696 | .ndo_start_xmit = nv_start_xmit_optimized, |
5697 | .ndo_tx_timeout = nv_tx_timeout, |
5698 | .ndo_change_mtu = nv_change_mtu, |
5699 | .ndo_fix_features = nv_fix_features, |
5700 | .ndo_set_features = nv_set_features, |
5701 | .ndo_validate_addr = eth_validate_addr, |
5702 | .ndo_set_mac_address = nv_set_mac_address, |
5703 | .ndo_set_rx_mode = nv_set_multicast, |
5704 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5705 | .ndo_poll_controller = nv_poll_controller, |
5706 | #endif |
5707 | }; |
5708 | |
5709 | static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) |
5710 | { |
5711 | struct net_device *dev; |
5712 | struct fe_priv *np; |
5713 | unsigned long addr; |
5714 | u8 __iomem *base; |
5715 | int err, i; |
5716 | u32 powerstate, txreg; |
5717 | u32 phystate_orig = 0, phystate; |
5718 | int phyinitialized = 0; |
5719 | static int printed_version; |
5720 | u8 mac[ETH_ALEN]; |
5721 | |
5722 | if (!printed_version++) |
5723 | pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n" , |
5724 | FORCEDETH_VERSION); |
5725 | |
5726 | dev = alloc_etherdev(sizeof(struct fe_priv)); |
5727 | err = -ENOMEM; |
5728 | if (!dev) |
5729 | goto out; |
5730 | |
5731 | np = netdev_priv(dev); |
5732 | np->dev = dev; |
5733 | np->pci_dev = pci_dev; |
5734 | spin_lock_init(&np->lock); |
5735 | spin_lock_init(&np->hwstats_lock); |
5736 | SET_NETDEV_DEV(dev, &pci_dev->dev); |
	u64_stats_init(&np->swstats_rx_syncp);
	u64_stats_init(&np->swstats_tx_syncp);
	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
	if (!np->txrx_stats) {
		pr_err("np->txrx_stats, alloc memory error.\n");
5742 | err = -ENOMEM; |
5743 | goto out_alloc_percpu; |
5744 | } |
5745 | |
5746 | timer_setup(&np->oom_kick, nv_do_rx_refill, 0); |
5747 | timer_setup(&np->nic_poll, nv_do_nic_poll, 0); |
5748 | timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE); |
5749 | |
	err = pci_enable_device(pci_dev);
	if (err)
		goto out_free;

	pci_set_master(pci_dev);
5755 | |
5756 | err = pci_request_regions(pci_dev, DRV_NAME); |
5757 | if (err < 0) |
5758 | goto out_disable; |
5759 | |
5760 | if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) |
5761 | np->register_size = NV_PCI_REGSZ_VER3; |
5762 | else if (id->driver_data & DEV_HAS_STATISTICS_V1) |
5763 | np->register_size = NV_PCI_REGSZ_VER2; |
5764 | else |
5765 | np->register_size = NV_PCI_REGSZ_VER1; |
5766 | |
5767 | err = -EINVAL; |
5768 | addr = 0; |
5769 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
5770 | if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && |
5771 | pci_resource_len(pci_dev, i) >= np->register_size) { |
5772 | addr = pci_resource_start(pci_dev, i); |
5773 | break; |
5774 | } |
5775 | } |
5776 | if (i == DEVICE_COUNT_RESOURCE) { |
5777 | dev_info(&pci_dev->dev, "Couldn't find register window\n" ); |
5778 | goto out_relreg; |
5779 | } |
5780 | |
5781 | /* copy of driver data */ |
5782 | np->driver_data = id->driver_data; |
5783 | /* copy of device id */ |
5784 | np->device_id = id->device; |
5785 | |
5786 | /* handle different descriptor versions */ |
5787 | if (id->driver_data & DEV_HAS_HIGH_DMA) { |
5788 | /* packet format 3: supports 40-bit addressing */ |
5789 | np->desc_ver = DESC_VER_3; |
5790 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; |
5791 | if (dma_64bit) { |
			if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39)))
				dev_info(&pci_dev->dev,
					 "64-bit DMA failed, using 32-bit addressing\n");
5795 | else |
5796 | dev->features |= NETIF_F_HIGHDMA; |
5797 | } |
5798 | } else if (id->driver_data & DEV_HAS_LARGEDESC) { |
5799 | /* packet format 2: supports jumbo frames */ |
5800 | np->desc_ver = DESC_VER_2; |
5801 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; |
5802 | } else { |
5803 | /* original packet format */ |
5804 | np->desc_ver = DESC_VER_1; |
5805 | np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; |
5806 | } |
5807 | |
5808 | np->pkt_limit = NV_PKTLIMIT_1; |
5809 | if (id->driver_data & DEV_HAS_LARGEDESC) |
5810 | np->pkt_limit = NV_PKTLIMIT_2; |
5811 | |
5812 | if (id->driver_data & DEV_HAS_CHECKSUM) { |
5813 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
5814 | dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
5815 | NETIF_F_TSO | NETIF_F_RXCSUM; |
5816 | } |
5817 | |
5818 | np->vlanctl_bits = 0; |
5819 | if (id->driver_data & DEV_HAS_VLAN) { |
5820 | np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; |
5821 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | |
5822 | NETIF_F_HW_VLAN_CTAG_TX; |
5823 | } |
5824 | |
5825 | dev->features |= dev->hw_features; |
5826 | |
5827 | /* Add loopback capability to the device. */ |
5828 | dev->hw_features |= NETIF_F_LOOPBACK; |
5829 | |
5830 | /* MTU range: 64 - 1500 or 9100 */ |
5831 | dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; |
5832 | dev->max_mtu = np->pkt_limit; |
5833 | |
5834 | np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; |
5835 | if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || |
5836 | (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || |
5837 | (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { |
5838 | np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; |
5839 | } |
5840 | |
5841 | err = -ENOMEM; |
	np->base = ioremap(addr, np->register_size);
5843 | if (!np->base) |
5844 | goto out_relreg; |
5845 | |
5846 | np->rx_ring_size = RX_RING_DEFAULT; |
5847 | np->tx_ring_size = TX_RING_DEFAULT; |
5848 | |
5849 | if (!nv_optimized(np)) { |
		np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
						      sizeof(struct ring_desc) *
						      (np->rx_ring_size +
						       np->tx_ring_size),
						      &np->ring_addr,
						      GFP_KERNEL);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
						    sizeof(struct ring_desc_ex) *
						    (np->rx_ring_size +
						     np->tx_ring_size),
						    &np->ring_addr, GFP_KERNEL);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5871 | if (!np->rx_skb || !np->tx_skb) |
5872 | goto out_freering; |
5873 | |
5874 | if (!nv_optimized(np)) |
5875 | dev->netdev_ops = &nv_netdev_ops; |
5876 | else |
5877 | dev->netdev_ops = &nv_netdev_ops_optimized; |
5878 | |
	netif_napi_add(dev, &np->napi, nv_napi_poll);
	dev->ethtool_ops = &ops;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);
5884 | |
5885 | /* read the mac address */ |
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
5892 | if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { |
5893 | /* mac address is already in correct order */ |
5894 | mac[0] = (np->orig_mac[0] >> 0) & 0xff; |
5895 | mac[1] = (np->orig_mac[0] >> 8) & 0xff; |
5896 | mac[2] = (np->orig_mac[0] >> 16) & 0xff; |
5897 | mac[3] = (np->orig_mac[0] >> 24) & 0xff; |
5898 | mac[4] = (np->orig_mac[1] >> 0) & 0xff; |
5899 | mac[5] = (np->orig_mac[1] >> 8) & 0xff; |
5900 | } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { |
5901 | /* mac address is already in correct order */ |
5902 | mac[0] = (np->orig_mac[0] >> 0) & 0xff; |
5903 | mac[1] = (np->orig_mac[0] >> 8) & 0xff; |
5904 | mac[2] = (np->orig_mac[0] >> 16) & 0xff; |
5905 | mac[3] = (np->orig_mac[0] >> 24) & 0xff; |
5906 | mac[4] = (np->orig_mac[1] >> 0) & 0xff; |
5907 | mac[5] = (np->orig_mac[1] >> 8) & 0xff; |
5908 | /* |
5909 | * Set orig mac address back to the reversed version. |
5910 | * This flag will be cleared during low power transition. |
5911 | * Therefore, we should always put back the reversed address. |
5912 | */ |
5913 | np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) + |
5914 | (mac[3] << 16) + (mac[2] << 24); |
5915 | np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8); |
5916 | } else { |
5917 | /* need to reverse mac address to correct order */ |
5918 | mac[0] = (np->orig_mac[1] >> 8) & 0xff; |
5919 | mac[1] = (np->orig_mac[1] >> 0) & 0xff; |
5920 | mac[2] = (np->orig_mac[0] >> 24) & 0xff; |
5921 | mac[3] = (np->orig_mac[0] >> 16) & 0xff; |
5922 | mac[4] = (np->orig_mac[0] >> 8) & 0xff; |
5923 | mac[5] = (np->orig_mac[0] >> 0) & 0xff; |
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
		dev_dbg(&pci_dev->dev,
			"%s: set workaround bit for reversed mac addr\n",
			__func__);
5928 | } |
5929 | |
	if (is_valid_ether_addr(mac)) {
		eth_hw_addr_set(dev, mac);
5932 | } else { |
5933 | /* |
5934 | * Bad mac address. At least one bios sets the mac address |
5935 | * to 01:23:45:67:89:ab |
5936 | */ |
5937 | dev_err(&pci_dev->dev, |
5938 | "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n" , |
5939 | mac); |
5940 | eth_hw_addr_random(dev); |
5941 | dev_err(&pci_dev->dev, |
5942 | "Using random MAC address: %pM\n" , dev->dev_addr); |
5943 | } |
5944 | |
5945 | /* set mac address */ |
5946 | nv_copy_mac_to_hw(dev); |
5947 | |
5948 | /* disable WOL */ |
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	device_set_wakeup_enable(&pci_dev->dev, false);
5952 | |
5953 | if (id->driver_data & DEV_HAS_POWER_CNTRL) { |
5954 | |
5955 | /* take phy and nic out of low power mode */ |
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
5962 | } |
5963 | |
5964 | if (np->desc_ver == DESC_VER_1) |
5965 | np->tx_flags = NV_TX_VALID; |
5966 | else |
5967 | np->tx_flags = NV_TX2_VALID; |
5968 | |
5969 | np->msi_flags = 0; |
5970 | if ((id->driver_data & DEV_HAS_MSI) && msi) |
5971 | np->msi_flags |= NV_MSI_CAPABLE; |
5972 | |
5973 | if ((id->driver_data & DEV_HAS_MSI_X) && msix) { |
5974 | /* msix has had reported issues when modifying irqmask |
5975 | as in the case of napi, therefore, disable for now |
5976 | */ |
5977 | #if 0 |
5978 | np->msi_flags |= NV_MSI_X_CAPABLE; |
5979 | #endif |
5980 | } |
5981 | |
5982 | if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) { |
5983 | np->irqmask = NVREG_IRQMASK_CPU; |
5984 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
5985 | np->msi_flags |= 0x0001; |
5986 | } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC && |
5987 | !(id->driver_data & DEV_NEED_TIMERIRQ)) { |
5988 | /* start off in throughput mode */ |
5989 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
5990 | /* remove support for msix mode */ |
5991 | np->msi_flags &= ~NV_MSI_X_CAPABLE; |
5992 | } else { |
5993 | optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; |
5994 | np->irqmask = NVREG_IRQMASK_THROUGHPUT; |
5995 | if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ |
5996 | np->msi_flags |= 0x0003; |
5997 | } |
5998 | |
5999 | if (id->driver_data & DEV_NEED_TIMERIRQ) |
6000 | np->irqmask |= NVREG_IRQ_TIMER; |
6001 | if (id->driver_data & DEV_NEED_LINKTIMER) { |
6002 | np->need_linktimer = 1; |
6003 | np->link_timeout = jiffies + LINK_TIMEOUT; |
6004 | } else { |
6005 | np->need_linktimer = 0; |
6006 | } |
6007 | |
6008 | /* Limit the number of tx's outstanding for hw bug */ |
6009 | if (id->driver_data & DEV_NEED_TX_LIMIT) { |
6010 | np->tx_limit = 1; |
6011 | if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && |
6012 | pci_dev->revision >= 0xA2) |
6013 | np->tx_limit = 0; |
6014 | } |
6015 | |
6016 | /* clear phy state and temporarily halt phy interrupts */ |
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
6025 | |
6026 | if (id->driver_data & DEV_HAS_MGMT_UNIT) { |
6027 | /* management unit running on the mac? */ |
		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
		    nv_mgmt_acquire_sema(dev) &&
		    nv_mgmt_get_version(dev)) {
			np->mac_in_use = 1;
			if (np->mgmt_version > 0)
				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
6035 | /* management unit setup the phy already? */ |
6036 | if (np->mac_in_use && |
6037 | ((readl(addr: base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == |
6038 | NVREG_XMITCTL_SYNC_PHY_INIT)) { |
6039 | /* phy is inited by mgmt unit */ |
6040 | phyinitialized = 1; |
6041 | } else { |
6042 | /* we need to init the phy */ |
6043 | } |
6044 | } |
6045 | } |
6046 | |
6047 | /* find a suitable phy */ |
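	/* MII addresses are 5 bits wide: scanning i = 1..32 with (i & 0x1F)
	 * probes addresses 1-31 first and wraps to address 0 on the final
	 * iteration, so a PHY strapped to address 0 is still found.
	 */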
6048 | for (i = 1; i <= 32; i++) { |
6049 | int id1, id2; |
6050 | int phyaddr = i & 0x1F; |
6051 | |
		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zero's on certain phys */
		if (np->phy_oui == PHY_OUI_REALTEK2)
			np->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
6075 | |
6076 | break; |
6077 | } |
6078 | if (i == 33) { |
6079 | dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n" ); |
6080 | goto out_error; |
6081 | } |
6082 | |
6083 | if (!phyinitialized) { |
6084 | /* reset it */ |
6085 | phy_init(dev); |
6086 | } else { |
6087 | /* see if it is a gigabit phy */ |
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6089 | if (mii_status & PHY_GIGABIT) |
6090 | np->gigabit = PHY_GIGABIT; |
6091 | } |
6092 | |
6093 | /* set default link speed settings */ |
6094 | np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
6095 | np->duplex = 0; |
6096 | np->autoneg = 1; |
6097 | |
6098 | err = register_netdev(dev); |
6099 | if (err) { |
6100 | dev_info(&pci_dev->dev, "unable to register netdev: %d\n" , err); |
6101 | goto out_error; |
6102 | } |
6103 | |
6104 | netif_carrier_off(dev); |
6105 | |
6106 | /* Some NICs freeze when TX pause is enabled while NIC is |
6107 | * down, and this stays across warm reboots. The sequence |
6108 | * below should be enough to recover from that state. |
6109 | */ |
	nv_update_pause(dev, 0);
6111 | nv_start_tx(dev); |
6112 | nv_stop_tx(dev); |
6113 | |
6114 | if (id->driver_data & DEV_HAS_VLAN) |
		nv_vlan_mode(dev, dev->features);
6116 | |
6117 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n" , |
6118 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
6119 | |
6120 | dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n" , |
6121 | dev->features & NETIF_F_HIGHDMA ? "highdma " : "" , |
6122 | dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? |
6123 | "csum " : "" , |
6124 | dev->features & (NETIF_F_HW_VLAN_CTAG_RX | |
6125 | NETIF_F_HW_VLAN_CTAG_TX) ? |
6126 | "vlan " : "" , |
6127 | dev->features & (NETIF_F_LOOPBACK) ? |
6128 | "loopback " : "" , |
6129 | id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "" , |
6130 | id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "" , |
6131 | id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "" , |
6132 | np->gigabit == PHY_GIGABIT ? "gbit " : "" , |
6133 | np->need_linktimer ? "lnktim " : "" , |
6134 | np->msi_flags & NV_MSI_CAPABLE ? "msi " : "" , |
6135 | np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "" , |
6136 | np->desc_ver); |
6137 | |
6138 | return 0; |
6139 | |
6140 | out_error: |
6141 | nv_mgmt_release_sema(dev); |
6142 | if (phystate_orig) |
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6144 | out_freering: |
6145 | free_rings(dev); |
6146 | out_unmap: |
	iounmap(get_hwbase(dev));
6148 | out_relreg: |
6149 | pci_release_regions(pci_dev); |
6150 | out_disable: |
	pci_disable_device(pci_dev);
6152 | out_free: |
	free_percpu(np->txrx_stats);
6154 | out_alloc_percpu: |
6155 | free_netdev(dev); |
6156 | out: |
6157 | return err; |
6158 | } |
6159 | |
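/* Undo the Realtek 8201 crossover-detection workaround that phy_init()
 * applies when phy_cross is NV_CROSSOVER_DETECTION_DISABLED, then restart
 * autonegotiation so the PHY is handed back in its default state.
 */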
6160 | static void nv_restore_phy(struct net_device *dev) |
6161 | { |
6162 | struct fe_priv *np = netdev_priv(dev); |
6163 | u16 phy_reserved, mii_control; |
6164 | |
6165 | if (np->phy_oui == PHY_OUI_REALTEK && |
6166 | np->phy_model == PHY_MODEL_REALTEK_8201 && |
6167 | phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { |
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);

		/* restart auto negotiation */
		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6179 | } |
6180 | } |
6181 | |
6182 | static void nv_restore_mac_addr(struct pci_dev *pci_dev) |
6183 | { |
	struct net_device *dev = pci_get_drvdata(pci_dev);
6185 | struct fe_priv *np = netdev_priv(dev); |
6186 | u8 __iomem *base = get_hwbase(dev); |
6187 | |
6188 | /* special op: write back the misordered MAC address - otherwise |
6189 | * the next nv_probe would see a wrong address. |
6190 | */ |
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);
	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
	       base + NvRegTransmitPoll);
6195 | } |
6196 | |
6197 | static void nv_remove(struct pci_dev *pci_dev) |
6198 | { |
	struct net_device *dev = pci_get_drvdata(pci_dev);
6200 | struct fe_priv *np = netdev_priv(dev); |
6201 | |
	free_percpu(np->txrx_stats);
6203 | |
6204 | unregister_netdev(dev); |
6205 | |
6206 | nv_restore_mac_addr(pci_dev); |
6207 | |
6208 | /* restore any phy related changes */ |
6209 | nv_restore_phy(dev); |
6210 | |
6211 | nv_mgmt_release_sema(dev); |
6212 | |
6213 | /* free all structures */ |
6214 | free_rings(dev); |
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
6218 | free_netdev(dev); |
6219 | } |
6220 | |
6221 | #ifdef CONFIG_PM_SLEEP |
6222 | static int nv_suspend(struct device *device) |
6223 | { |
	struct net_device *dev = dev_get_drvdata(device);
6225 | struct fe_priv *np = netdev_priv(dev); |
6226 | u8 __iomem *base = get_hwbase(dev); |
6227 | int i; |
6228 | |
6229 | if (netif_running(dev)) { |
6230 | /* Gross. */ |
6231 | nv_close(dev); |
6232 | } |
6233 | netif_device_detach(dev); |
6234 | |
6235 | /* save non-pci configuration space */ |
6236 | for (i = 0; i <= np->register_size/sizeof(u32); i++) |
		np->saved_config_space[i] = readl(base + i*sizeof(u32));
6238 | |
6239 | return 0; |
6240 | } |
6241 | |
6242 | static int nv_resume(struct device *device) |
6243 | { |
6244 | struct pci_dev *pdev = to_pci_dev(device); |
6245 | struct net_device *dev = pci_get_drvdata(pdev); |
6246 | struct fe_priv *np = netdev_priv(dev); |
6247 | u8 __iomem *base = get_hwbase(dev); |
6248 | int i, rc = 0; |
6249 | |
6250 | /* restore non-pci configuration space */ |
6251 | for (i = 0; i <= np->register_size/sizeof(u32); i++) |
		writel(np->saved_config_space[i], base+i*sizeof(u32));
6253 | |
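	/* Chips flagged with DEV_NEED_MSI_FIX need this vendor-private
	 * config-space register rewritten after a power transition for MSI
	 * to keep working.
	 */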
6254 | if (np->driver_data & DEV_NEED_MSI_FIX) |
		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6256 | |
6257 | /* restore phy state, including autoneg */ |
6258 | phy_init(dev); |
6259 | |
6260 | netif_device_attach(dev); |
6261 | if (netif_running(dev)) { |
6262 | rc = nv_open(dev); |
6263 | nv_set_multicast(dev); |
6264 | } |
6265 | return rc; |
6266 | } |
6267 | |
6268 | static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); |
6269 | #define NV_PM_OPS (&nv_pm_ops) |
6270 | |
6271 | #else |
6272 | #define NV_PM_OPS NULL |
6273 | #endif /* CONFIG_PM_SLEEP */ |
6274 | |
6275 | #ifdef CONFIG_PM |
6276 | static void nv_shutdown(struct pci_dev *pdev) |
6277 | { |
6278 | struct net_device *dev = pci_get_drvdata(pdev); |
6279 | struct fe_priv *np = netdev_priv(dev); |
6280 | |
6281 | if (netif_running(dev)) |
6282 | nv_close(dev); |
6283 | |
6284 | /* |
6285 | * Restore the MAC so a kernel started by kexec won't get confused. |
6286 | * If we really go for poweroff, we must not restore the MAC, |
6287 | * otherwise the MAC for WOL will be reversed at least on some boards. |
6288 | */ |
6289 | if (system_state != SYSTEM_POWER_OFF) |
		nv_restore_mac_addr(pdev);
6291 | |
	pci_disable_device(pdev);
6293 | /* |
6294 | * Apparently it is not possible to reinitialise from D3 hot, |
6295 | * only put the device into D3 if we really go for poweroff. |
6296 | */ |
6297 | if (system_state == SYSTEM_POWER_OFF) { |
		pci_wake_from_d3(pdev, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
6300 | } |
6301 | } |
6302 | #else |
6303 | #define nv_shutdown NULL |
6304 | #endif /* CONFIG_PM */ |
6305 | |
6306 | static const struct pci_device_id pci_tbl[] = { |
6307 | { /* nForce Ethernet Controller */ |
6308 | PCI_DEVICE(0x10DE, 0x01C3), |
6309 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
6310 | }, |
6311 | { /* nForce2 Ethernet Controller */ |
6312 | PCI_DEVICE(0x10DE, 0x0066), |
6313 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
6314 | }, |
6315 | { /* nForce3 Ethernet Controller */ |
6316 | PCI_DEVICE(0x10DE, 0x00D6), |
6317 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, |
6318 | }, |
6319 | { /* nForce3 Ethernet Controller */ |
6320 | PCI_DEVICE(0x10DE, 0x0086), |
6321 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
6322 | }, |
6323 | { /* nForce3 Ethernet Controller */ |
6324 | PCI_DEVICE(0x10DE, 0x008C), |
6325 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
6326 | }, |
6327 | { /* nForce3 Ethernet Controller */ |
6328 | PCI_DEVICE(0x10DE, 0x00E6), |
6329 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
6330 | }, |
6331 | { /* nForce3 Ethernet Controller */ |
6332 | PCI_DEVICE(0x10DE, 0x00DF), |
6333 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, |
6334 | }, |
6335 | { /* CK804 Ethernet Controller */ |
6336 | PCI_DEVICE(0x10DE, 0x0056), |
6337 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, |
6338 | }, |
6339 | { /* CK804 Ethernet Controller */ |
6340 | PCI_DEVICE(0x10DE, 0x0057), |
6341 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, |
6342 | }, |
6343 | { /* MCP04 Ethernet Controller */ |
6344 | PCI_DEVICE(0x10DE, 0x0037), |
6345 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, |
6346 | }, |
6347 | { /* MCP04 Ethernet Controller */ |
6348 | PCI_DEVICE(0x10DE, 0x0038), |
6349 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, |
6350 | }, |
6351 | { /* MCP51 Ethernet Controller */ |
6352 | PCI_DEVICE(0x10DE, 0x0268), |
6353 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, |
6354 | }, |
6355 | { /* MCP51 Ethernet Controller */ |
6356 | PCI_DEVICE(0x10DE, 0x0269), |
6357 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, |
6358 | }, |
6359 | { /* MCP55 Ethernet Controller */ |
6360 | PCI_DEVICE(0x10DE, 0x0372), |
6361 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, |
6362 | }, |
6363 | { /* MCP55 Ethernet Controller */ |
6364 | PCI_DEVICE(0x10DE, 0x0373), |
6365 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, |
6366 | }, |
6367 | { /* MCP61 Ethernet Controller */ |
6368 | PCI_DEVICE(0x10DE, 0x03E5), |
6369 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6370 | }, |
6371 | { /* MCP61 Ethernet Controller */ |
6372 | PCI_DEVICE(0x10DE, 0x03E6), |
6373 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6374 | }, |
6375 | { /* MCP61 Ethernet Controller */ |
6376 | PCI_DEVICE(0x10DE, 0x03EE), |
6377 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6378 | }, |
6379 | { /* MCP61 Ethernet Controller */ |
6380 | PCI_DEVICE(0x10DE, 0x03EF), |
6381 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, |
6382 | }, |
6383 | { /* MCP65 Ethernet Controller */ |
6384 | PCI_DEVICE(0x10DE, 0x0450), |
6385 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6386 | }, |
6387 | { /* MCP65 Ethernet Controller */ |
6388 | PCI_DEVICE(0x10DE, 0x0451), |
6389 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6390 | }, |
6391 | { /* MCP65 Ethernet Controller */ |
6392 | PCI_DEVICE(0x10DE, 0x0452), |
6393 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6394 | }, |
6395 | { /* MCP65 Ethernet Controller */ |
6396 | PCI_DEVICE(0x10DE, 0x0453), |
6397 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6398 | }, |
6399 | { /* MCP67 Ethernet Controller */ |
6400 | PCI_DEVICE(0x10DE, 0x054C), |
6401 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6402 | }, |
6403 | { /* MCP67 Ethernet Controller */ |
6404 | PCI_DEVICE(0x10DE, 0x054D), |
6405 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6406 | }, |
6407 | { /* MCP67 Ethernet Controller */ |
6408 | PCI_DEVICE(0x10DE, 0x054E), |
6409 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6410 | }, |
6411 | { /* MCP67 Ethernet Controller */ |
6412 | PCI_DEVICE(0x10DE, 0x054F), |
6413 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6414 | }, |
6415 | { /* MCP73 Ethernet Controller */ |
6416 | PCI_DEVICE(0x10DE, 0x07DC), |
6417 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6418 | }, |
6419 | { /* MCP73 Ethernet Controller */ |
6420 | PCI_DEVICE(0x10DE, 0x07DD), |
6421 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6422 | }, |
6423 | { /* MCP73 Ethernet Controller */ |
6424 | PCI_DEVICE(0x10DE, 0x07DE), |
6425 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6426 | }, |
6427 | { /* MCP73 Ethernet Controller */ |
6428 | PCI_DEVICE(0x10DE, 0x07DF), |
6429 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, |
6430 | }, |
6431 | { /* MCP77 Ethernet Controller */ |
6432 | PCI_DEVICE(0x10DE, 0x0760), |
6433 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6434 | }, |
6435 | { /* MCP77 Ethernet Controller */ |
6436 | PCI_DEVICE(0x10DE, 0x0761), |
6437 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6438 | }, |
6439 | { /* MCP77 Ethernet Controller */ |
6440 | PCI_DEVICE(0x10DE, 0x0762), |
6441 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6442 | }, |
6443 | { /* MCP77 Ethernet Controller */ |
6444 | PCI_DEVICE(0x10DE, 0x0763), |
6445 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6446 | }, |
6447 | { /* MCP79 Ethernet Controller */ |
6448 | PCI_DEVICE(0x10DE, 0x0AB0), |
6449 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6450 | }, |
6451 | { /* MCP79 Ethernet Controller */ |
6452 | PCI_DEVICE(0x10DE, 0x0AB1), |
6453 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6454 | }, |
6455 | { /* MCP79 Ethernet Controller */ |
6456 | PCI_DEVICE(0x10DE, 0x0AB2), |
6457 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6458 | }, |
6459 | { /* MCP79 Ethernet Controller */ |
6460 | PCI_DEVICE(0x10DE, 0x0AB3), |
6461 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, |
6462 | }, |
6463 | { /* MCP89 Ethernet Controller */ |
6464 | PCI_DEVICE(0x10DE, 0x0D7D), |
6465 | .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, |
6466 | }, |
6467 | {0,}, |
6468 | }; |
6469 | |
6470 | static struct pci_driver forcedeth_pci_driver = { |
6471 | .name = DRV_NAME, |
6472 | .id_table = pci_tbl, |
6473 | .probe = nv_probe, |
6474 | .remove = nv_remove, |
6475 | .shutdown = nv_shutdown, |
6476 | .driver.pm = NV_PM_OPS, |
6477 | }; |
6478 | |
6479 | module_param(max_interrupt_work, int, 0); |
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6481 | module_param(optimization_mode, int, 0); |
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6483 | module_param(poll_interval, int, 0); |
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
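/* Worked example of the mapping above: poll_interval = 977 corresponds to
 * roughly a 10 ms timer period, since (10000 us * 100) / 2^10 ~= 977.
 */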
6485 | module_param(msi, int, 0); |
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6487 | module_param(msix, int, 0); |
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6489 | module_param(dma_64bit, int, 0); |
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6491 | module_param(phy_cross, int, 0); |
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6493 | module_param(phy_power_down, int, 0); |
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6495 | module_param(debug_tx_timeout, bool, 0); |
6496 | MODULE_PARM_DESC(debug_tx_timeout, |
6497 | "Dump tx related registers and ring when tx_timeout happens" ); |
6498 | |
6499 | module_pci_driver(forcedeth_pci_driver); |
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");
6503 | MODULE_DEVICE_TABLE(pci, pci_tbl); |
6504 | |