1 | /* |
2 | * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver |
3 | * |
4 | * Copyright 2008 JMicron Technology Corporation |
5 | * http://www.jmicron.com/ |
6 | * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org> |
7 | * |
8 | * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> |
9 | * |
10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License. |
13 | * |
14 | * This program is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. |
18 | * |
19 | * You should have received a copy of the GNU General Public License |
20 | * along with this program; if not, write to the Free Software |
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
22 | * |
23 | */ |
24 | |
25 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
26 | |
27 | #include <linux/module.h> |
28 | #include <linux/kernel.h> |
29 | #include <linux/pci.h> |
30 | #include <linux/pci-aspm.h> |
31 | #include <linux/netdevice.h> |
32 | #include <linux/etherdevice.h> |
33 | #include <linux/ethtool.h> |
34 | #include <linux/mii.h> |
35 | #include <linux/crc32.h> |
36 | #include <linux/delay.h> |
37 | #include <linux/spinlock.h> |
38 | #include <linux/in.h> |
39 | #include <linux/ip.h> |
40 | #include <linux/ipv6.h> |
41 | #include <linux/tcp.h> |
42 | #include <linux/udp.h> |
43 | #include <linux/if_vlan.h> |
44 | #include <linux/slab.h> |
45 | #include <net/ip6_checksum.h> |
46 | #include "jme.h" |
47 | |
48 | static int force_pseudohp = -1; |
49 | static int no_pseudohp = -1; |
50 | static int no_extplug = -1; |
51 | module_param(force_pseudohp, int, 0); |
52 | MODULE_PARM_DESC(force_pseudohp, |
53 | "Enable pseudo hot-plug feature manually by driver instead of BIOS." ); |
54 | module_param(no_pseudohp, int, 0); |
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
56 | module_param(no_extplug, int, 0); |
57 | MODULE_PARM_DESC(no_extplug, |
58 | "Do not use external plug signal for pseudo hot-plug." ); |
59 | |
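/*
 * PHY registers are accessed through the SMI interface: a request is
 * posted to JME_SMI and the SMI_OP_REQ bit is polled until the
 * hardware clears it.  MII_BMSR is read twice because its link-status
 * bits are latched; only the second read reflects the current state.
 */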
60 | static int |
61 | jme_mdio_read(struct net_device *netdev, int phy, int reg) |
62 | { |
63 | struct jme_adapter *jme = netdev_priv(netdev); |
64 | int i, val, again = (reg == MII_BMSR) ? 1 : 0; |
65 | |
66 | read_again: |
67 | jwrite32(jme, JME_SMI, SMI_OP_REQ | |
68 | smi_phy_addr(phy) | |
69 | smi_reg_addr(reg)); |
70 | |
71 | wmb(); |
72 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { |
73 | udelay(20); |
74 | val = jread32(jme, JME_SMI); |
75 | if ((val & SMI_OP_REQ) == 0) |
76 | break; |
77 | } |
78 | |
79 | if (i == 0) { |
80 | pr_err("phy(%d) read timeout : %d\n" , phy, reg); |
81 | return 0; |
82 | } |
83 | |
84 | if (again--) |
85 | goto read_again; |
86 | |
87 | return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; |
88 | } |
89 | |
90 | static void |
91 | jme_mdio_write(struct net_device *netdev, |
92 | int phy, int reg, int val) |
93 | { |
94 | struct jme_adapter *jme = netdev_priv(netdev); |
95 | int i; |
96 | |
97 | jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | |
98 | ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | |
99 | smi_phy_addr(phy) | smi_reg_addr(reg)); |
100 | |
101 | wmb(); |
102 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { |
103 | udelay(20); |
104 | if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) |
105 | break; |
106 | } |
107 | |
108 | if (i == 0) |
109 | pr_err("phy(%d) write timeout : %d\n" , phy, reg); |
110 | } |
111 | |
112 | static inline void |
113 | jme_reset_phy_processor(struct jme_adapter *jme) |
114 | { |
115 | u32 val; |
116 | |
117 | jme_mdio_write(jme->dev, |
118 | jme->mii_if.phy_id, |
119 | MII_ADVERTISE, ADVERTISE_ALL | |
120 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
121 | |
122 | if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) |
123 | jme_mdio_write(jme->dev, |
124 | jme->mii_if.phy_id, |
125 | MII_CTRL1000, |
126 | ADVERTISE_1000FULL | ADVERTISE_1000HALF); |
127 | |
128 | val = jme_mdio_read(jme->dev, |
129 | jme->mii_if.phy_id, |
130 | MII_BMCR); |
131 | |
132 | jme_mdio_write(jme->dev, |
133 | jme->mii_if.phy_id, |
134 | MII_BMCR, val | BMCR_RESET); |
135 | } |
136 | |
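/*
 * Program one Wake-on-LAN pattern slot: JME_WFOI selects the target
 * frame slot (and whether the next data-port write is the CRC or a
 * mask dword), then JME_WFODP receives the expected-frame CRC
 * followed by the per-dword byte masks that select which parts of an
 * incoming frame feed the CRC check.
 */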
137 | static void |
138 | jme_setup_wakeup_frame(struct jme_adapter *jme, |
139 | const u32 *mask, u32 crc, int fnr) |
140 | { |
141 | int i; |
142 | |
143 | /* |
144 | * Setup CRC pattern |
145 | */ |
146 | jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); |
147 | wmb(); |
148 | jwrite32(jme, JME_WFODP, crc); |
149 | wmb(); |
150 | |
151 | /* |
152 | * Setup Mask |
153 | */ |
154 | for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { |
155 | jwrite32(jme, JME_WFOI, |
156 | ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | |
157 | (fnr & WFOI_FRAME_SEL)); |
158 | wmb(); |
159 | jwrite32(jme, JME_WFODP, mask[i]); |
160 | wmb(); |
161 | } |
162 | } |
163 | |
164 | static inline void |
165 | jme_mac_rxclk_off(struct jme_adapter *jme) |
166 | { |
167 | jme->reg_gpreg1 |= GPREG1_RXCLKOFF; |
168 | jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); |
169 | } |
170 | |
171 | static inline void |
172 | jme_mac_rxclk_on(struct jme_adapter *jme) |
173 | { |
174 | jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; |
175 | jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); |
176 | } |
177 | |
178 | static inline void |
179 | jme_mac_txclk_off(struct jme_adapter *jme) |
180 | { |
181 | jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); |
182 | jwrite32f(jme, JME_GHC, jme->reg_ghc); |
183 | } |
184 | |
185 | static inline void |
186 | jme_mac_txclk_on(struct jme_adapter *jme) |
187 | { |
188 | u32 speed = jme->reg_ghc & GHC_SPEED; |
189 | if (speed == GHC_SPEED_1000M) |
190 | jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; |
191 | else |
192 | jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; |
193 | jwrite32f(jme, JME_GHC, jme->reg_ghc); |
194 | } |
195 | |
196 | static inline void |
197 | jme_reset_ghc_speed(struct jme_adapter *jme) |
198 | { |
199 | jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); |
200 | jwrite32f(jme, JME_GHC, jme->reg_ghc); |
201 | } |
202 | |
203 | static inline void |
204 | jme_reset_250A2_workaround(struct jme_adapter *jme) |
205 | { |
206 | jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | |
207 | GPREG1_RSSPATCH); |
208 | jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); |
209 | } |
210 | |
211 | static inline void |
212 | jme_assert_ghc_reset(struct jme_adapter *jme) |
213 | { |
214 | jme->reg_ghc |= GHC_SWRST; |
215 | jwrite32f(jme, JME_GHC, jme->reg_ghc); |
216 | } |
217 | |
218 | static inline void |
219 | jme_clear_ghc_reset(struct jme_adapter *jme) |
220 | { |
221 | jme->reg_ghc &= ~GHC_SWRST; |
222 | jwrite32f(jme, JME_GHC, jme->reg_ghc); |
223 | } |
224 | |
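/*
 * Full MAC reset: clear the speed/duplex selection and the JMC250A2
 * workaround bits, pulse GHC_SWRST while cycling the RX/TX MAC clocks
 * so both clock domains latch the reset, then clear the ring
 * base/count registers and the multicast hash, and reprogram every
 * wakeup-frame slot with an all-zero mask.
 */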
225 | static void |
226 | jme_reset_mac_processor(struct jme_adapter *jme) |
227 | { |
228 | static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; |
229 | u32 crc = 0xCDCDCDCD; |
230 | u32 gpreg0; |
231 | int i; |
232 | |
233 | jme_reset_ghc_speed(jme); |
234 | jme_reset_250A2_workaround(jme); |
235 | |
236 | jme_mac_rxclk_on(jme); |
237 | jme_mac_txclk_on(jme); |
238 | udelay(1); |
239 | jme_assert_ghc_reset(jme); |
240 | udelay(1); |
241 | jme_mac_rxclk_off(jme); |
242 | jme_mac_txclk_off(jme); |
243 | udelay(1); |
244 | jme_clear_ghc_reset(jme); |
245 | udelay(1); |
246 | jme_mac_rxclk_on(jme); |
247 | jme_mac_txclk_on(jme); |
248 | udelay(1); |
249 | jme_mac_rxclk_off(jme); |
250 | jme_mac_txclk_off(jme); |
251 | |
252 | jwrite32(jme, JME_RXDBA_LO, 0x00000000); |
253 | jwrite32(jme, JME_RXDBA_HI, 0x00000000); |
254 | jwrite32(jme, JME_RXQDC, 0x00000000); |
255 | jwrite32(jme, JME_RXNDA, 0x00000000); |
256 | jwrite32(jme, JME_TXDBA_LO, 0x00000000); |
257 | jwrite32(jme, JME_TXDBA_HI, 0x00000000); |
258 | jwrite32(jme, JME_TXQDC, 0x00000000); |
259 | jwrite32(jme, JME_TXNDA, 0x00000000); |
260 | |
261 | jwrite32(jme, JME_RXMCHT_LO, 0x00000000); |
262 | jwrite32(jme, JME_RXMCHT_HI, 0x00000000); |
263 | for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) |
264 | jme_setup_wakeup_frame(jme, mask, crc, i); |
265 | if (jme->fpgaver) |
266 | gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; |
267 | else |
268 | gpreg0 = GPREG0_DEFAULT; |
269 | jwrite32(jme, JME_GPREG0, gpreg0); |
270 | } |
271 | |
272 | static inline void |
273 | jme_clear_pm_enable_wol(struct jme_adapter *jme) |
274 | { |
275 | jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); |
276 | } |
277 | |
278 | static inline void |
279 | jme_clear_pm_disable_wol(struct jme_adapter *jme) |
280 | { |
281 | jwrite32(jme, JME_PMCS, PMCS_STMASK); |
282 | } |
283 | |
284 | static int |
285 | jme_reload_eeprom(struct jme_adapter *jme) |
286 | { |
287 | u32 val; |
288 | int i; |
289 | |
290 | val = jread32(jme, JME_SMBCSR); |
291 | |
292 | if (val & SMBCSR_EEPROMD) { |
293 | val |= SMBCSR_CNACK; |
294 | jwrite32(jme, JME_SMBCSR, val); |
295 | val |= SMBCSR_RELOAD; |
296 | jwrite32(jme, JME_SMBCSR, val); |
297 | mdelay(12); |
298 | |
299 | for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { |
300 | mdelay(1); |
301 | if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) |
302 | break; |
303 | } |
304 | |
305 | if (i == 0) { |
306 | pr_err("eeprom reload timeout\n" ); |
307 | return -EIO; |
308 | } |
309 | } |
310 | |
311 | return 0; |
312 | } |
313 | |
314 | static void |
315 | jme_load_macaddr(struct net_device *netdev) |
316 | { |
317 | struct jme_adapter *jme = netdev_priv(netdev); |
318 | unsigned char macaddr[ETH_ALEN]; |
319 | u32 val; |
320 | |
321 | spin_lock_bh(&jme->macaddr_lock); |
322 | val = jread32(jme, JME_RXUMA_LO); |
323 | macaddr[0] = (val >> 0) & 0xFF; |
324 | macaddr[1] = (val >> 8) & 0xFF; |
325 | macaddr[2] = (val >> 16) & 0xFF; |
326 | macaddr[3] = (val >> 24) & 0xFF; |
327 | val = jread32(jme, JME_RXUMA_HI); |
328 | macaddr[4] = (val >> 0) & 0xFF; |
329 | macaddr[5] = (val >> 8) & 0xFF; |
330 | memcpy(netdev->dev_addr, macaddr, ETH_ALEN); |
331 | spin_unlock_bh(&jme->macaddr_lock); |
332 | } |
333 | |
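/*
 * RX interrupt coalescing (PCC) is programmed as a (timeout, packet
 * count) pair: an interrupt fires once PCC_Px_CNT packets have
 * arrived or PCC_Px_TO expires, whichever comes first.  PCC_OFF
 * effectively disables coalescing for NAPI polling, while P1..P3
 * trade latency for a lower interrupt rate.
 */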
334 | static inline void |
335 | jme_set_rx_pcc(struct jme_adapter *jme, int p) |
336 | { |
337 | switch (p) { |
338 | case PCC_OFF: |
339 | jwrite32(jme, JME_PCCRX0, |
340 | ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | |
341 | ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); |
342 | break; |
343 | case PCC_P1: |
344 | jwrite32(jme, JME_PCCRX0, |
345 | ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | |
346 | ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); |
347 | break; |
348 | case PCC_P2: |
349 | jwrite32(jme, JME_PCCRX0, |
350 | ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | |
351 | ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); |
352 | break; |
353 | case PCC_P3: |
354 | jwrite32(jme, JME_PCCRX0, |
355 | ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | |
356 | ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); |
357 | break; |
358 | default: |
359 | break; |
360 | } |
361 | wmb(); |
362 | |
363 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) |
		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
365 | } |
366 | |
367 | static void |
368 | jme_start_irq(struct jme_adapter *jme) |
369 | { |
370 | register struct dynpcc_info *dpi = &(jme->dpi); |
371 | |
372 | jme_set_rx_pcc(jme, PCC_P1); |
373 | dpi->cur = PCC_P1; |
374 | dpi->attempt = PCC_P1; |
375 | dpi->cnt = 0; |
376 | |
377 | jwrite32(jme, JME_PCCTX, |
378 | ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | |
379 | ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | |
380 | PCCTXQ0_EN |
381 | ); |
382 | |
383 | /* |
384 | * Enable Interrupts |
385 | */ |
386 | jwrite32(jme, JME_IENS, INTR_ENABLE); |
387 | } |
388 | |
389 | static inline void |
390 | jme_stop_irq(struct jme_adapter *jme) |
391 | { |
392 | /* |
393 | * Disable Interrupts |
394 | */ |
395 | jwrite32f(jme, JME_IENC, INTR_ENABLE); |
396 | } |
397 | |
398 | static u32 |
399 | jme_linkstat_from_phy(struct jme_adapter *jme) |
400 | { |
401 | u32 phylink, bmsr; |
402 | |
403 | phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); |
404 | bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); |
405 | if (bmsr & BMSR_ANCOMP) |
406 | phylink |= PHY_LINK_AUTONEG_COMPLETE; |
407 | |
408 | return phylink; |
409 | } |
410 | |
411 | static inline void |
412 | jme_set_phyfifo_5level(struct jme_adapter *jme) |
413 | { |
414 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); |
415 | } |
416 | |
417 | static inline void |
418 | jme_set_phyfifo_8level(struct jme_adapter *jme) |
419 | { |
420 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); |
421 | } |
422 | |
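/*
 * Re-evaluate the link and reprogram the MAC to match.  When the PHY
 * reports autonegotiation complete, the resolved speed/duplex comes
 * from the link-status register; otherwise it is reconstructed from
 * the forced BMCR settings.  With testonly set, the function only
 * reports whether the link state is unchanged (returns 1) without
 * touching the hardware.
 */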
423 | static int |
424 | jme_check_link(struct net_device *netdev, int testonly) |
425 | { |
426 | struct jme_adapter *jme = netdev_priv(netdev); |
427 | u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; |
428 | char linkmsg[64]; |
429 | int rc = 0; |
430 | |
431 | linkmsg[0] = '\0'; |
432 | |
433 | if (jme->fpgaver) |
434 | phylink = jme_linkstat_from_phy(jme); |
435 | else |
436 | phylink = jread32(jme, JME_PHY_LINK); |
437 | |
438 | if (phylink & PHY_LINK_UP) { |
439 | if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { |
440 | /* |
441 | * If we did not enable AN |
442 | * Speed/Duplex Info should be obtained from SMI |
443 | */ |
444 | phylink = PHY_LINK_UP; |
445 | |
446 | bmcr = jme_mdio_read(jme->dev, |
447 | jme->mii_if.phy_id, |
448 | MII_BMCR); |
449 | |
450 | phylink |= ((bmcr & BMCR_SPEED1000) && |
451 | (bmcr & BMCR_SPEED100) == 0) ? |
452 | PHY_LINK_SPEED_1000M : |
453 | (bmcr & BMCR_SPEED100) ? |
454 | PHY_LINK_SPEED_100M : |
455 | PHY_LINK_SPEED_10M; |
456 | |
457 | phylink |= (bmcr & BMCR_FULLDPLX) ? |
458 | PHY_LINK_DUPLEX : 0; |
459 | |
460 | strcat(linkmsg, "Forced: " ); |
461 | } else { |
462 | /* |
463 | * Keep polling for speed/duplex resolve complete |
464 | */ |
465 | while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && |
466 | --cnt) { |
467 | |
468 | udelay(1); |
469 | |
470 | if (jme->fpgaver) |
471 | phylink = jme_linkstat_from_phy(jme); |
472 | else |
473 | phylink = jread32(jme, JME_PHY_LINK); |
474 | } |
475 | if (!cnt) |
476 | pr_err("Waiting speed resolve timeout\n" ); |
477 | |
478 | strcat(linkmsg, "ANed: " ); |
479 | } |
480 | |
481 | if (jme->phylink == phylink) { |
482 | rc = 1; |
483 | goto out; |
484 | } |
485 | if (testonly) |
486 | goto out; |
487 | |
488 | jme->phylink = phylink; |
489 | |
490 | /* |
491 | * The speed/duplex setting of jme->reg_ghc already cleared |
492 | * by jme_reset_mac_processor() |
493 | */ |
494 | switch (phylink & PHY_LINK_SPEED_MASK) { |
495 | case PHY_LINK_SPEED_10M: |
496 | jme->reg_ghc |= GHC_SPEED_10M; |
497 | strcat(linkmsg, "10 Mbps, " ); |
498 | break; |
499 | case PHY_LINK_SPEED_100M: |
500 | jme->reg_ghc |= GHC_SPEED_100M; |
501 | strcat(linkmsg, "100 Mbps, " ); |
502 | break; |
503 | case PHY_LINK_SPEED_1000M: |
504 | jme->reg_ghc |= GHC_SPEED_1000M; |
505 | strcat(linkmsg, "1000 Mbps, " ); |
506 | break; |
507 | default: |
508 | break; |
509 | } |
510 | |
511 | if (phylink & PHY_LINK_DUPLEX) { |
512 | jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); |
513 | jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); |
514 | jme->reg_ghc |= GHC_DPX; |
515 | } else { |
516 | jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | |
517 | TXMCS_BACKOFF | |
518 | TXMCS_CARRIERSENSE | |
519 | TXMCS_COLLISION); |
520 | jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); |
521 | } |
522 | |
523 | jwrite32(jme, JME_GHC, jme->reg_ghc); |
524 | |
525 | if (is_buggy250(jme->pdev->device, jme->chiprev)) { |
526 | jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | |
527 | GPREG1_RSSPATCH); |
528 | if (!(phylink & PHY_LINK_DUPLEX)) |
529 | jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; |
530 | switch (phylink & PHY_LINK_SPEED_MASK) { |
531 | case PHY_LINK_SPEED_10M: |
532 | jme_set_phyfifo_8level(jme); |
533 | jme->reg_gpreg1 |= GPREG1_RSSPATCH; |
534 | break; |
535 | case PHY_LINK_SPEED_100M: |
536 | jme_set_phyfifo_5level(jme); |
537 | jme->reg_gpreg1 |= GPREG1_RSSPATCH; |
538 | break; |
539 | case PHY_LINK_SPEED_1000M: |
540 | jme_set_phyfifo_8level(jme); |
541 | break; |
542 | default: |
543 | break; |
544 | } |
545 | } |
546 | jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); |
547 | |
548 | strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? |
549 | "Full-Duplex, " : |
550 | "Half-Duplex, " ); |
551 | strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? |
552 | "MDI-X" : |
553 | "MDI" ); |
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
555 | netif_carrier_on(netdev); |
556 | } else { |
557 | if (testonly) |
558 | goto out; |
559 | |
		netif_info(jme, link, jme->dev, "Link is down\n");
561 | jme->phylink = 0; |
562 | netif_carrier_off(netdev); |
563 | } |
564 | |
565 | out: |
566 | return rc; |
567 | } |
568 | |
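/*
 * The descriptor ring lives in coherent DMA memory.  The allocation
 * (sized by TX_RING_ALLOC_SIZE() in jme.h) is assumed to include
 * enough slack that the base can be rounded up to RING_DESC_ALIGN;
 * alloc/dmaalloc keep the raw addresses for freeing while desc/dma
 * hold the aligned addresses programmed into the hardware.
 */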
569 | static int |
570 | jme_setup_tx_resources(struct jme_adapter *jme) |
571 | { |
572 | struct jme_ring *txring = &(jme->txring[0]); |
573 | |
574 | txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), |
575 | TX_RING_ALLOC_SIZE(jme->tx_ring_size), |
576 | &(txring->dmaalloc), |
577 | GFP_ATOMIC); |
578 | |
579 | if (!txring->alloc) |
580 | goto err_set_null; |
581 | |
582 | /* |
	 * Align the descriptor base to a 16-byte boundary
584 | */ |
585 | txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), |
586 | RING_DESC_ALIGN); |
587 | txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); |
588 | txring->next_to_use = 0; |
589 | atomic_set(&txring->next_to_clean, 0); |
590 | atomic_set(&txring->nr_free, jme->tx_ring_size); |
591 | |
592 | txring->bufinf = kcalloc(jme->tx_ring_size, |
593 | sizeof(struct jme_buffer_info), |
594 | GFP_ATOMIC); |
595 | if (unlikely(!(txring->bufinf))) |
596 | goto err_free_txring; |
597 | |
598 | /* |
599 | * Initialize Transmit Descriptors |
600 | */ |
601 | memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); |
602 | |
603 | return 0; |
604 | |
605 | err_free_txring: |
606 | dma_free_coherent(&(jme->pdev->dev), |
607 | TX_RING_ALLOC_SIZE(jme->tx_ring_size), |
608 | txring->alloc, |
609 | txring->dmaalloc); |
610 | |
611 | err_set_null: |
612 | txring->desc = NULL; |
613 | txring->dmaalloc = 0; |
614 | txring->dma = 0; |
615 | txring->bufinf = NULL; |
616 | |
617 | return -ENOMEM; |
618 | } |
619 | |
620 | static void |
621 | jme_free_tx_resources(struct jme_adapter *jme) |
622 | { |
623 | int i; |
624 | struct jme_ring *txring = &(jme->txring[0]); |
625 | struct jme_buffer_info *txbi; |
626 | |
627 | if (txring->alloc) { |
628 | if (txring->bufinf) { |
629 | for (i = 0 ; i < jme->tx_ring_size ; ++i) { |
630 | txbi = txring->bufinf + i; |
631 | if (txbi->skb) { |
632 | dev_kfree_skb(txbi->skb); |
633 | txbi->skb = NULL; |
634 | } |
635 | txbi->mapping = 0; |
636 | txbi->len = 0; |
637 | txbi->nr_desc = 0; |
638 | txbi->start_xmit = 0; |
639 | } |
640 | kfree(txring->bufinf); |
641 | } |
642 | |
643 | dma_free_coherent(&(jme->pdev->dev), |
644 | TX_RING_ALLOC_SIZE(jme->tx_ring_size), |
645 | txring->alloc, |
646 | txring->dmaalloc); |
647 | |
648 | txring->alloc = NULL; |
649 | txring->desc = NULL; |
650 | txring->dmaalloc = 0; |
651 | txring->dma = 0; |
652 | txring->bufinf = NULL; |
653 | } |
654 | txring->next_to_use = 0; |
655 | atomic_set(&txring->next_to_clean, 0); |
656 | atomic_set(&txring->nr_free, 0); |
657 | } |
658 | |
659 | static inline void |
660 | jme_enable_tx_engine(struct jme_adapter *jme) |
661 | { |
662 | /* |
663 | * Select Queue 0 |
664 | */ |
665 | jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); |
666 | wmb(); |
667 | |
668 | /* |
	 * Setup TX Queue 0 DMA Base Address
670 | */ |
671 | jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); |
672 | jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); |
673 | jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); |
674 | |
675 | /* |
	 * Setup TX Descriptor Count
677 | */ |
678 | jwrite32(jme, JME_TXQDC, jme->tx_ring_size); |
679 | |
680 | /* |
681 | * Enable TX Engine |
682 | */ |
683 | wmb(); |
684 | jwrite32f(jme, JME_TXCS, jme->reg_txcs | |
685 | TXCS_SELECT_QUEUE0 | |
686 | TXCS_ENABLE); |
687 | |
688 | /* |
689 | * Start clock for TX MAC Processor |
690 | */ |
691 | jme_mac_txclk_on(jme); |
692 | } |
693 | |
694 | static inline void |
695 | jme_disable_tx_engine(struct jme_adapter *jme) |
696 | { |
697 | int i; |
698 | u32 val; |
699 | |
700 | /* |
701 | * Disable TX Engine |
702 | */ |
703 | jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); |
704 | wmb(); |
705 | |
706 | val = jread32(jme, JME_TXCS); |
707 | for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { |
708 | mdelay(1); |
709 | val = jread32(jme, JME_TXCS); |
710 | rmb(); |
711 | } |
712 | |
713 | if (!i) |
714 | pr_err("Disable TX engine timeout\n" ); |
715 | |
716 | /* |
717 | * Stop clock for TX MAC Processor |
718 | */ |
719 | jme_mac_txclk_off(jme); |
720 | } |
721 | |
722 | static void |
723 | jme_set_clean_rxdesc(struct jme_adapter *jme, int i) |
724 | { |
725 | struct jme_ring *rxring = &(jme->rxring[0]); |
726 | register struct rxdesc *rxdesc = rxring->desc; |
727 | struct jme_buffer_info *rxbi = rxring->bufinf; |
728 | rxdesc += i; |
729 | rxbi += i; |
730 | |
731 | rxdesc->dw[0] = 0; |
732 | rxdesc->dw[1] = 0; |
733 | rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); |
734 | rxdesc->desc1.bufaddrl = cpu_to_le32( |
735 | (__u64)rxbi->mapping & 0xFFFFFFFFUL); |
736 | rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); |
737 | if (jme->dev->features & NETIF_F_HIGHDMA) |
738 | rxdesc->desc1.flags = RXFLAG_64BIT; |
739 | wmb(); |
740 | rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; |
741 | } |
742 | |
743 | static int |
744 | jme_make_new_rx_buf(struct jme_adapter *jme, int i) |
745 | { |
746 | struct jme_ring *rxring = &(jme->rxring[0]); |
747 | struct jme_buffer_info *rxbi = rxring->bufinf + i; |
748 | struct sk_buff *skb; |
749 | dma_addr_t mapping; |
750 | |
751 | skb = netdev_alloc_skb(jme->dev, |
752 | jme->dev->mtu + RX_EXTRA_LEN); |
753 | if (unlikely(!skb)) |
754 | return -ENOMEM; |
755 | |
756 | mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), |
757 | offset_in_page(skb->data), skb_tailroom(skb), |
758 | PCI_DMA_FROMDEVICE); |
759 | if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { |
760 | dev_kfree_skb(skb); |
761 | return -ENOMEM; |
762 | } |
763 | |
764 | if (likely(rxbi->mapping)) |
765 | pci_unmap_page(jme->pdev, rxbi->mapping, |
766 | rxbi->len, PCI_DMA_FROMDEVICE); |
767 | |
768 | rxbi->skb = skb; |
769 | rxbi->len = skb_tailroom(skb); |
770 | rxbi->mapping = mapping; |
771 | return 0; |
772 | } |
773 | |
774 | static void |
775 | jme_free_rx_buf(struct jme_adapter *jme, int i) |
776 | { |
777 | struct jme_ring *rxring = &(jme->rxring[0]); |
778 | struct jme_buffer_info *rxbi = rxring->bufinf; |
779 | rxbi += i; |
780 | |
781 | if (rxbi->skb) { |
782 | pci_unmap_page(jme->pdev, |
783 | rxbi->mapping, |
784 | rxbi->len, |
785 | PCI_DMA_FROMDEVICE); |
786 | dev_kfree_skb(rxbi->skb); |
787 | rxbi->skb = NULL; |
788 | rxbi->mapping = 0; |
789 | rxbi->len = 0; |
790 | } |
791 | } |
792 | |
793 | static void |
794 | jme_free_rx_resources(struct jme_adapter *jme) |
795 | { |
796 | int i; |
797 | struct jme_ring *rxring = &(jme->rxring[0]); |
798 | |
799 | if (rxring->alloc) { |
800 | if (rxring->bufinf) { |
801 | for (i = 0 ; i < jme->rx_ring_size ; ++i) |
802 | jme_free_rx_buf(jme, i); |
803 | kfree(rxring->bufinf); |
804 | } |
805 | |
806 | dma_free_coherent(&(jme->pdev->dev), |
807 | RX_RING_ALLOC_SIZE(jme->rx_ring_size), |
808 | rxring->alloc, |
809 | rxring->dmaalloc); |
810 | rxring->alloc = NULL; |
811 | rxring->desc = NULL; |
812 | rxring->dmaalloc = 0; |
813 | rxring->dma = 0; |
814 | rxring->bufinf = NULL; |
815 | } |
816 | rxring->next_to_use = 0; |
817 | atomic_set(&rxring->next_to_clean, 0); |
818 | } |
819 | |
820 | static int |
821 | jme_setup_rx_resources(struct jme_adapter *jme) |
822 | { |
823 | int i; |
824 | struct jme_ring *rxring = &(jme->rxring[0]); |
825 | |
826 | rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), |
827 | RX_RING_ALLOC_SIZE(jme->rx_ring_size), |
828 | &(rxring->dmaalloc), |
829 | GFP_ATOMIC); |
830 | if (!rxring->alloc) |
831 | goto err_set_null; |
832 | |
833 | /* |
	 * Align the descriptor base to a 16-byte boundary
835 | */ |
836 | rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), |
837 | RING_DESC_ALIGN); |
838 | rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); |
839 | rxring->next_to_use = 0; |
840 | atomic_set(&rxring->next_to_clean, 0); |
841 | |
842 | rxring->bufinf = kcalloc(jme->rx_ring_size, |
843 | sizeof(struct jme_buffer_info), |
844 | GFP_ATOMIC); |
845 | if (unlikely(!(rxring->bufinf))) |
846 | goto err_free_rxring; |
847 | |
848 | /* |
	 * Initialize Receive Descriptors
850 | */ |
851 | for (i = 0 ; i < jme->rx_ring_size ; ++i) { |
852 | if (unlikely(jme_make_new_rx_buf(jme, i))) { |
853 | jme_free_rx_resources(jme); |
854 | return -ENOMEM; |
855 | } |
856 | |
857 | jme_set_clean_rxdesc(jme, i); |
858 | } |
859 | |
860 | return 0; |
861 | |
862 | err_free_rxring: |
863 | dma_free_coherent(&(jme->pdev->dev), |
864 | RX_RING_ALLOC_SIZE(jme->rx_ring_size), |
865 | rxring->alloc, |
866 | rxring->dmaalloc); |
867 | err_set_null: |
868 | rxring->desc = NULL; |
869 | rxring->dmaalloc = 0; |
870 | rxring->dma = 0; |
871 | rxring->bufinf = NULL; |
872 | |
873 | return -ENOMEM; |
874 | } |
875 | |
876 | static inline void |
877 | jme_enable_rx_engine(struct jme_adapter *jme) |
878 | { |
879 | /* |
880 | * Select Queue 0 |
881 | */ |
882 | jwrite32(jme, JME_RXCS, jme->reg_rxcs | |
883 | RXCS_QUEUESEL_Q0); |
884 | wmb(); |
885 | |
886 | /* |
	 * Setup RX DMA Base Address
888 | */ |
889 | jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); |
890 | jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); |
891 | jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); |
892 | |
893 | /* |
894 | * Setup RX Descriptor Count |
895 | */ |
896 | jwrite32(jme, JME_RXQDC, jme->rx_ring_size); |
897 | |
898 | /* |
899 | * Setup Unicast Filter |
900 | */ |
901 | jme_set_unicastaddr(jme->dev); |
902 | jme_set_multi(jme->dev); |
903 | |
904 | /* |
905 | * Enable RX Engine |
906 | */ |
907 | wmb(); |
908 | jwrite32f(jme, JME_RXCS, jme->reg_rxcs | |
909 | RXCS_QUEUESEL_Q0 | |
910 | RXCS_ENABLE | |
911 | RXCS_QST); |
912 | |
913 | /* |
914 | * Start clock for RX MAC Processor |
915 | */ |
916 | jme_mac_rxclk_on(jme); |
917 | } |
918 | |
919 | static inline void |
920 | jme_restart_rx_engine(struct jme_adapter *jme) |
921 | { |
922 | /* |
923 | * Start RX Engine |
924 | */ |
925 | jwrite32(jme, JME_RXCS, jme->reg_rxcs | |
926 | RXCS_QUEUESEL_Q0 | |
927 | RXCS_ENABLE | |
928 | RXCS_QST); |
929 | } |
930 | |
931 | static inline void |
932 | jme_disable_rx_engine(struct jme_adapter *jme) |
933 | { |
934 | int i; |
935 | u32 val; |
936 | |
937 | /* |
938 | * Disable RX Engine |
939 | */ |
940 | jwrite32(jme, JME_RXCS, jme->reg_rxcs); |
941 | wmb(); |
942 | |
943 | val = jread32(jme, JME_RXCS); |
944 | for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { |
945 | mdelay(1); |
946 | val = jread32(jme, JME_RXCS); |
947 | rmb(); |
948 | } |
949 | |
950 | if (!i) |
951 | pr_err("Disable RX engine timeout\n" ); |
952 | |
953 | /* |
954 | * Stop clock for RX MAC Processor |
955 | */ |
956 | jme_mac_rxclk_off(jme); |
957 | } |
958 | |
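/*
 * Fetch the UDP checksum field of a received IPv4/UDP frame.  A UDP
 * checksum of zero means "not computed" (RFC 768), so the caller
 * suppresses the hardware's UDP-checksum-error report when this
 * returns zero.
 */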
959 | static u16 |
960 | jme_udpsum(struct sk_buff *skb) |
961 | { |
962 | u16 csum = 0xFFFFu; |
963 | |
964 | if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) |
965 | return csum; |
966 | if (skb->protocol != htons(ETH_P_IP)) |
967 | return csum; |
968 | skb_set_network_header(skb, ETH_HLEN); |
969 | if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || |
970 | (skb->len < (ETH_HLEN + |
971 | (ip_hdr(skb)->ihl << 2) + |
972 | sizeof(struct udphdr)))) { |
973 | skb_reset_network_header(skb); |
974 | return csum; |
975 | } |
976 | skb_set_transport_header(skb, |
977 | ETH_HLEN + (ip_hdr(skb)->ihl << 2)); |
978 | csum = udp_hdr(skb)->check; |
979 | skb_reset_transport_header(skb); |
980 | skb_reset_network_header(skb); |
981 | |
982 | return csum; |
983 | } |
984 | |
985 | static int |
986 | jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) |
987 | { |
988 | if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) |
989 | return false; |
990 | |
991 | if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) |
992 | == RXWBFLAG_TCPON)) { |
993 | if (flags & RXWBFLAG_IPV4) |
994 | netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n" ); |
995 | return false; |
996 | } |
997 | |
998 | if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) |
999 | == RXWBFLAG_UDPON) && jme_udpsum(skb)) { |
1000 | if (flags & RXWBFLAG_IPV4) |
1001 | netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n" ); |
1002 | return false; |
1003 | } |
1004 | |
1005 | if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) |
1006 | == RXWBFLAG_IPV4)) { |
1007 | netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n" ); |
1008 | return false; |
1009 | } |
1010 | |
1011 | return true; |
1012 | } |
1013 | |
1014 | static void |
1015 | jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) |
1016 | { |
1017 | struct jme_ring *rxring = &(jme->rxring[0]); |
1018 | struct rxdesc *rxdesc = rxring->desc; |
1019 | struct jme_buffer_info *rxbi = rxring->bufinf; |
1020 | struct sk_buff *skb; |
1021 | int framesize; |
1022 | |
1023 | rxdesc += idx; |
1024 | rxbi += idx; |
1025 | |
1026 | skb = rxbi->skb; |
1027 | pci_dma_sync_single_for_cpu(jme->pdev, |
1028 | rxbi->mapping, |
1029 | rxbi->len, |
1030 | PCI_DMA_FROMDEVICE); |
1031 | |
1032 | if (unlikely(jme_make_new_rx_buf(jme, idx))) { |
1033 | pci_dma_sync_single_for_device(jme->pdev, |
1034 | rxbi->mapping, |
1035 | rxbi->len, |
1036 | PCI_DMA_FROMDEVICE); |
1037 | |
1038 | ++(NET_STAT(jme).rx_dropped); |
1039 | } else { |
1040 | framesize = le16_to_cpu(rxdesc->descwb.framesize) |
1041 | - RX_PREPAD_SIZE; |
1042 | |
1043 | skb_reserve(skb, RX_PREPAD_SIZE); |
1044 | skb_put(skb, framesize); |
1045 | skb->protocol = eth_type_trans(skb, jme->dev); |
1046 | |
1047 | if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) |
1048 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1049 | else |
1050 | skb_checksum_none_assert(skb); |
1051 | |
1052 | if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { |
1053 | u16 vid = le16_to_cpu(rxdesc->descwb.vlan); |
1054 | |
1055 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
1056 | NET_STAT(jme).rx_bytes += 4; |
1057 | } |
1058 | jme->jme_rx(skb); |
1059 | |
1060 | if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == |
1061 | cpu_to_le16(RXWBFLAG_DEST_MUL)) |
1062 | ++(NET_STAT(jme).multicast); |
1063 | |
1064 | NET_STAT(jme).rx_bytes += framesize; |
1065 | ++(NET_STAT(jme).rx_packets); |
1066 | } |
1067 | |
1068 | jme_set_clean_rxdesc(jme, idx); |
1069 | |
1070 | } |
1071 | |
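/*
 * Budget-limited RX completion.  The rx_cleaning atomic acts as a
 * nonblocking re-entry guard (acquired with atomic_dec_and_test,
 * released with atomic_inc), since this path is reachable from both
 * the NAPI poll loop and the rx-clean/rx-empty tasklets.
 */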
1072 | static int |
1073 | jme_process_receive(struct jme_adapter *jme, int limit) |
1074 | { |
1075 | struct jme_ring *rxring = &(jme->rxring[0]); |
1076 | struct rxdesc *rxdesc; |
1077 | int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; |
1078 | |
1079 | if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) |
1080 | goto out_inc; |
1081 | |
1082 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1083 | goto out_inc; |
1084 | |
1085 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1086 | goto out_inc; |
1087 | |
1088 | i = atomic_read(&rxring->next_to_clean); |
1089 | while (limit > 0) { |
1090 | rxdesc = rxring->desc; |
1091 | rxdesc += i; |
1092 | |
1093 | if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || |
1094 | !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) |
1095 | goto out; |
1096 | --limit; |
1097 | |
1098 | rmb(); |
1099 | desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; |
1100 | |
1101 | if (unlikely(desccnt > 1 || |
1102 | rxdesc->descwb.errstat & RXWBERR_ALLERR)) { |
1103 | |
1104 | if (rxdesc->descwb.errstat & RXWBERR_CRCERR) |
1105 | ++(NET_STAT(jme).rx_crc_errors); |
1106 | else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) |
1107 | ++(NET_STAT(jme).rx_fifo_errors); |
1108 | else |
1109 | ++(NET_STAT(jme).rx_errors); |
1110 | |
1111 | if (desccnt > 1) |
1112 | limit -= desccnt - 1; |
1113 | |
1114 | for (j = i, ccnt = desccnt ; ccnt-- ; ) { |
1115 | jme_set_clean_rxdesc(jme, j); |
1116 | j = (j + 1) & (mask); |
1117 | } |
1118 | |
1119 | } else { |
1120 | jme_alloc_and_feed_skb(jme, i); |
1121 | } |
1122 | |
1123 | i = (i + desccnt) & (mask); |
1124 | } |
1125 | |
1126 | out: |
1127 | atomic_set(&rxring->next_to_clean, i); |
1128 | |
1129 | out_inc: |
1130 | atomic_inc(&jme->rx_cleaning); |
1131 | |
1132 | return limit > 0 ? limit : 0; |
1133 | |
1134 | } |
1135 | |
1136 | static void |
1137 | jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) |
1138 | { |
1139 | if (likely(atmp == dpi->cur)) { |
1140 | dpi->cnt = 0; |
1141 | return; |
1142 | } |
1143 | |
1144 | if (dpi->attempt == atmp) { |
1145 | ++(dpi->cnt); |
1146 | } else { |
1147 | dpi->attempt = atmp; |
1148 | dpi->cnt = 0; |
1149 | } |
1150 | |
1151 | } |
1152 | |
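/*
 * Pick a coalescing level from the traffic observed since the last
 * PCC timer tick: a high byte rate suggests bulk transfer (P3), a
 * high packet or interrupt rate suggests many small frames (P2), and
 * anything else gets the low-latency setting (P1).  A new level is
 * committed only after more than five consecutive identical attempts,
 * and stepping down to a lighter level first flushes pending RX work.
 */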
1153 | static void |
1154 | jme_dynamic_pcc(struct jme_adapter *jme) |
1155 | { |
1156 | register struct dynpcc_info *dpi = &(jme->dpi); |
1157 | |
1158 | if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) |
1159 | jme_attempt_pcc(dpi, PCC_P3); |
1160 | else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || |
1161 | dpi->intr_cnt > PCC_INTR_THRESHOLD) |
1162 | jme_attempt_pcc(dpi, PCC_P2); |
1163 | else |
1164 | jme_attempt_pcc(dpi, PCC_P1); |
1165 | |
1166 | if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { |
1167 | if (dpi->attempt < dpi->cur) |
1168 | tasklet_schedule(&jme->rxclean_task); |
1169 | jme_set_rx_pcc(jme, dpi->attempt); |
1170 | dpi->cur = dpi->attempt; |
1171 | dpi->cnt = 0; |
1172 | } |
1173 | } |
1174 | |
1175 | static void |
1176 | jme_start_pcc_timer(struct jme_adapter *jme) |
1177 | { |
1178 | struct dynpcc_info *dpi = &(jme->dpi); |
1179 | dpi->last_bytes = NET_STAT(jme).rx_bytes; |
1180 | dpi->last_pkts = NET_STAT(jme).rx_packets; |
1181 | dpi->intr_cnt = 0; |
1182 | jwrite32(jme, JME_TMCSR, |
1183 | TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); |
1184 | } |
1185 | |
1186 | static inline void |
1187 | jme_stop_pcc_timer(struct jme_adapter *jme) |
1188 | { |
1189 | jwrite32(jme, JME_TMCSR, 0); |
1190 | } |
1191 | |
1192 | static void |
1193 | jme_shutdown_nic(struct jme_adapter *jme) |
1194 | { |
1195 | u32 phylink; |
1196 | |
1197 | phylink = jme_linkstat_from_phy(jme); |
1198 | |
1199 | if (!(phylink & PHY_LINK_UP)) { |
1200 | /* |
		 * Disable all interrupts before arming the timer
1202 | */ |
1203 | jme_stop_irq(jme); |
1204 | jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); |
1205 | } |
1206 | } |
1207 | |
1208 | static void |
1209 | jme_pcc_tasklet(unsigned long arg) |
1210 | { |
1211 | struct jme_adapter *jme = (struct jme_adapter *)arg; |
1212 | struct net_device *netdev = jme->dev; |
1213 | |
1214 | if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { |
1215 | jme_shutdown_nic(jme); |
1216 | return; |
1217 | } |
1218 | |
1219 | if (unlikely(!netif_carrier_ok(netdev) || |
1220 | (atomic_read(&jme->link_changing) != 1) |
1221 | )) { |
1222 | jme_stop_pcc_timer(jme); |
1223 | return; |
1224 | } |
1225 | |
1226 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) |
1227 | jme_dynamic_pcc(jme); |
1228 | |
1229 | jme_start_pcc_timer(jme); |
1230 | } |
1231 | |
1232 | static inline void |
1233 | jme_polling_mode(struct jme_adapter *jme) |
1234 | { |
1235 | jme_set_rx_pcc(jme, PCC_OFF); |
1236 | } |
1237 | |
1238 | static inline void |
1239 | jme_interrupt_mode(struct jme_adapter *jme) |
1240 | { |
1241 | jme_set_rx_pcc(jme, PCC_P1); |
1242 | } |
1243 | |
1244 | static inline int |
1245 | jme_pseudo_hotplug_enabled(struct jme_adapter *jme) |
1246 | { |
1247 | u32 apmc; |
1248 | apmc = jread32(jme, JME_APMC); |
1249 | return apmc & JME_APMC_PSEUDO_HP_EN; |
1250 | } |
1251 | |
1252 | static void |
1253 | jme_start_shutdown_timer(struct jme_adapter *jme) |
1254 | { |
1255 | u32 apmc; |
1256 | |
1257 | apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; |
1258 | apmc &= ~JME_APMC_EPIEN_CTRL; |
1259 | if (!no_extplug) { |
1260 | jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); |
1261 | wmb(); |
1262 | } |
1263 | jwrite32f(jme, JME_APMC, apmc); |
1264 | |
1265 | jwrite32f(jme, JME_TIMER2, 0); |
1266 | set_bit(JME_FLAG_SHUTDOWN, &jme->flags); |
1267 | jwrite32(jme, JME_TMCSR, |
1268 | TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); |
1269 | } |
1270 | |
1271 | static void |
1272 | jme_stop_shutdown_timer(struct jme_adapter *jme) |
1273 | { |
1274 | u32 apmc; |
1275 | |
1276 | jwrite32f(jme, JME_TMCSR, 0); |
1277 | jwrite32f(jme, JME_TIMER2, 0); |
1278 | clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); |
1279 | |
1280 | apmc = jread32(jme, JME_APMC); |
1281 | apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); |
1282 | jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); |
1283 | wmb(); |
1284 | jwrite32f(jme, JME_APMC, apmc); |
1285 | } |
1286 | |
1287 | static void |
1288 | jme_link_change_tasklet(unsigned long arg) |
1289 | { |
1290 | struct jme_adapter *jme = (struct jme_adapter *)arg; |
1291 | struct net_device *netdev = jme->dev; |
1292 | int rc; |
1293 | |
1294 | while (!atomic_dec_and_test(&jme->link_changing)) { |
1295 | atomic_inc(&jme->link_changing); |
1296 | netif_info(jme, intr, jme->dev, "Get link change lock failed\n" ); |
1297 | while (atomic_read(&jme->link_changing) != 1) |
1298 | netif_info(jme, intr, jme->dev, "Waiting link change lock\n" ); |
1299 | } |
1300 | |
1301 | if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) |
1302 | goto out; |
1303 | |
1304 | jme->old_mtu = netdev->mtu; |
1305 | netif_stop_queue(netdev); |
1306 | if (jme_pseudo_hotplug_enabled(jme)) |
1307 | jme_stop_shutdown_timer(jme); |
1308 | |
1309 | jme_stop_pcc_timer(jme); |
1310 | tasklet_disable(&jme->txclean_task); |
1311 | tasklet_disable(&jme->rxclean_task); |
1312 | tasklet_disable(&jme->rxempty_task); |
1313 | |
1314 | if (netif_carrier_ok(netdev)) { |
1315 | jme_disable_rx_engine(jme); |
1316 | jme_disable_tx_engine(jme); |
1317 | jme_reset_mac_processor(jme); |
1318 | jme_free_rx_resources(jme); |
1319 | jme_free_tx_resources(jme); |
1320 | |
1321 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
1322 | jme_polling_mode(jme); |
1323 | |
1324 | netif_carrier_off(netdev); |
1325 | } |
1326 | |
1327 | jme_check_link(netdev, 0); |
1328 | if (netif_carrier_ok(netdev)) { |
1329 | rc = jme_setup_rx_resources(jme); |
1330 | if (rc) { |
1331 | pr_err("Allocating resources for RX error, Device STOPPED!\n" ); |
1332 | goto out_enable_tasklet; |
1333 | } |
1334 | |
1335 | rc = jme_setup_tx_resources(jme); |
1336 | if (rc) { |
1337 | pr_err("Allocating resources for TX error, Device STOPPED!\n" ); |
1338 | goto err_out_free_rx_resources; |
1339 | } |
1340 | |
1341 | jme_enable_rx_engine(jme); |
1342 | jme_enable_tx_engine(jme); |
1343 | |
1344 | netif_start_queue(netdev); |
1345 | |
1346 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
1347 | jme_interrupt_mode(jme); |
1348 | |
1349 | jme_start_pcc_timer(jme); |
1350 | } else if (jme_pseudo_hotplug_enabled(jme)) { |
1351 | jme_start_shutdown_timer(jme); |
1352 | } |
1353 | |
1354 | goto out_enable_tasklet; |
1355 | |
1356 | err_out_free_rx_resources: |
1357 | jme_free_rx_resources(jme); |
1358 | out_enable_tasklet: |
1359 | tasklet_enable(&jme->txclean_task); |
1360 | tasklet_enable(&jme->rxclean_task); |
1361 | tasklet_enable(&jme->rxempty_task); |
1362 | out: |
1363 | atomic_inc(&jme->link_changing); |
1364 | } |
1365 | |
1366 | static void |
1367 | jme_rx_clean_tasklet(unsigned long arg) |
1368 | { |
1369 | struct jme_adapter *jme = (struct jme_adapter *)arg; |
1370 | struct dynpcc_info *dpi = &(jme->dpi); |
1371 | |
1372 | jme_process_receive(jme, jme->rx_ring_size); |
1373 | ++(dpi->intr_cnt); |
1374 | |
1375 | } |
1376 | |
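/*
 * NAPI poll handler.  RX-empty events recorded by the interrupt
 * handler are paid for here by restarting the RX engine and counting
 * a dropped frame; if the budget was not exhausted, polling completes
 * and the device is switched back to interrupt (coalescing) mode.
 */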
1377 | static int |
1378 | jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) |
1379 | { |
1380 | struct jme_adapter *jme = jme_napi_priv(holder); |
1381 | int rest; |
1382 | |
1383 | rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); |
1384 | |
1385 | while (atomic_read(&jme->rx_empty) > 0) { |
1386 | atomic_dec(&jme->rx_empty); |
1387 | ++(NET_STAT(jme).rx_dropped); |
1388 | jme_restart_rx_engine(jme); |
1389 | } |
1390 | atomic_inc(&jme->rx_empty); |
1391 | |
1392 | if (rest) { |
1393 | JME_RX_COMPLETE(netdev, holder); |
1394 | jme_interrupt_mode(jme); |
1395 | } |
1396 | |
1397 | JME_NAPI_WEIGHT_SET(budget, rest); |
1398 | return JME_NAPI_WEIGHT_VAL(budget) - rest; |
1399 | } |
1400 | |
1401 | static void |
1402 | jme_rx_empty_tasklet(unsigned long arg) |
1403 | { |
1404 | struct jme_adapter *jme = (struct jme_adapter *)arg; |
1405 | |
1406 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1407 | return; |
1408 | |
1409 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1410 | return; |
1411 | |
1412 | netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n" ); |
1413 | |
1414 | jme_rx_clean_tasklet(arg); |
1415 | |
1416 | while (atomic_read(&jme->rx_empty) > 0) { |
1417 | atomic_dec(&jme->rx_empty); |
1418 | ++(NET_STAT(jme).rx_dropped); |
1419 | jme_restart_rx_engine(jme); |
1420 | } |
1421 | atomic_inc(&jme->rx_empty); |
1422 | } |
1423 | |
1424 | static void |
1425 | jme_wake_queue_if_stopped(struct jme_adapter *jme) |
1426 | { |
1427 | struct jme_ring *txring = &(jme->txring[0]); |
1428 | |
1429 | smp_wmb(); |
1430 | if (unlikely(netif_queue_stopped(jme->dev) && |
1431 | atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { |
		netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
1433 | netif_wake_queue(jme->dev); |
1434 | } |
1435 | |
1436 | } |
1437 | |
1438 | static void |
1439 | jme_tx_clean_tasklet(unsigned long arg) |
1440 | { |
1441 | struct jme_adapter *jme = (struct jme_adapter *)arg; |
1442 | struct jme_ring *txring = &(jme->txring[0]); |
1443 | struct txdesc *txdesc = txring->desc; |
1444 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; |
1445 | int i, j, cnt = 0, max, err, mask; |
1446 | |
1447 | tx_dbg(jme, "Into txclean\n" ); |
1448 | |
1449 | if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) |
1450 | goto out; |
1451 | |
1452 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1453 | goto out; |
1454 | |
1455 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1456 | goto out; |
1457 | |
1458 | max = jme->tx_ring_size - atomic_read(&txring->nr_free); |
1459 | mask = jme->tx_ring_mask; |
1460 | |
1461 | for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { |
1462 | |
1463 | ctxbi = txbi + i; |
1464 | |
1465 | if (likely(ctxbi->skb && |
1466 | !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { |
1467 | |
1468 | tx_dbg(jme, "txclean: %d+%d@%lu\n" , |
1469 | i, ctxbi->nr_desc, jiffies); |
1470 | |
1471 | err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; |
1472 | |
1473 | for (j = 1 ; j < ctxbi->nr_desc ; ++j) { |
1474 | ttxbi = txbi + ((i + j) & (mask)); |
1475 | txdesc[(i + j) & (mask)].dw[0] = 0; |
1476 | |
1477 | pci_unmap_page(jme->pdev, |
1478 | ttxbi->mapping, |
1479 | ttxbi->len, |
1480 | PCI_DMA_TODEVICE); |
1481 | |
1482 | ttxbi->mapping = 0; |
1483 | ttxbi->len = 0; |
1484 | } |
1485 | |
1486 | dev_kfree_skb(ctxbi->skb); |
1487 | |
1488 | cnt += ctxbi->nr_desc; |
1489 | |
1490 | if (unlikely(err)) { |
1491 | ++(NET_STAT(jme).tx_carrier_errors); |
1492 | } else { |
1493 | ++(NET_STAT(jme).tx_packets); |
1494 | NET_STAT(jme).tx_bytes += ctxbi->len; |
1495 | } |
1496 | |
1497 | ctxbi->skb = NULL; |
1498 | ctxbi->len = 0; |
1499 | ctxbi->start_xmit = 0; |
1500 | |
1501 | } else { |
1502 | break; |
1503 | } |
1504 | |
1505 | i = (i + ctxbi->nr_desc) & mask; |
1506 | |
1507 | ctxbi->nr_desc = 0; |
1508 | } |
1509 | |
1510 | tx_dbg(jme, "txclean: done %d@%lu\n" , i, jiffies); |
1511 | atomic_set(&txring->next_to_clean, i); |
1512 | atomic_add(cnt, &txring->nr_free); |
1513 | |
1514 | jme_wake_queue_if_stopped(jme); |
1515 | |
1516 | out: |
1517 | atomic_inc(&jme->tx_cleaning); |
1518 | } |
1519 | |
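/*
 * Common interrupt dispatch for both INTx and MSI.  Interrupts are
 * masked for the duration, events are acknowledged by writing them
 * back to JME_IEVE, and the real work is deferred to tasklets (or to
 * NAPI for RX when JME_FLAG_POLL is set).
 */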
1520 | static void |
1521 | jme_intr_msi(struct jme_adapter *jme, u32 intrstat) |
1522 | { |
1523 | /* |
1524 | * Disable interrupt |
1525 | */ |
1526 | jwrite32f(jme, JME_IENC, INTR_ENABLE); |
1527 | |
1528 | if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { |
1529 | /* |
		 * A link change event is critical;
		 * all other events are ignored
1532 | */ |
1533 | jwrite32(jme, JME_IEVE, intrstat); |
1534 | tasklet_schedule(&jme->linkch_task); |
1535 | goto out_reenable; |
1536 | } |
1537 | |
1538 | if (intrstat & INTR_TMINTR) { |
1539 | jwrite32(jme, JME_IEVE, INTR_TMINTR); |
1540 | tasklet_schedule(&jme->pcc_task); |
1541 | } |
1542 | |
1543 | if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { |
1544 | jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); |
1545 | tasklet_schedule(&jme->txclean_task); |
1546 | } |
1547 | |
1548 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { |
1549 | jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | |
1550 | INTR_PCCRX0 | |
1551 | INTR_RX0EMP)) | |
1552 | INTR_RX0); |
1553 | } |
1554 | |
1555 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { |
1556 | if (intrstat & INTR_RX0EMP) |
1557 | atomic_inc(&jme->rx_empty); |
1558 | |
1559 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { |
1560 | if (likely(JME_RX_SCHEDULE_PREP(jme))) { |
1561 | jme_polling_mode(jme); |
1562 | JME_RX_SCHEDULE(jme); |
1563 | } |
1564 | } |
1565 | } else { |
1566 | if (intrstat & INTR_RX0EMP) { |
1567 | atomic_inc(&jme->rx_empty); |
1568 | tasklet_hi_schedule(&jme->rxempty_task); |
1569 | } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { |
1570 | tasklet_hi_schedule(&jme->rxclean_task); |
1571 | } |
1572 | } |
1573 | |
1574 | out_reenable: |
1575 | /* |
1576 | * Re-enable interrupt |
1577 | */ |
1578 | jwrite32f(jme, JME_IENS, INTR_ENABLE); |
1579 | } |
1580 | |
1581 | static irqreturn_t |
1582 | jme_intr(int irq, void *dev_id) |
1583 | { |
1584 | struct net_device *netdev = dev_id; |
1585 | struct jme_adapter *jme = netdev_priv(netdev); |
1586 | u32 intrstat; |
1587 | |
1588 | intrstat = jread32(jme, JME_IEVE); |
1589 | |
1590 | /* |
1591 | * Check if it's really an interrupt for us |
1592 | */ |
1593 | if (unlikely((intrstat & INTR_ENABLE) == 0)) |
1594 | return IRQ_NONE; |
1595 | |
1596 | /* |
	 * Check if the device still exists
1598 | */ |
1599 | if (unlikely(intrstat == ~((typeof(intrstat))0))) |
1600 | return IRQ_NONE; |
1601 | |
1602 | jme_intr_msi(jme, intrstat); |
1603 | |
1604 | return IRQ_HANDLED; |
1605 | } |
1606 | |
1607 | static irqreturn_t |
1608 | jme_msi(int irq, void *dev_id) |
1609 | { |
1610 | struct net_device *netdev = dev_id; |
1611 | struct jme_adapter *jme = netdev_priv(netdev); |
1612 | u32 intrstat; |
1613 | |
1614 | intrstat = jread32(jme, JME_IEVE); |
1615 | |
1616 | jme_intr_msi(jme, intrstat); |
1617 | |
1618 | return IRQ_HANDLED; |
1619 | } |
1620 | |
1621 | static void |
1622 | jme_reset_link(struct jme_adapter *jme) |
1623 | { |
1624 | jwrite32(jme, JME_TMCSR, TMCSR_SWIT); |
1625 | } |
1626 | |
1627 | static void |
1628 | jme_restart_an(struct jme_adapter *jme) |
1629 | { |
1630 | u32 bmcr; |
1631 | |
1632 | spin_lock_bh(&jme->phy_lock); |
1633 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); |
1634 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); |
1635 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); |
1636 | spin_unlock_bh(&jme->phy_lock); |
1637 | } |
1638 | |
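/*
 * Prefer MSI and fall back to a shared INTx line if pci_enable_msi()
 * fails.  With MSI the handler can skip the "is this ours" check done
 * in jme_intr(), hence the separate jme_msi() entry point.
 */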
1639 | static int |
1640 | jme_request_irq(struct jme_adapter *jme) |
1641 | { |
1642 | int rc; |
1643 | struct net_device *netdev = jme->dev; |
1644 | irq_handler_t handler = jme_intr; |
1645 | int irq_flags = IRQF_SHARED; |
1646 | |
1647 | if (!pci_enable_msi(jme->pdev)) { |
1648 | set_bit(JME_FLAG_MSI, &jme->flags); |
1649 | handler = jme_msi; |
1650 | irq_flags = 0; |
1651 | } |
1652 | |
1653 | rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, |
1654 | netdev); |
1655 | if (rc) { |
1656 | netdev_err(netdev, |
1657 | "Unable to request %s interrupt (return: %d)\n" , |
1658 | test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx" , |
1659 | rc); |
1660 | |
1661 | if (test_bit(JME_FLAG_MSI, &jme->flags)) { |
1662 | pci_disable_msi(jme->pdev); |
1663 | clear_bit(JME_FLAG_MSI, &jme->flags); |
1664 | } |
1665 | } else { |
1666 | netdev->irq = jme->pdev->irq; |
1667 | } |
1668 | |
1669 | return rc; |
1670 | } |
1671 | |
1672 | static void |
1673 | jme_free_irq(struct jme_adapter *jme) |
1674 | { |
1675 | free_irq(jme->pdev->irq, jme->dev); |
1676 | if (test_bit(JME_FLAG_MSI, &jme->flags)) { |
1677 | pci_disable_msi(jme->pdev); |
1678 | clear_bit(JME_FLAG_MSI, &jme->flags); |
1679 | jme->dev->irq = jme->pdev->irq; |
1680 | } |
1681 | } |
1682 | |
1683 | static inline void |
1684 | jme_new_phy_on(struct jme_adapter *jme) |
1685 | { |
1686 | u32 reg; |
1687 | |
1688 | reg = jread32(jme, JME_PHY_PWR); |
1689 | reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | |
1690 | PHY_PWR_DWN2 | PHY_PWR_CLKSEL); |
1691 | jwrite32(jme, JME_PHY_PWR, reg); |
1692 | |
1693 | pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); |
1694 | reg &= ~PE1_GPREG0_PBG; |
1695 | reg |= PE1_GPREG0_ENBG; |
1696 | pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); |
1697 | } |
1698 | |
1699 | static inline void |
1700 | jme_new_phy_off(struct jme_adapter *jme) |
1701 | { |
1702 | u32 reg; |
1703 | |
1704 | reg = jread32(jme, JME_PHY_PWR); |
1705 | reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | |
1706 | PHY_PWR_DWN2 | PHY_PWR_CLKSEL; |
1707 | jwrite32(jme, JME_PHY_PWR, reg); |
1708 | |
1709 | pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); |
1710 | reg &= ~PE1_GPREG0_PBG; |
1711 | reg |= PE1_GPREG0_PDD3COLD; |
1712 | pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); |
1713 | } |
1714 | |
1715 | static inline void |
1716 | jme_phy_on(struct jme_adapter *jme) |
1717 | { |
1718 | u32 bmcr; |
1719 | |
1720 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); |
1721 | bmcr &= ~BMCR_PDOWN; |
1722 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); |
1723 | |
1724 | if (new_phy_power_ctrl(jme->chip_main_rev)) |
1725 | jme_new_phy_on(jme); |
1726 | } |
1727 | |
1728 | static inline void |
1729 | jme_phy_off(struct jme_adapter *jme) |
1730 | { |
1731 | u32 bmcr; |
1732 | |
1733 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); |
1734 | bmcr |= BMCR_PDOWN; |
1735 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); |
1736 | |
1737 | if (new_phy_power_ctrl(jme->chip_main_rev)) |
1738 | jme_new_phy_off(jme); |
1739 | } |
1740 | |
1741 | static int |
1742 | jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) |
1743 | { |
1744 | u32 phy_addr; |
1745 | |
1746 | phy_addr = JM_PHY_SPEC_REG_READ | specreg; |
1747 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, |
1748 | phy_addr); |
1749 | return jme_mdio_read(jme->dev, jme->mii_if.phy_id, |
1750 | JM_PHY_SPEC_DATA_REG); |
1751 | } |
1752 | |
1753 | static void |
1754 | jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) |
1755 | { |
1756 | u32 phy_addr; |
1757 | |
1758 | phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; |
1759 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, |
1760 | phy_data); |
1761 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, |
1762 | phy_addr); |
1763 | } |
1764 | |
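/*
 * PHY receiver calibration as performed by this driver: power-cycle
 * the PHY, enter test mode 1 through MII_CTRL1000, latch and enable
 * the calibration via the vendor-specific EXT_COMM_2 register, wait
 * 20ms, then undo both settings.
 */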
1765 | static int |
1766 | jme_phy_calibration(struct jme_adapter *jme) |
1767 | { |
1768 | u32 ctrl1000, phy_data; |
1769 | |
1770 | jme_phy_off(jme); |
1771 | jme_phy_on(jme); |
	/* Enable PHY test mode 1 */
1773 | ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); |
1774 | ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; |
1775 | ctrl1000 |= PHY_GAD_TEST_MODE_1; |
1776 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); |
1777 | |
1778 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); |
1779 | phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; |
1780 | phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | |
1781 | JM_PHY_EXT_COMM_2_CALI_ENABLE; |
1782 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); |
1783 | msleep(20); |
1784 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); |
1785 | phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | |
1786 | JM_PHY_EXT_COMM_2_CALI_MODE_0 | |
1787 | JM_PHY_EXT_COMM_2_CALI_LATCH); |
1788 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); |
1789 | |
1790 | /* Disable PHY test mode */ |
1791 | ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); |
1792 | ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; |
1793 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); |
1794 | return 0; |
1795 | } |
1796 | |
1797 | static int |
1798 | jme_phy_setEA(struct jme_adapter *jme) |
1799 | { |
1800 | u32 phy_comm0 = 0, phy_comm1 = 0; |
1801 | u8 nic_ctrl; |
1802 | |
1803 | pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); |
1804 | if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) |
1805 | return 0; |
1806 | |
1807 | switch (jme->pdev->device) { |
1808 | case PCI_DEVICE_ID_JMICRON_JMC250: |
1809 | if (((jme->chip_main_rev == 5) && |
1810 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || |
1811 | (jme->chip_sub_rev == 3))) || |
1812 | (jme->chip_main_rev >= 6)) { |
1813 | phy_comm0 = 0x008A; |
1814 | phy_comm1 = 0x4109; |
1815 | } |
1816 | if ((jme->chip_main_rev == 3) && |
1817 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) |
1818 | phy_comm0 = 0xE088; |
1819 | break; |
1820 | case PCI_DEVICE_ID_JMICRON_JMC260: |
1821 | if (((jme->chip_main_rev == 5) && |
1822 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || |
1823 | (jme->chip_sub_rev == 3))) || |
1824 | (jme->chip_main_rev >= 6)) { |
1825 | phy_comm0 = 0x008A; |
1826 | phy_comm1 = 0x4109; |
1827 | } |
1828 | if ((jme->chip_main_rev == 3) && |
1829 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) |
1830 | phy_comm0 = 0xE088; |
1831 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) |
1832 | phy_comm0 = 0x608A; |
1833 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) |
1834 | phy_comm0 = 0x408A; |
1835 | break; |
1836 | default: |
1837 | return -ENODEV; |
1838 | } |
1839 | if (phy_comm0) |
1840 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); |
1841 | if (phy_comm1) |
1842 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1); |
1843 | |
1844 | return 0; |
1845 | } |
1846 | |
1847 | static int |
1848 | jme_open(struct net_device *netdev) |
1849 | { |
1850 | struct jme_adapter *jme = netdev_priv(netdev); |
1851 | int rc; |
1852 | |
1853 | jme_clear_pm_disable_wol(jme); |
1854 | JME_NAPI_ENABLE(jme); |
1855 | |
1856 | tasklet_init(&jme->linkch_task, jme_link_change_tasklet, |
1857 | (unsigned long) jme); |
1858 | tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, |
1859 | (unsigned long) jme); |
1860 | tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, |
1861 | (unsigned long) jme); |
1862 | tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, |
1863 | (unsigned long) jme); |
1864 | |
1865 | rc = jme_request_irq(jme); |
1866 | if (rc) |
1867 | goto err_out; |
1868 | |
1869 | jme_start_irq(jme); |
1870 | |
1871 | jme_phy_on(jme); |
1872 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
1873 | jme_set_link_ksettings(netdev, &jme->old_cmd); |
1874 | else |
1875 | jme_reset_phy_processor(jme); |
1876 | jme_phy_calibration(jme); |
1877 | jme_phy_setEA(jme); |
1878 | jme_reset_link(jme); |
1879 | |
1880 | return 0; |
1881 | |
1882 | err_out: |
1883 | netif_stop_queue(netdev); |
1884 | netif_carrier_off(netdev); |
1885 | return rc; |
1886 | } |
1887 | |
1888 | static void |
1889 | jme_set_100m_half(struct jme_adapter *jme) |
1890 | { |
1891 | u32 bmcr, tmp; |
1892 | |
1893 | jme_phy_on(jme); |
1894 | bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); |
1895 | tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | |
1896 | BMCR_SPEED1000 | BMCR_FULLDPLX); |
1897 | tmp |= BMCR_SPEED100; |
1898 | |
1899 | if (bmcr != tmp) |
1900 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); |
1901 | |
1902 | if (jme->fpgaver) |
1903 | jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); |
1904 | else |
1905 | jwrite32(jme, JME_GHC, GHC_SPEED_100M); |
1906 | } |
1907 | |
1908 | #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ |
1909 | static void |
1910 | jme_wait_link(struct jme_adapter *jme) |
1911 | { |
1912 | u32 phylink, to = JME_WAIT_LINK_TIME; |
1913 | |
1914 | msleep(1000); |
1915 | phylink = jme_linkstat_from_phy(jme); |
1916 | while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { |
1917 | usleep_range(10000, 11000); |
1918 | phylink = jme_linkstat_from_phy(jme); |
1919 | } |
1920 | } |
1921 | |
1922 | static void |
1923 | jme_powersave_phy(struct jme_adapter *jme) |
1924 | { |
1925 | if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) { |
1926 | jme_set_100m_half(jme); |
1927 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) |
1928 | jme_wait_link(jme); |
1929 | jme_clear_pm_enable_wol(jme); |
1930 | } else { |
1931 | jme_phy_off(jme); |
1932 | } |
1933 | } |
1934 | |
1935 | static int |
1936 | jme_close(struct net_device *netdev) |
1937 | { |
1938 | struct jme_adapter *jme = netdev_priv(netdev); |
1939 | |
1940 | netif_stop_queue(netdev); |
1941 | netif_carrier_off(netdev); |
1942 | |
1943 | jme_stop_irq(jme); |
1944 | jme_free_irq(jme); |
1945 | |
1946 | JME_NAPI_DISABLE(jme); |
1947 | |
1948 | tasklet_kill(&jme->linkch_task); |
1949 | tasklet_kill(&jme->txclean_task); |
1950 | tasklet_kill(&jme->rxclean_task); |
1951 | tasklet_kill(&jme->rxempty_task); |
1952 | |
1953 | jme_disable_rx_engine(jme); |
1954 | jme_disable_tx_engine(jme); |
1955 | jme_reset_mac_processor(jme); |
1956 | jme_free_rx_resources(jme); |
1957 | jme_free_tx_resources(jme); |
1958 | jme->phylink = 0; |
1959 | jme_phy_off(jme); |
1960 | |
1961 | return 0; |
1962 | } |
1963 | |
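/*
 * Reserve nr_frags + 2 descriptors from the TX ring: one header
 * descriptor, one for the linear part of the skb and one per page
 * fragment.  Returns the start index, or -1 if the ring is too full.
 */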
1964 | static int |
1965 | jme_alloc_txdesc(struct jme_adapter *jme, |
1966 | struct sk_buff *skb) |
1967 | { |
1968 | struct jme_ring *txring = &(jme->txring[0]); |
1969 | int idx, nr_alloc, mask = jme->tx_ring_mask; |
1970 | |
1971 | idx = txring->next_to_use; |
1972 | nr_alloc = skb_shinfo(skb)->nr_frags + 2; |
1973 | |
1974 | if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) |
1975 | return -1; |
1976 | |
1977 | atomic_sub(nr_alloc, &txring->nr_free); |
1978 | |
1979 | txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; |
1980 | |
1981 | return idx; |
1982 | } |
1983 | |
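/*
 * DMA-map one buffer and fill a data descriptor (desc2 layout) with
 * its length and 64-bit bus address; the mapping is recorded in the
 * buffer info so it can be unmapped on completion or on error.
 */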
1984 | static int |
1985 | jme_fill_tx_map(struct pci_dev *pdev, |
1986 | struct txdesc *txdesc, |
1987 | struct jme_buffer_info *txbi, |
1988 | struct page *page, |
1989 | u32 page_offset, |
1990 | u32 len, |
1991 | bool hidma) |
1992 | { |
1993 | dma_addr_t dmaaddr; |
1994 | |
1995 | dmaaddr = pci_map_page(pdev, |
1996 | page, |
1997 | page_offset, |
1998 | len, |
1999 | PCI_DMA_TODEVICE); |
2000 | |
2001 | if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) |
2002 | return -EINVAL; |
2003 | |
2004 | pci_dma_sync_single_for_device(pdev, |
2005 | dmaaddr, |
2006 | len, |
2007 | PCI_DMA_TODEVICE); |
2008 | |
2009 | txdesc->dw[0] = 0; |
2010 | txdesc->dw[1] = 0; |
2011 | txdesc->desc2.flags = TXFLAG_OWN; |
2012 | txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0; |
2013 | txdesc->desc2.datalen = cpu_to_le16(len); |
2014 | txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); |
2015 | txdesc->desc2.bufaddrl = cpu_to_le32( |
2016 | (__u64)dmaaddr & 0xFFFFFFFFUL); |
2017 | |
2018 | txbi->mapping = dmaaddr; |
2019 | txbi->len = len; |
2020 | return 0; |
2021 | } |
2022 | |
2023 | static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) |
2024 | { |
2025 | struct jme_ring *txring = &(jme->txring[0]); |
2026 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; |
2027 | int mask = jme->tx_ring_mask; |
2028 | int j; |
2029 | |
2030 | for (j = 0 ; j < count ; j++) { |
2031 | ctxbi = txbi + ((startidx + j + 2) & (mask)); |
2032 | pci_unmap_page(jme->pdev, |
2033 | ctxbi->mapping, |
2034 | ctxbi->len, |
2035 | PCI_DMA_TODEVICE); |
2036 | |
2037 | ctxbi->mapping = 0; |
2038 | ctxbi->len = 0; |
2039 | } |
2040 | } |
2041 | |
2042 | static int |
2043 | jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) |
2044 | { |
2045 | struct jme_ring *txring = &(jme->txring[0]); |
2046 | struct txdesc *txdesc = txring->desc, *ctxdesc; |
2047 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; |
2048 | bool hidma = jme->dev->features & NETIF_F_HIGHDMA; |
2049 | int i, nr_frags = skb_shinfo(skb)->nr_frags; |
2050 | int mask = jme->tx_ring_mask; |
2051 | const struct skb_frag_struct *frag; |
2052 | u32 len; |
2053 | int ret = 0; |
2054 | |
2055 | for (i = 0 ; i < nr_frags ; ++i) { |
2056 | frag = &skb_shinfo(skb)->frags[i]; |
2057 | ctxdesc = txdesc + ((idx + i + 2) & (mask)); |
2058 | ctxbi = txbi + ((idx + i + 2) & (mask)); |
2059 | |
2060 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, |
2061 | skb_frag_page(frag), |
2062 | frag->page_offset, skb_frag_size(frag), hidma); |
2063 | if (ret) { |
2064 | jme_drop_tx_map(jme, idx, i); |
2065 | goto out; |
2066 | } |
2067 | |
2068 | } |
2069 | |
2070 | len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; |
2071 | ctxdesc = txdesc + ((idx + 1) & (mask)); |
2072 | ctxbi = txbi + ((idx + 1) & (mask)); |
2073 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), |
2074 | offset_in_page(skb->data), len, hidma); |
2075 | if (ret) |
2076 | jme_drop_tx_map(jme, idx, i); |
2077 | |
2078 | out: |
2079 | return ret; |
2081 | } |
2083 | |
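/*
 * Prepare a TSO send: store the scaled MSS in the descriptor and seed
 * the TCP pseudo-header checksum for IPv4 or IPv6.  Returns 0 when TSO
 * is in effect, or 1 when gso_size is zero so the caller falls back to
 * plain checksum offload.
 */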
2084 | static int |
2085 | jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) |
2086 | { |
2087 | *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); |
2088 | if (*mss) { |
2089 | *flags |= TXFLAG_LSEN; |
2090 | |
2091 | if (skb->protocol == htons(ETH_P_IP)) { |
2092 | struct iphdr *iph = ip_hdr(skb); |
2093 | |
2094 | iph->check = 0; |
2095 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
2096 | iph->daddr, 0, |
2097 | IPPROTO_TCP, |
2098 | 0); |
2099 | } else { |
2100 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
2101 | |
2102 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, |
2103 | &ip6h->daddr, 0, |
2104 | IPPROTO_TCP, |
2105 | 0); |
2106 | } |
2107 | |
2108 | return 0; |
2109 | } |
2110 | |
2111 | return 1; |
2112 | } |
2113 | |
2114 | static void |
2115 | jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) |
2116 | { |
2117 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2118 | u8 ip_proto; |
2119 | |
2120 | switch (skb->protocol) { |
2121 | case htons(ETH_P_IP): |
2122 | ip_proto = ip_hdr(skb)->protocol; |
2123 | break; |
2124 | case htons(ETH_P_IPV6): |
2125 | ip_proto = ipv6_hdr(skb)->nexthdr; |
2126 | break; |
2127 | default: |
2128 | ip_proto = 0; |
2129 | break; |
2130 | } |
2131 | |
2132 | switch (ip_proto) { |
2133 | case IPPROTO_TCP: |
2134 | *flags |= TXFLAG_TCPCS; |
2135 | break; |
2136 | case IPPROTO_UDP: |
2137 | *flags |= TXFLAG_UDPCS; |
2138 | break; |
2139 | default: |
			netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n");
2141 | break; |
2142 | } |
2143 | } |
2144 | } |
2145 | |
2146 | static inline void |
2147 | jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) |
2148 | { |
2149 | if (skb_vlan_tag_present(skb)) { |
2150 | *flags |= TXFLAG_TAGON; |
2151 | *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); |
2152 | } |
2153 | } |
2154 | |
2155 | static int |
2156 | jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) |
2157 | { |
2158 | struct jme_ring *txring = &(jme->txring[0]); |
2159 | struct txdesc *txdesc; |
2160 | struct jme_buffer_info *txbi; |
2161 | u8 flags; |
2162 | int ret = 0; |
2163 | |
2164 | txdesc = (struct txdesc *)txring->desc + idx; |
2165 | txbi = txring->bufinf + idx; |
2166 | |
2167 | txdesc->dw[0] = 0; |
2168 | txdesc->dw[1] = 0; |
2169 | txdesc->dw[2] = 0; |
2170 | txdesc->dw[3] = 0; |
2171 | txdesc->desc1.pktsize = cpu_to_le16(skb->len); |
2172 | /* |
2173 | * Set OWN bit at final. |
2174 | * When kernel transmit faster than NIC. |
2175 | * And NIC trying to send this descriptor before we tell |
2176 | * it to start sending this TX queue. |
2177 | * Other fields are already filled correctly. |
2178 | */ |
2179 | wmb(); |
2180 | flags = TXFLAG_OWN | TXFLAG_INT; |
2181 | /* |
2182 | * Set checksum flags while not tso |
2183 | */ |
2184 | if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) |
2185 | jme_tx_csum(jme, skb, &flags); |
2186 | jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); |
2187 | ret = jme_map_tx_skb(jme, skb, idx); |
2188 | if (ret) |
2189 | return ret; |
2190 | |
2191 | txdesc->desc1.flags = flags; |
2192 | /* |
2193 | * Set tx buffer info after telling NIC to send |
2194 | * For better tx_clean timing |
2195 | */ |
2196 | wmb(); |
2197 | txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; |
2198 | txbi->skb = skb; |
2199 | txbi->len = skb->len; |
2200 | txbi->start_xmit = jiffies; |
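	/*
	 * jiffies can legitimately wrap to zero; substitute all-ones so
	 * the timeout check in jme_stop_queue_if_full() still sees a
	 * nonzero timestamp.
	 */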
2201 | if (!txbi->start_xmit) |
2202 | txbi->start_xmit = (0UL-1); |
2203 | |
2204 | return 0; |
2205 | } |
2206 | |
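/*
 * Pause the TX queue when fewer than MAX_SKB_FRAGS + 2 descriptors
 * remain (waking it again at once if the free count is already back
 * above tx_wake_threshold), and stop it outright when the oldest
 * pending descriptor has been in flight longer than TX_TIMEOUT.
 */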
2207 | static void |
2208 | jme_stop_queue_if_full(struct jme_adapter *jme) |
2209 | { |
2210 | struct jme_ring *txring = &(jme->txring[0]); |
2211 | struct jme_buffer_info *txbi = txring->bufinf; |
2212 | int idx = atomic_read(&txring->next_to_clean); |
2213 | |
2214 | txbi += idx; |
2215 | |
2216 | smp_wmb(); |
2217 | if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { |
2218 | netif_stop_queue(jme->dev); |
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2220 | smp_wmb(); |
2221 | if (atomic_read(&txring->nr_free) |
2222 | >= (jme->tx_wake_threshold)) { |
2223 | netif_wake_queue(jme->dev); |
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
2225 | } |
2226 | } |
2227 | |
2228 | if (unlikely(txbi->start_xmit && |
2229 | (jiffies - txbi->start_xmit) >= TX_TIMEOUT && |
2230 | txbi->skb)) { |
2231 | netif_stop_queue(jme->dev); |
		netif_info(jme, tx_queued, jme->dev,
			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
2234 | } |
2235 | } |
2236 | |
2237 | /* |
2238 | * This function is already protected by netif_tx_lock() |
2239 | */ |
2240 | |
2241 | static netdev_tx_t |
2242 | jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) |
2243 | { |
2244 | struct jme_adapter *jme = netdev_priv(netdev); |
2245 | int idx; |
2246 | |
2247 | if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) { |
2248 | dev_kfree_skb_any(skb); |
2249 | ++(NET_STAT(jme).tx_dropped); |
2250 | return NETDEV_TX_OK; |
2251 | } |
2252 | |
2253 | idx = jme_alloc_txdesc(jme, skb); |
2254 | |
2255 | if (unlikely(idx < 0)) { |
2256 | netif_stop_queue(netdev); |
		netif_err(jme, tx_err, jme->dev,
			  "BUG! Tx ring full when queue awake!\n");
2259 | |
2260 | return NETDEV_TX_BUSY; |
2261 | } |
2262 | |
2263 | if (jme_fill_tx_desc(jme, skb, idx)) |
2264 | return NETDEV_TX_OK; |
2265 | |
2266 | jwrite32(jme, JME_TXCS, jme->reg_txcs | |
2267 | TXCS_SELECT_QUEUE0 | |
2268 | TXCS_QUEUE0S | |
2269 | TXCS_ENABLE); |
2270 | |
	tx_dbg(jme, "xmit: %d+%d@%lu\n",
2272 | idx, skb_shinfo(skb)->nr_frags + 2, jiffies); |
2273 | jme_stop_queue_if_full(jme); |
2274 | |
2275 | return NETDEV_TX_OK; |
2276 | } |
2277 | |
2278 | static void |
2279 | jme_set_unicastaddr(struct net_device *netdev) |
2280 | { |
2281 | struct jme_adapter *jme = netdev_priv(netdev); |
2282 | u32 val; |
2283 | |
2284 | val = (netdev->dev_addr[3] & 0xff) << 24 | |
2285 | (netdev->dev_addr[2] & 0xff) << 16 | |
2286 | (netdev->dev_addr[1] & 0xff) << 8 | |
2287 | (netdev->dev_addr[0] & 0xff); |
2288 | jwrite32(jme, JME_RXUMA_LO, val); |
2289 | val = (netdev->dev_addr[5] & 0xff) << 8 | |
2290 | (netdev->dev_addr[4] & 0xff); |
2291 | jwrite32(jme, JME_RXUMA_HI, val); |
2292 | } |
2293 | |
2294 | static int |
2295 | jme_set_macaddr(struct net_device *netdev, void *p) |
2296 | { |
2297 | struct jme_adapter *jme = netdev_priv(netdev); |
2298 | struct sockaddr *addr = p; |
2299 | |
2300 | if (netif_running(netdev)) |
2301 | return -EBUSY; |
2302 | |
2303 | spin_lock_bh(&jme->macaddr_lock); |
2304 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
2305 | jme_set_unicastaddr(netdev); |
2306 | spin_unlock_bh(&jme->macaddr_lock); |
2307 | |
2308 | return 0; |
2309 | } |
2310 | |
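/*
 * Program the RX filters.  Promiscuous mode accepts everything and
 * IFF_ALLMULTI accepts all multicast; otherwise each multicast address
 * is hashed into a 64-bit table: the low six bits of ether_crc() pick
 * the bit, bit_nr >> 5 selects RXMCHT_LO/HI and 1 << (bit_nr & 0x1F)
 * the position within it.
 */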
2311 | static void |
2312 | jme_set_multi(struct net_device *netdev) |
2313 | { |
2314 | struct jme_adapter *jme = netdev_priv(netdev); |
2315 | u32 mc_hash[2] = {}; |
2316 | |
2317 | spin_lock_bh(&jme->rxmcs_lock); |
2318 | |
2319 | jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; |
2320 | |
2321 | if (netdev->flags & IFF_PROMISC) { |
2322 | jme->reg_rxmcs |= RXMCS_ALLFRAME; |
2323 | } else if (netdev->flags & IFF_ALLMULTI) { |
2324 | jme->reg_rxmcs |= RXMCS_ALLMULFRAME; |
2325 | } else if (netdev->flags & IFF_MULTICAST) { |
2326 | struct netdev_hw_addr *ha; |
2327 | int bit_nr; |
2328 | |
2329 | jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; |
2330 | netdev_for_each_mc_addr(ha, netdev) { |
2331 | bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; |
2332 | mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); |
2333 | } |
2334 | |
2335 | jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); |
2336 | jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); |
2337 | } |
2338 | |
2339 | wmb(); |
2340 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); |
2341 | |
2342 | spin_unlock_bh(&jme->rxmcs_lock); |
2343 | } |
2344 | |
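/*
 * Apply a new MTU, re-derive the offload features that depend on it,
 * then restart the RX engine and reset the link for the new frame
 * size.
 */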
2345 | static int |
2346 | jme_change_mtu(struct net_device *netdev, int new_mtu) |
2347 | { |
2348 | struct jme_adapter *jme = netdev_priv(netdev); |
2349 | |
2350 | netdev->mtu = new_mtu; |
2351 | netdev_update_features(netdev); |
2352 | |
2353 | jme_restart_rx_engine(jme); |
2354 | jme_reset_link(jme); |
2355 | |
2356 | return 0; |
2357 | } |
2358 | |
2359 | static void |
2360 | jme_tx_timeout(struct net_device *netdev) |
2361 | { |
2362 | struct jme_adapter *jme = netdev_priv(netdev); |
2363 | |
2364 | jme->phylink = 0; |
2365 | jme_reset_phy_processor(jme); |
2366 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
2367 | jme_set_link_ksettings(netdev, &jme->old_cmd); |
2368 | |
2369 | /* |
2370 | * Force to Reset the link again |
2371 | */ |
2372 | jme_reset_link(jme); |
2373 | } |
2374 | |
2375 | static void |
2376 | jme_get_drvinfo(struct net_device *netdev, |
2377 | struct ethtool_drvinfo *info) |
2378 | { |
2379 | struct jme_adapter *jme = netdev_priv(netdev); |
2380 | |
2381 | strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); |
2382 | strlcpy(info->version, DRV_VERSION, sizeof(info->version)); |
2383 | strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); |
2384 | } |
2385 | |
2386 | static int |
2387 | jme_get_regs_len(struct net_device *netdev) |
2388 | { |
2389 | return JME_REG_LEN; |
2390 | } |
2391 | |
2392 | static void |
2393 | mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) |
2394 | { |
2395 | int i; |
2396 | |
2397 | for (i = 0 ; i < len ; i += 4) |
2398 | p[i >> 2] = jread32(jme, reg + i); |
2399 | } |
2400 | |
2401 | static void |
2402 | mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) |
2403 | { |
2404 | int i; |
2405 | u16 *p16 = (u16 *)p; |
2406 | |
2407 | for (i = 0 ; i < reg_nr ; ++i) |
2408 | p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); |
2409 | } |
2410 | |
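/*
 * Dump the register file for ethtool: the MAC, PHY, MISC and RSS
 * blocks are copied at 0x100-byte strides, followed by the MDIO PHY
 * registers read through the SMI interface.
 */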
2411 | static void |
2412 | jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) |
2413 | { |
2414 | struct jme_adapter *jme = netdev_priv(netdev); |
2415 | u32 *p32 = (u32 *)p; |
2416 | |
2417 | memset(p, 0xFF, JME_REG_LEN); |
2418 | |
2419 | regs->version = 1; |
2420 | mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); |
2421 | |
2422 | p32 += 0x100 >> 2; |
2423 | mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); |
2424 | |
2425 | p32 += 0x100 >> 2; |
2426 | mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); |
2427 | |
2428 | p32 += 0x100 >> 2; |
2429 | mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); |
2430 | |
2431 | p32 += 0x100 >> 2; |
2432 | mdio_memcpy(jme, p32, JME_PHY_REG_NR); |
2433 | } |
2434 | |
2435 | static int |
2436 | jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) |
2437 | { |
2438 | struct jme_adapter *jme = netdev_priv(netdev); |
2439 | |
2440 | ecmd->tx_coalesce_usecs = PCC_TX_TO; |
2441 | ecmd->tx_max_coalesced_frames = PCC_TX_CNT; |
2442 | |
2443 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { |
2444 | ecmd->use_adaptive_rx_coalesce = false; |
2445 | ecmd->rx_coalesce_usecs = 0; |
2446 | ecmd->rx_max_coalesced_frames = 0; |
2447 | return 0; |
2448 | } |
2449 | |
2450 | ecmd->use_adaptive_rx_coalesce = true; |
2451 | |
2452 | switch (jme->dpi.cur) { |
2453 | case PCC_P1: |
2454 | ecmd->rx_coalesce_usecs = PCC_P1_TO; |
2455 | ecmd->rx_max_coalesced_frames = PCC_P1_CNT; |
2456 | break; |
2457 | case PCC_P2: |
2458 | ecmd->rx_coalesce_usecs = PCC_P2_TO; |
2459 | ecmd->rx_max_coalesced_frames = PCC_P2_CNT; |
2460 | break; |
2461 | case PCC_P3: |
2462 | ecmd->rx_coalesce_usecs = PCC_P3_TO; |
2463 | ecmd->rx_max_coalesced_frames = PCC_P3_CNT; |
2464 | break; |
2465 | default: |
2466 | break; |
2467 | } |
2468 | |
2469 | return 0; |
2470 | } |
2471 | |
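/*
 * Switch between adaptive interrupt coalescing (dynamic PCC, RX
 * delivered through netif_rx) and pure NAPI polling (JME_FLAG_POLL
 * set, RX delivered through netif_receive_skb).  Only permitted while
 * the interface is down.
 */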
2472 | static int |
2473 | jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) |
2474 | { |
2475 | struct jme_adapter *jme = netdev_priv(netdev); |
2476 | struct dynpcc_info *dpi = &(jme->dpi); |
2477 | |
2478 | if (netif_running(netdev)) |
2479 | return -EBUSY; |
2480 | |
2481 | if (ecmd->use_adaptive_rx_coalesce && |
2482 | test_bit(JME_FLAG_POLL, &jme->flags)) { |
2483 | clear_bit(JME_FLAG_POLL, &jme->flags); |
2484 | jme->jme_rx = netif_rx; |
2485 | dpi->cur = PCC_P1; |
2486 | dpi->attempt = PCC_P1; |
2487 | dpi->cnt = 0; |
2488 | jme_set_rx_pcc(jme, PCC_P1); |
2489 | jme_interrupt_mode(jme); |
2490 | } else if (!(ecmd->use_adaptive_rx_coalesce) && |
2491 | !(test_bit(JME_FLAG_POLL, &jme->flags))) { |
2492 | set_bit(JME_FLAG_POLL, &jme->flags); |
2493 | jme->jme_rx = netif_receive_skb; |
2494 | jme_interrupt_mode(jme); |
2495 | } |
2496 | |
2497 | return 0; |
2498 | } |
2499 | |
2500 | static void |
2501 | jme_get_pauseparam(struct net_device *netdev, |
2502 | struct ethtool_pauseparam *ecmd) |
2503 | { |
2504 | struct jme_adapter *jme = netdev_priv(netdev); |
2505 | u32 val; |
2506 | |
2507 | ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; |
2508 | ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; |
2509 | |
2510 | spin_lock_bh(&jme->phy_lock); |
2511 | val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); |
2512 | spin_unlock_bh(&jme->phy_lock); |
2513 | |
2514 | ecmd->autoneg = |
2515 | (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; |
2516 | } |
2517 | |
2518 | static int |
2519 | jme_set_pauseparam(struct net_device *netdev, |
2520 | struct ethtool_pauseparam *ecmd) |
2521 | { |
2522 | struct jme_adapter *jme = netdev_priv(netdev); |
2523 | u32 val; |
2524 | |
2525 | if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ |
2526 | (ecmd->tx_pause != 0)) { |
2527 | |
2528 | if (ecmd->tx_pause) |
2529 | jme->reg_txpfc |= TXPFC_PF_EN; |
2530 | else |
2531 | jme->reg_txpfc &= ~TXPFC_PF_EN; |
2532 | |
2533 | jwrite32(jme, JME_TXPFC, jme->reg_txpfc); |
2534 | } |
2535 | |
2536 | spin_lock_bh(&jme->rxmcs_lock); |
2537 | if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ |
2538 | (ecmd->rx_pause != 0)) { |
2539 | |
2540 | if (ecmd->rx_pause) |
2541 | jme->reg_rxmcs |= RXMCS_FLOWCTRL; |
2542 | else |
2543 | jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; |
2544 | |
2545 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); |
2546 | } |
2547 | spin_unlock_bh(&jme->rxmcs_lock); |
2548 | |
2549 | spin_lock_bh(&jme->phy_lock); |
2550 | val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); |
2551 | if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ |
2552 | (ecmd->autoneg != 0)) { |
2553 | |
2554 | if (ecmd->autoneg) |
2555 | val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2556 | else |
2557 | val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2558 | |
2559 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, |
2560 | MII_ADVERTISE, val); |
2561 | } |
2562 | spin_unlock_bh(&jme->phy_lock); |
2563 | |
2564 | return 0; |
2565 | } |
2566 | |
2567 | static void |
2568 | jme_get_wol(struct net_device *netdev, |
2569 | struct ethtool_wolinfo *wol) |
2570 | { |
2571 | struct jme_adapter *jme = netdev_priv(netdev); |
2572 | |
2573 | wol->supported = WAKE_MAGIC | WAKE_PHY; |
2574 | |
2575 | wol->wolopts = 0; |
2576 | |
2577 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) |
2578 | wol->wolopts |= WAKE_PHY; |
2579 | |
2580 | if (jme->reg_pmcs & PMCS_MFEN) |
2581 | wol->wolopts |= WAKE_MAGIC; |
2583 | } |
2584 | |
2585 | static int |
2586 | jme_set_wol(struct net_device *netdev, |
2587 | struct ethtool_wolinfo *wol) |
2588 | { |
2589 | struct jme_adapter *jme = netdev_priv(netdev); |
2590 | |
2591 | if (wol->wolopts & (WAKE_MAGICSECURE | |
2592 | WAKE_UCAST | |
2593 | WAKE_MCAST | |
2594 | WAKE_BCAST | |
2595 | WAKE_ARP)) |
2596 | return -EOPNOTSUPP; |
2597 | |
2598 | jme->reg_pmcs = 0; |
2599 | |
2600 | if (wol->wolopts & WAKE_PHY) |
2601 | jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; |
2602 | |
2603 | if (wol->wolopts & WAKE_MAGIC) |
2604 | jme->reg_pmcs |= PMCS_MFEN; |
2605 | |
2606 | return 0; |
2607 | } |
2608 | |
2609 | static int |
2610 | jme_get_link_ksettings(struct net_device *netdev, |
2611 | struct ethtool_link_ksettings *cmd) |
2612 | { |
2613 | struct jme_adapter *jme = netdev_priv(netdev); |
2614 | |
2615 | spin_lock_bh(&jme->phy_lock); |
2616 | mii_ethtool_get_link_ksettings(&jme->mii_if, cmd); |
2617 | spin_unlock_bh(&jme->phy_lock); |
2618 | return 0; |
2619 | } |
2620 | |
2621 | static int |
2622 | jme_set_link_ksettings(struct net_device *netdev, |
2623 | const struct ethtool_link_ksettings *cmd) |
2624 | { |
2625 | struct jme_adapter *jme = netdev_priv(netdev); |
2626 | int rc, fdc = 0; |
2627 | |
2628 | if (cmd->base.speed == SPEED_1000 && |
2629 | cmd->base.autoneg != AUTONEG_ENABLE) |
2630 | return -EINVAL; |
2631 | |
2632 | /* |
2633 | * Check If user changed duplex only while force_media. |
2634 | * Hardware would not generate link change interrupt. |
2635 | */ |
2636 | if (jme->mii_if.force_media && |
2637 | cmd->base.autoneg != AUTONEG_ENABLE && |
2638 | (jme->mii_if.full_duplex != cmd->base.duplex)) |
2639 | fdc = 1; |
2640 | |
2641 | spin_lock_bh(&jme->phy_lock); |
2642 | rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd); |
2643 | spin_unlock_bh(&jme->phy_lock); |
2644 | |
2645 | if (!rc) { |
2646 | if (fdc) |
2647 | jme_reset_link(jme); |
2648 | jme->old_cmd = *cmd; |
2649 | set_bit(JME_FLAG_SSET, &jme->flags); |
2650 | } |
2651 | |
2652 | return rc; |
2653 | } |
2654 | |
2655 | static int |
2656 | jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) |
2657 | { |
2658 | int rc; |
2659 | struct jme_adapter *jme = netdev_priv(netdev); |
2660 | struct mii_ioctl_data *mii_data = if_mii(rq); |
2661 | unsigned int duplex_chg; |
2662 | |
2663 | if (cmd == SIOCSMIIREG) { |
2664 | u16 val = mii_data->val_in; |
2665 | if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && |
2666 | (val & BMCR_SPEED1000)) |
2667 | return -EINVAL; |
2668 | } |
2669 | |
2670 | spin_lock_bh(&jme->phy_lock); |
2671 | rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); |
2672 | spin_unlock_bh(&jme->phy_lock); |
2673 | |
2674 | if (!rc && (cmd == SIOCSMIIREG)) { |
2675 | if (duplex_chg) |
2676 | jme_reset_link(jme); |
2677 | jme_get_link_ksettings(netdev, &jme->old_cmd); |
2678 | set_bit(JME_FLAG_SSET, &jme->flags); |
2679 | } |
2680 | |
2681 | return rc; |
2682 | } |
2683 | |
2684 | static u32 |
2685 | jme_get_link(struct net_device *netdev) |
2686 | { |
2687 | struct jme_adapter *jme = netdev_priv(netdev); |
2688 | return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; |
2689 | } |
2690 | |
2691 | static u32 |
2692 | jme_get_msglevel(struct net_device *netdev) |
2693 | { |
2694 | struct jme_adapter *jme = netdev_priv(netdev); |
2695 | return jme->msg_enable; |
2696 | } |
2697 | |
2698 | static void |
2699 | jme_set_msglevel(struct net_device *netdev, u32 value) |
2700 | { |
2701 | struct jme_adapter *jme = netdev_priv(netdev); |
2702 | jme->msg_enable = value; |
2703 | } |
2704 | |
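/*
 * TSO and checksum offload are only usable for standard-sized frames
 * on this hardware, so both are masked off once the MTU exceeds 1900
 * bytes.
 */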
2705 | static netdev_features_t |
2706 | jme_fix_features(struct net_device *netdev, netdev_features_t features) |
2707 | { |
2708 | if (netdev->mtu > 1900) |
2709 | features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK); |
2710 | return features; |
2711 | } |
2712 | |
2713 | static int |
2714 | jme_set_features(struct net_device *netdev, netdev_features_t features) |
2715 | { |
2716 | struct jme_adapter *jme = netdev_priv(netdev); |
2717 | |
2718 | spin_lock_bh(&jme->rxmcs_lock); |
2719 | if (features & NETIF_F_RXCSUM) |
2720 | jme->reg_rxmcs |= RXMCS_CHECKSUM; |
2721 | else |
2722 | jme->reg_rxmcs &= ~RXMCS_CHECKSUM; |
2723 | jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); |
2724 | spin_unlock_bh(&jme->rxmcs_lock); |
2725 | |
2726 | return 0; |
2727 | } |
2728 | |
2729 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2730 | static void jme_netpoll(struct net_device *dev) |
2731 | { |
2732 | unsigned long flags; |
2733 | |
2734 | local_irq_save(flags); |
2735 | jme_intr(dev->irq, dev); |
2736 | local_irq_restore(flags); |
2737 | } |
2738 | #endif |
2739 | |
2740 | static int |
2741 | jme_nway_reset(struct net_device *netdev) |
2742 | { |
2743 | struct jme_adapter *jme = netdev_priv(netdev); |
2744 | jme_restart_an(jme); |
2745 | return 0; |
2746 | } |
2747 | |
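/*
 * Read one byte from the SMBus-attached EEPROM: wait for the
 * controller to go idle, issue the read command, then poll for
 * completion, giving up after about JME_SMB_BUSY_TIMEOUT ms at either
 * stage.
 */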
2748 | static u8 |
2749 | jme_smb_read(struct jme_adapter *jme, unsigned int addr) |
2750 | { |
2751 | u32 val; |
2752 | int to; |
2753 | |
2754 | val = jread32(jme, JME_SMBCSR); |
2755 | to = JME_SMB_BUSY_TIMEOUT; |
2756 | while ((val & SMBCSR_BUSY) && --to) { |
2757 | msleep(1); |
2758 | val = jread32(jme, JME_SMBCSR); |
2759 | } |
2760 | if (!to) { |
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2762 | return 0xFF; |
2763 | } |
2764 | |
2765 | jwrite32(jme, JME_SMBINTF, |
2766 | ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | |
2767 | SMBINTF_HWRWN_READ | |
2768 | SMBINTF_HWCMD); |
2769 | |
2770 | val = jread32(jme, JME_SMBINTF); |
2771 | to = JME_SMB_BUSY_TIMEOUT; |
2772 | while ((val & SMBINTF_HWCMD) && --to) { |
2773 | msleep(1); |
2774 | val = jread32(jme, JME_SMBINTF); |
2775 | } |
2776 | if (!to) { |
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2778 | return 0xFF; |
2779 | } |
2780 | |
2781 | return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; |
2782 | } |
2783 | |
2784 | static void |
2785 | jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) |
2786 | { |
2787 | u32 val; |
2788 | int to; |
2789 | |
2790 | val = jread32(jme, JME_SMBCSR); |
2791 | to = JME_SMB_BUSY_TIMEOUT; |
2792 | while ((val & SMBCSR_BUSY) && --to) { |
2793 | msleep(1); |
2794 | val = jread32(jme, JME_SMBCSR); |
2795 | } |
2796 | if (!to) { |
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2798 | return; |
2799 | } |
2800 | |
2801 | jwrite32(jme, JME_SMBINTF, |
2802 | ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | |
2803 | ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | |
2804 | SMBINTF_HWRWN_WRITE | |
2805 | SMBINTF_HWCMD); |
2806 | |
2807 | val = jread32(jme, JME_SMBINTF); |
2808 | to = JME_SMB_BUSY_TIMEOUT; |
2809 | while ((val & SMBINTF_HWCMD) && --to) { |
2810 | msleep(1); |
2811 | val = jread32(jme, JME_SMBINTF); |
2812 | } |
2813 | if (!to) { |
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2815 | return; |
2816 | } |
2817 | |
2818 | mdelay(2); |
2819 | } |
2820 | |
2821 | static int |
2822 | jme_get_eeprom_len(struct net_device *netdev) |
2823 | { |
2824 | struct jme_adapter *jme = netdev_priv(netdev); |
2825 | u32 val; |
2826 | val = jread32(jme, JME_SMBCSR); |
2827 | return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; |
2828 | } |
2829 | |
2830 | static int |
2831 | jme_get_eeprom(struct net_device *netdev, |
2832 | struct ethtool_eeprom *eeprom, u8 *data) |
2833 | { |
2834 | struct jme_adapter *jme = netdev_priv(netdev); |
2835 | int i, offset = eeprom->offset, len = eeprom->len; |
2836 | |
2837 | /* |
2838 | * ethtool will check the boundary for us |
2839 | */ |
2840 | eeprom->magic = JME_EEPROM_MAGIC; |
2841 | for (i = 0 ; i < len ; ++i) |
2842 | data[i] = jme_smb_read(jme, i + offset); |
2843 | |
2844 | return 0; |
2845 | } |
2846 | |
2847 | static int |
2848 | jme_set_eeprom(struct net_device *netdev, |
2849 | struct ethtool_eeprom *eeprom, u8 *data) |
2850 | { |
2851 | struct jme_adapter *jme = netdev_priv(netdev); |
2852 | int i, offset = eeprom->offset, len = eeprom->len; |
2853 | |
2854 | if (eeprom->magic != JME_EEPROM_MAGIC) |
2855 | return -EINVAL; |
2856 | |
2857 | /* |
2858 | * ethtool will check the boundary for us |
2859 | */ |
2860 | for (i = 0 ; i < len ; ++i) |
2861 | jme_smb_write(jme, i + offset, data[i]); |
2862 | |
2863 | return 0; |
2864 | } |
2865 | |
2866 | static const struct ethtool_ops jme_ethtool_ops = { |
2867 | .get_drvinfo = jme_get_drvinfo, |
2868 | .get_regs_len = jme_get_regs_len, |
2869 | .get_regs = jme_get_regs, |
2870 | .get_coalesce = jme_get_coalesce, |
2871 | .set_coalesce = jme_set_coalesce, |
2872 | .get_pauseparam = jme_get_pauseparam, |
2873 | .set_pauseparam = jme_set_pauseparam, |
2874 | .get_wol = jme_get_wol, |
2875 | .set_wol = jme_set_wol, |
2876 | .get_link = jme_get_link, |
2877 | .get_msglevel = jme_get_msglevel, |
2878 | .set_msglevel = jme_set_msglevel, |
2879 | .nway_reset = jme_nway_reset, |
2880 | .get_eeprom_len = jme_get_eeprom_len, |
2881 | .get_eeprom = jme_get_eeprom, |
2882 | .set_eeprom = jme_set_eeprom, |
2883 | .get_link_ksettings = jme_get_link_ksettings, |
2884 | .set_link_ksettings = jme_set_link_ksettings, |
2885 | }; |
2886 | |
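/*
 * Pick the widest usable DMA mask: try 64-bit and then 40-bit on the
 * gigabit JMC250 before falling back to 32-bit.  Returns 1 when high
 * DMA is usable, 0 for 32-bit only, or -1 when no mask can be set.
 */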
2887 | static int |
2888 | jme_pci_dma64(struct pci_dev *pdev) |
2889 | { |
2890 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && |
2891 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) |
2892 | if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) |
2893 | return 1; |
2894 | |
2895 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && |
2896 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) |
2897 | if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) |
2898 | return 1; |
2899 | |
2900 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) |
2901 | if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) |
2902 | return 0; |
2903 | |
2904 | return -1; |
2905 | } |
2906 | |
2907 | static inline void |
2908 | jme_phy_init(struct jme_adapter *jme) |
2909 | { |
2910 | u16 reg26; |
2911 | |
2912 | reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); |
2913 | jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); |
2914 | } |
2915 | |
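/*
 * Decode the CHIPMODE register into the FPGA version and the chip
 * main/sub revision numbers used to select PHY calibration quirks.
 */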
2916 | static inline void |
2917 | jme_check_hw_ver(struct jme_adapter *jme) |
2918 | { |
2919 | u32 chipmode; |
2920 | |
2921 | chipmode = jread32(jme, JME_CHIPMODE); |
2922 | |
2923 | jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; |
2924 | jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; |
2925 | jme->chip_main_rev = jme->chiprev & 0xF; |
2926 | jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; |
2927 | } |
2928 | |
2929 | static const struct net_device_ops jme_netdev_ops = { |
2930 | .ndo_open = jme_open, |
2931 | .ndo_stop = jme_close, |
2932 | .ndo_validate_addr = eth_validate_addr, |
2933 | .ndo_do_ioctl = jme_ioctl, |
2934 | .ndo_start_xmit = jme_start_xmit, |
2935 | .ndo_set_mac_address = jme_set_macaddr, |
2936 | .ndo_set_rx_mode = jme_set_multi, |
2937 | .ndo_change_mtu = jme_change_mtu, |
2938 | .ndo_tx_timeout = jme_tx_timeout, |
2939 | .ndo_fix_features = jme_fix_features, |
2940 | .ndo_set_features = jme_set_features, |
2941 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2942 | .ndo_poll_controller = jme_netpoll, |
2943 | #endif |
2944 | }; |
2945 | |
2946 | static int |
2947 | jme_init_one(struct pci_dev *pdev, |
2948 | const struct pci_device_id *ent) |
2949 | { |
2950 | int rc = 0, using_dac, i; |
2951 | struct net_device *netdev; |
2952 | struct jme_adapter *jme; |
2953 | u16 bmcr, bmsr; |
2954 | u32 apmc; |
2955 | |
2956 | /* |
2957 | * set up PCI device basics |
2958 | */ |
2959 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | |
2960 | PCIE_LINK_STATE_CLKPM); |
2961 | |
2962 | rc = pci_enable_device(pdev); |
2963 | if (rc) { |
		pr_err("Cannot enable PCI device\n");
2965 | goto err_out; |
2966 | } |
2967 | |
2968 | using_dac = jme_pci_dma64(pdev); |
2969 | if (using_dac < 0) { |
		pr_err("Cannot set PCI DMA Mask\n");
2971 | rc = -EIO; |
2972 | goto err_out_disable_pdev; |
2973 | } |
2974 | |
2975 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
		pr_err("No PCI resource region found\n");
2977 | rc = -ENOMEM; |
2978 | goto err_out_disable_pdev; |
2979 | } |
2980 | |
2981 | rc = pci_request_regions(pdev, DRV_NAME); |
2982 | if (rc) { |
		pr_err("Cannot obtain PCI resource region\n");
2984 | goto err_out_disable_pdev; |
2985 | } |
2986 | |
2987 | pci_set_master(pdev); |
2988 | |
2989 | /* |
2990 | * alloc and init net device |
2991 | */ |
2992 | netdev = alloc_etherdev(sizeof(*jme)); |
2993 | if (!netdev) { |
2994 | rc = -ENOMEM; |
2995 | goto err_out_release_regions; |
2996 | } |
2997 | netdev->netdev_ops = &jme_netdev_ops; |
2998 | netdev->ethtool_ops = &jme_ethtool_ops; |
2999 | netdev->watchdog_timeo = TX_TIMEOUT; |
3000 | netdev->hw_features = NETIF_F_IP_CSUM | |
3001 | NETIF_F_IPV6_CSUM | |
3002 | NETIF_F_SG | |
3003 | NETIF_F_TSO | |
3004 | NETIF_F_TSO6 | |
3005 | NETIF_F_RXCSUM; |
3006 | netdev->features = NETIF_F_IP_CSUM | |
3007 | NETIF_F_IPV6_CSUM | |
3008 | NETIF_F_SG | |
3009 | NETIF_F_TSO | |
3010 | NETIF_F_TSO6 | |
3011 | NETIF_F_HW_VLAN_CTAG_TX | |
3012 | NETIF_F_HW_VLAN_CTAG_RX; |
3013 | if (using_dac) |
3014 | netdev->features |= NETIF_F_HIGHDMA; |
3015 | |
	/* MTU range: 1280 - 9202 */
3017 | netdev->min_mtu = IPV6_MIN_MTU; |
3018 | netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN; |
3019 | |
3020 | SET_NETDEV_DEV(netdev, &pdev->dev); |
3021 | pci_set_drvdata(pdev, netdev); |
3022 | |
3023 | /* |
3024 | * init adapter info |
3025 | */ |
3026 | jme = netdev_priv(netdev); |
3027 | jme->pdev = pdev; |
3028 | jme->dev = netdev; |
3029 | jme->jme_rx = netif_rx; |
3030 | jme->old_mtu = netdev->mtu = 1500; |
3031 | jme->phylink = 0; |
3032 | jme->tx_ring_size = 1 << 10; |
3033 | jme->tx_ring_mask = jme->tx_ring_size - 1; |
3034 | jme->tx_wake_threshold = 1 << 9; |
3035 | jme->rx_ring_size = 1 << 9; |
3036 | jme->rx_ring_mask = jme->rx_ring_size - 1; |
3037 | jme->msg_enable = JME_DEF_MSG_ENABLE; |
3038 | jme->regs = ioremap(pci_resource_start(pdev, 0), |
3039 | pci_resource_len(pdev, 0)); |
3040 | if (!(jme->regs)) { |
		pr_err("Error mapping PCI resource region\n");
3042 | rc = -ENOMEM; |
3043 | goto err_out_free_netdev; |
3044 | } |
3045 | |
3046 | if (no_pseudohp) { |
3047 | apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; |
3048 | jwrite32(jme, JME_APMC, apmc); |
3049 | } else if (force_pseudohp) { |
3050 | apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; |
3051 | jwrite32(jme, JME_APMC, apmc); |
3052 | } |
3053 | |
3054 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) |
3055 | |
3056 | spin_lock_init(&jme->phy_lock); |
3057 | spin_lock_init(&jme->macaddr_lock); |
3058 | spin_lock_init(&jme->rxmcs_lock); |
3059 | |
3060 | atomic_set(&jme->link_changing, 1); |
3061 | atomic_set(&jme->rx_cleaning, 1); |
3062 | atomic_set(&jme->tx_cleaning, 1); |
3063 | atomic_set(&jme->rx_empty, 1); |
3064 | |
3065 | tasklet_init(&jme->pcc_task, |
3066 | jme_pcc_tasklet, |
3067 | (unsigned long) jme); |
3068 | jme->dpi.cur = PCC_P1; |
3069 | |
3070 | jme->reg_ghc = 0; |
3071 | jme->reg_rxcs = RXCS_DEFAULT; |
3072 | jme->reg_rxmcs = RXMCS_DEFAULT; |
3073 | jme->reg_txpfc = 0; |
3074 | jme->reg_pmcs = PMCS_MFEN; |
3075 | jme->reg_gpreg1 = GPREG1_DEFAULT; |
3076 | |
3077 | if (jme->reg_rxmcs & RXMCS_CHECKSUM) |
3078 | netdev->features |= NETIF_F_RXCSUM; |
3079 | |
3080 | /* |
3081 | * Get Max Read Req Size from PCI Config Space |
3082 | */ |
3083 | pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); |
3084 | jme->mrrs &= PCI_DCSR_MRRS_MASK; |
3085 | switch (jme->mrrs) { |
3086 | case MRRS_128B: |
3087 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; |
3088 | break; |
3089 | case MRRS_256B: |
3090 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; |
3091 | break; |
3092 | default: |
3093 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; |
3094 | break; |
3095 | } |
3096 | |
3097 | /* |
3098 | * Must check before reset_mac_processor |
3099 | */ |
3100 | jme_check_hw_ver(jme); |
3101 | jme->mii_if.dev = netdev; |
3102 | if (jme->fpgaver) { |
3103 | jme->mii_if.phy_id = 0; |
3104 | for (i = 1 ; i < 32 ; ++i) { |
3105 | bmcr = jme_mdio_read(netdev, i, MII_BMCR); |
3106 | bmsr = jme_mdio_read(netdev, i, MII_BMSR); |
3107 | if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { |
3108 | jme->mii_if.phy_id = i; |
3109 | break; |
3110 | } |
3111 | } |
3112 | |
3113 | if (!jme->mii_if.phy_id) { |
3114 | rc = -EIO; |
			pr_err("Cannot find phy_id\n");
3116 | goto err_out_unmap; |
3117 | } |
3118 | |
3119 | jme->reg_ghc |= GHC_LINK_POLL; |
3120 | } else { |
3121 | jme->mii_if.phy_id = 1; |
3122 | } |
3123 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) |
3124 | jme->mii_if.supports_gmii = true; |
3125 | else |
3126 | jme->mii_if.supports_gmii = false; |
3127 | jme->mii_if.phy_id_mask = 0x1F; |
3128 | jme->mii_if.reg_num_mask = 0x1F; |
3129 | jme->mii_if.mdio_read = jme_mdio_read; |
3130 | jme->mii_if.mdio_write = jme_mdio_write; |
3131 | |
3132 | jme_clear_pm_disable_wol(jme); |
3133 | device_init_wakeup(&pdev->dev, true); |
3134 | |
3135 | jme_set_phyfifo_5level(jme); |
3136 | jme->pcirev = pdev->revision; |
3137 | if (!jme->fpgaver) |
3138 | jme_phy_init(jme); |
3139 | jme_phy_off(jme); |
3140 | |
3141 | /* |
3142 | * Reset MAC processor and reload EEPROM for MAC Address |
3143 | */ |
3144 | jme_reset_mac_processor(jme); |
3145 | rc = jme_reload_eeprom(jme); |
3146 | if (rc) { |
		pr_err("Failed to reload EEPROM for reading the MAC address\n");
3148 | goto err_out_unmap; |
3149 | } |
3150 | jme_load_macaddr(netdev); |
3151 | |
3152 | /* |
3153 | * Tell stack that we are not ready to work until open() |
3154 | */ |
3155 | netif_carrier_off(netdev); |
3156 | |
3157 | rc = register_netdev(netdev); |
3158 | if (rc) { |
		pr_err("Cannot register net device\n");
3160 | goto err_out_unmap; |
3161 | } |
3162 | |
	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3164 | (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? |
3165 | "JMC250 Gigabit Ethernet" : |
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
3169 | (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, |
3170 | jme->pcirev, netdev->dev_addr); |
3171 | |
3172 | return 0; |
3173 | |
3174 | err_out_unmap: |
3175 | iounmap(jme->regs); |
3176 | err_out_free_netdev: |
3177 | free_netdev(netdev); |
3178 | err_out_release_regions: |
3179 | pci_release_regions(pdev); |
3180 | err_out_disable_pdev: |
3181 | pci_disable_device(pdev); |
3182 | err_out: |
3183 | return rc; |
3184 | } |
3185 | |
3186 | static void |
3187 | jme_remove_one(struct pci_dev *pdev) |
3188 | { |
3189 | struct net_device *netdev = pci_get_drvdata(pdev); |
3190 | struct jme_adapter *jme = netdev_priv(netdev); |
3191 | |
3192 | unregister_netdev(netdev); |
3193 | iounmap(jme->regs); |
3194 | free_netdev(netdev); |
3195 | pci_release_regions(pdev); |
3196 | pci_disable_device(pdev); |
3198 | } |
3199 | |
3200 | static void |
3201 | jme_shutdown(struct pci_dev *pdev) |
3202 | { |
3203 | struct net_device *netdev = pci_get_drvdata(pdev); |
3204 | struct jme_adapter *jme = netdev_priv(netdev); |
3205 | |
3206 | jme_powersave_phy(jme); |
3207 | pci_pme_active(pdev, true); |
3208 | } |
3209 | |
3210 | #ifdef CONFIG_PM_SLEEP |
3211 | static int |
3212 | jme_suspend(struct device *dev) |
3213 | { |
3214 | struct pci_dev *pdev = to_pci_dev(dev); |
3215 | struct net_device *netdev = pci_get_drvdata(pdev); |
3216 | struct jme_adapter *jme = netdev_priv(netdev); |
3217 | |
3218 | if (!netif_running(netdev)) |
3219 | return 0; |
3220 | |
3221 | atomic_dec(&jme->link_changing); |
3222 | |
3223 | netif_device_detach(netdev); |
3224 | netif_stop_queue(netdev); |
3225 | jme_stop_irq(jme); |
3226 | |
3227 | tasklet_disable(&jme->txclean_task); |
3228 | tasklet_disable(&jme->rxclean_task); |
3229 | tasklet_disable(&jme->rxempty_task); |
3230 | |
3231 | if (netif_carrier_ok(netdev)) { |
3232 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
3233 | jme_polling_mode(jme); |
3234 | |
3235 | jme_stop_pcc_timer(jme); |
3236 | jme_disable_rx_engine(jme); |
3237 | jme_disable_tx_engine(jme); |
3238 | jme_reset_mac_processor(jme); |
3239 | jme_free_rx_resources(jme); |
3240 | jme_free_tx_resources(jme); |
3241 | netif_carrier_off(netdev); |
3242 | jme->phylink = 0; |
3243 | } |
3244 | |
3245 | tasklet_enable(&jme->txclean_task); |
3246 | tasklet_enable(&jme->rxclean_task); |
3247 | tasklet_enable(&jme->rxempty_task); |
3248 | |
3249 | jme_powersave_phy(jme); |
3250 | |
3251 | return 0; |
3252 | } |
3253 | |
3254 | static int |
3255 | jme_resume(struct device *dev) |
3256 | { |
3257 | struct pci_dev *pdev = to_pci_dev(dev); |
3258 | struct net_device *netdev = pci_get_drvdata(pdev); |
3259 | struct jme_adapter *jme = netdev_priv(netdev); |
3260 | |
3261 | if (!netif_running(netdev)) |
3262 | return 0; |
3263 | |
3264 | jme_clear_pm_disable_wol(jme); |
3265 | jme_phy_on(jme); |
3266 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
3267 | jme_set_link_ksettings(netdev, &jme->old_cmd); |
3268 | else |
3269 | jme_reset_phy_processor(jme); |
3270 | jme_phy_calibration(jme); |
3271 | jme_phy_setEA(jme); |
3272 | netif_device_attach(netdev); |
3273 | |
3274 | atomic_inc(&jme->link_changing); |
3275 | |
3276 | jme_reset_link(jme); |
3277 | |
3278 | jme_start_irq(jme); |
3279 | |
3280 | return 0; |
3281 | } |
3282 | |
3283 | static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); |
3284 | #define JME_PM_OPS (&jme_pm_ops) |
3285 | |
3286 | #else |
3287 | |
3288 | #define JME_PM_OPS NULL |
3289 | #endif |
3290 | |
3291 | static const struct pci_device_id jme_pci_tbl[] = { |
3292 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, |
3293 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, |
3294 | { } |
3295 | }; |
3296 | |
3297 | static struct pci_driver jme_driver = { |
3298 | .name = DRV_NAME, |
3299 | .id_table = jme_pci_tbl, |
3300 | .probe = jme_init_one, |
3301 | .remove = jme_remove_one, |
3302 | .shutdown = jme_shutdown, |
3303 | .driver.pm = JME_PM_OPS, |
3304 | }; |
3305 | |
3306 | static int __init |
3307 | jme_init_module(void) |
3308 | { |
	pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3310 | return pci_register_driver(&jme_driver); |
3311 | } |
3312 | |
3313 | static void __exit |
3314 | jme_cleanup_module(void) |
3315 | { |
3316 | pci_unregister_driver(&jme_driver); |
3317 | } |
3318 | |
3319 | module_init(jme_init_module); |
3320 | module_exit(jme_cleanup_module); |
3321 | |
MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
3325 | MODULE_VERSION(DRV_VERSION); |
3326 | MODULE_DEVICE_TABLE(pci, jme_pci_tbl); |
3327 | |