1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver |
4 | * |
5 | * Copyright 2008 JMicron Technology Corporation |
6 | * https://www.jmicron.com/ |
7 | * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org> |
8 | * |
9 | * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> |
10 | */ |
11 | |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | |
14 | #include <linux/module.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/pci.h> |
17 | #include <linux/netdevice.h> |
18 | #include <linux/etherdevice.h> |
19 | #include <linux/ethtool.h> |
20 | #include <linux/mii.h> |
21 | #include <linux/crc32.h> |
22 | #include <linux/delay.h> |
23 | #include <linux/spinlock.h> |
24 | #include <linux/in.h> |
25 | #include <linux/ip.h> |
26 | #include <linux/ipv6.h> |
27 | #include <linux/tcp.h> |
28 | #include <linux/udp.h> |
29 | #include <linux/if_vlan.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/jiffies.h> |
32 | #include <net/ip6_checksum.h> |
33 | #include "jme.h" |
34 | |
35 | static int force_pseudohp = -1; |
36 | static int no_pseudohp = -1; |
37 | static int no_extplug = -1; |
38 | module_param(force_pseudohp, int, 0); |
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");
46 | |
47 | static int |
48 | jme_mdio_read(struct net_device *netdev, int phy, int reg) |
49 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
51 | int i, val, again = (reg == MII_BMSR) ? 1 : 0; |
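	/*
	 * MII_BMSR latches a link-down event until it is read, so read
	 * it twice to get the current rather than the latched status.
	 */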
52 | |
53 | read_again: |
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
		smi_phy_addr(phy) |
		smi_reg_addr(reg));
57 | |
58 | wmb(); |
59 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { |
60 | udelay(20); |
		val = jread32(jme, JME_SMI);
62 | if ((val & SMI_OP_REQ) == 0) |
63 | break; |
64 | } |
65 | |
66 | if (i == 0) { |
		pr_err("phy(%d) read timeout : %d\n", phy, reg);
68 | return 0; |
69 | } |
70 | |
71 | if (again--) |
72 | goto read_again; |
73 | |
74 | return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; |
75 | } |
76 | |
77 | static void |
78 | jme_mdio_write(struct net_device *netdev, |
79 | int phy, int reg, int val) |
80 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
82 | int i; |
83 | |
	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));
87 | |
88 | wmb(); |
89 | for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { |
90 | udelay(20); |
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
92 | break; |
93 | } |
94 | |
95 | if (i == 0) |
		pr_err("phy(%d) write timeout : %d\n", phy, reg);
97 | } |
98 | |
99 | static inline void |
100 | jme_reset_phy_processor(struct jme_adapter *jme) |
101 | { |
102 | u32 val; |
103 | |
	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);
122 | } |
123 | |
124 | static void |
125 | jme_setup_wakeup_frame(struct jme_adapter *jme, |
126 | const u32 *mask, u32 crc, int fnr) |
127 | { |
128 | int i; |
129 | |
130 | /* |
131 | * Setup CRC pattern |
132 | */ |
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
			((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
			(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
147 | wmb(); |
148 | } |
149 | } |
150 | |
151 | static inline void |
152 | jme_mac_rxclk_off(struct jme_adapter *jme) |
153 | { |
154 | jme->reg_gpreg1 |= GPREG1_RXCLKOFF; |
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
156 | } |
157 | |
158 | static inline void |
159 | jme_mac_rxclk_on(struct jme_adapter *jme) |
160 | { |
161 | jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; |
	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
163 | } |
164 | |
165 | static inline void |
166 | jme_mac_txclk_off(struct jme_adapter *jme) |
167 | { |
168 | jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); |
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
170 | } |
171 | |
172 | static inline void |
173 | jme_mac_txclk_on(struct jme_adapter *jme) |
174 | { |
175 | u32 speed = jme->reg_ghc & GHC_SPEED; |
176 | if (speed == GHC_SPEED_1000M) |
177 | jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; |
178 | else |
179 | jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; |
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
181 | } |
182 | |
183 | static inline void |
184 | jme_reset_ghc_speed(struct jme_adapter *jme) |
185 | { |
186 | jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); |
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
188 | } |
189 | |
190 | static inline void |
191 | jme_reset_250A2_workaround(struct jme_adapter *jme) |
192 | { |
193 | jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | |
194 | GPREG1_RSSPATCH); |
	jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
196 | } |
197 | |
198 | static inline void |
199 | jme_assert_ghc_reset(struct jme_adapter *jme) |
200 | { |
201 | jme->reg_ghc |= GHC_SWRST; |
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
203 | } |
204 | |
205 | static inline void |
206 | jme_clear_ghc_reset(struct jme_adapter *jme) |
207 | { |
208 | jme->reg_ghc &= ~GHC_SWRST; |
	jwrite32f(jme, JME_GHC, jme->reg_ghc);
210 | } |
211 | |
212 | static void |
213 | jme_reset_mac_processor(struct jme_adapter *jme) |
214 | { |
215 | static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; |
216 | u32 crc = 0xCDCDCDCD; |
217 | u32 gpreg0; |
218 | int i; |
219 | |
220 | jme_reset_ghc_speed(jme); |
221 | jme_reset_250A2_workaround(jme); |
222 | |
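	/*
	 * Cycle the RX/TX MAC clocks around the GHC soft reset below,
	 * presumably so the reset is latched in both clock domains.
	 */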
223 | jme_mac_rxclk_on(jme); |
224 | jme_mac_txclk_on(jme); |
225 | udelay(1); |
226 | jme_assert_ghc_reset(jme); |
227 | udelay(1); |
228 | jme_mac_rxclk_off(jme); |
229 | jme_mac_txclk_off(jme); |
230 | udelay(1); |
231 | jme_clear_ghc_reset(jme); |
232 | udelay(1); |
233 | jme_mac_rxclk_on(jme); |
234 | jme_mac_txclk_on(jme); |
235 | udelay(1); |
236 | jme_mac_rxclk_off(jme); |
237 | jme_mac_txclk_off(jme); |
238 | |
	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
252 | if (jme->fpgaver) |
253 | gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; |
254 | else |
255 | gpreg0 = GPREG0_DEFAULT; |
	jwrite32(jme, JME_GPREG0, gpreg0);
257 | } |
258 | |
259 | static inline void |
260 | jme_clear_pm_enable_wol(struct jme_adapter *jme) |
261 | { |
	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
263 | } |
264 | |
265 | static inline void |
266 | jme_clear_pm_disable_wol(struct jme_adapter *jme) |
267 | { |
	jwrite32(jme, JME_PMCS, PMCS_STMASK);
269 | } |
270 | |
271 | static int |
272 | jme_reload_eeprom(struct jme_adapter *jme) |
273 | { |
274 | u32 val; |
275 | int i; |
276 | |
	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			pr_err("eeprom reload timeout\n");
294 | return -EIO; |
295 | } |
296 | } |
297 | |
298 | return 0; |
299 | } |
300 | |
301 | static void |
302 | jme_load_macaddr(struct net_device *netdev) |
303 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[ETH_ALEN];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >> 0) & 0xFF;
	macaddr[1] = (val >> 8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >> 0) & 0xFF;
	macaddr[5] = (val >> 8) & 0xFF;
	eth_hw_addr_set(netdev, macaddr);
	spin_unlock_bh(&jme->macaddr_lock);
319 | } |
320 | |
321 | static inline void |
322 | jme_set_rx_pcc(struct jme_adapter *jme, int p) |
323 | { |
324 | switch (p) { |
325 | case PCC_OFF: |
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
344 | break; |
345 | default: |
346 | break; |
347 | } |
348 | wmb(); |
349 | |
350 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) |
		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
352 | } |
353 | |
354 | static void |
355 | jme_start_irq(struct jme_adapter *jme) |
356 | { |
357 | register struct dynpcc_info *dpi = &(jme->dpi); |
358 | |
	jme_set_rx_pcc(jme, PCC_P1);
360 | dpi->cur = PCC_P1; |
361 | dpi->attempt = PCC_P1; |
362 | dpi->cnt = 0; |
363 | |
	jwrite32(jme, JME_PCCTX,
		((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
366 | ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | |
367 | PCCTXQ0_EN |
368 | ); |
369 | |
370 | /* |
371 | * Enable Interrupts |
372 | */ |
	jwrite32(jme, JME_IENS, INTR_ENABLE);
374 | } |
375 | |
376 | static inline void |
377 | jme_stop_irq(struct jme_adapter *jme) |
378 | { |
379 | /* |
380 | * Disable Interrupts |
381 | */ |
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
383 | } |
384 | |
385 | static u32 |
386 | jme_linkstat_from_phy(struct jme_adapter *jme) |
387 | { |
388 | u32 phylink, bmsr; |
389 | |
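	/*
	 * PHY register 17 is a vendor status register assumed to report
	 * link state in the same layout as JME_PHY_LINK; this path is
	 * used on FPGA prototypes (jme->fpgaver set).
	 */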
	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
392 | if (bmsr & BMSR_ANCOMP) |
393 | phylink |= PHY_LINK_AUTONEG_COMPLETE; |
394 | |
395 | return phylink; |
396 | } |
397 | |
398 | static inline void |
399 | jme_set_phyfifo_5level(struct jme_adapter *jme) |
400 | { |
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
402 | } |
403 | |
404 | static inline void |
405 | jme_set_phyfifo_8level(struct jme_adapter *jme) |
406 | { |
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
408 | } |
409 | |
410 | static int |
411 | jme_check_link(struct net_device *netdev, int testonly) |
412 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
414 | u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; |
415 | char linkmsg[64]; |
416 | int rc = 0; |
417 | |
418 | linkmsg[0] = '\0'; |
419 | |
420 | if (jme->fpgaver) |
421 | phylink = jme_linkstat_from_phy(jme); |
422 | else |
		phylink = jread32(jme, JME_PHY_LINK);
424 | |
425 | if (phylink & PHY_LINK_UP) { |
426 | if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { |
427 | /* |
			 * If we did not enable AN,
			 * speed/duplex info should be obtained from SMI
430 | */ |
431 | phylink = PHY_LINK_UP; |
432 | |
			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);
436 | |
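			/*
			 * Decode the forced speed from BMCR: 1000M is
			 * indicated by SPEED1000 set with SPEED100 clear.
			 */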
437 | phylink |= ((bmcr & BMCR_SPEED1000) && |
438 | (bmcr & BMCR_SPEED100) == 0) ? |
439 | PHY_LINK_SPEED_1000M : |
440 | (bmcr & BMCR_SPEED100) ? |
441 | PHY_LINK_SPEED_100M : |
442 | PHY_LINK_SPEED_10M; |
443 | |
444 | phylink |= (bmcr & BMCR_FULLDPLX) ? |
445 | PHY_LINK_DUPLEX : 0; |
446 | |
			strcat(linkmsg, "Forced: ");
448 | } else { |
449 | /* |
			 * Keep polling until speed/duplex resolution completes
451 | */ |
452 | while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && |
453 | --cnt) { |
454 | |
455 | udelay(1); |
456 | |
457 | if (jme->fpgaver) |
458 | phylink = jme_linkstat_from_phy(jme); |
459 | else |
					phylink = jread32(jme, JME_PHY_LINK);
461 | } |
462 | if (!cnt) |
			pr_err("Waiting speed resolve timeout\n");
464 | |
			strcat(linkmsg, "ANed: ");
466 | } |
467 | |
468 | if (jme->phylink == phylink) { |
469 | rc = 1; |
470 | goto out; |
471 | } |
472 | if (testonly) |
473 | goto out; |
474 | |
475 | jme->phylink = phylink; |
476 | |
477 | /* |
		 * The speed/duplex setting of jme->reg_ghc has already been
		 * cleared by jme_reset_mac_processor()
480 | */ |
481 | switch (phylink & PHY_LINK_SPEED_MASK) { |
482 | case PHY_LINK_SPEED_10M: |
			jme->reg_ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			jme->reg_ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			jme->reg_ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
493 | break; |
494 | default: |
495 | break; |
496 | } |
497 | |
498 | if (phylink & PHY_LINK_DUPLEX) { |
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
			jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
			jme->reg_ghc |= GHC_DPX;
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
				TXMCS_BACKOFF |
				TXMCS_CARRIERSENSE |
				TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
		}

		jwrite32(jme, JME_GHC, jme->reg_ghc);
511 | |
		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
513 | jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | |
514 | GPREG1_RSSPATCH); |
515 | if (!(phylink & PHY_LINK_DUPLEX)) |
516 | jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; |
517 | switch (phylink & PHY_LINK_SPEED_MASK) { |
518 | case PHY_LINK_SPEED_10M: |
519 | jme_set_phyfifo_8level(jme); |
520 | jme->reg_gpreg1 |= GPREG1_RSSPATCH; |
521 | break; |
522 | case PHY_LINK_SPEED_100M: |
523 | jme_set_phyfifo_5level(jme); |
524 | jme->reg_gpreg1 |= GPREG1_RSSPATCH; |
525 | break; |
526 | case PHY_LINK_SPEED_1000M: |
527 | jme_set_phyfifo_8level(jme); |
528 | break; |
529 | default: |
530 | break; |
531 | } |
532 | } |
		jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
534 | |
		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");
		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
					"MDI-X" :
					"MDI");
		netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
		netif_carrier_on(netdev);
543 | } else { |
544 | if (testonly) |
545 | goto out; |
546 | |
		netif_info(jme, link, jme->dev, "Link is down\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
550 | } |
551 | |
552 | out: |
553 | return rc; |
554 | } |
555 | |
556 | static int |
557 | jme_setup_tx_resources(struct jme_adapter *jme) |
558 | { |
559 | struct jme_ring *txring = &(jme->txring[0]); |
560 | |
	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
					   &(txring->dmaalloc),
					   GFP_ATOMIC);
565 | |
566 | if (!txring->alloc) |
567 | goto err_set_null; |
568 | |
569 | /* |
	 * 16-byte alignment
571 | */ |
572 | txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), |
573 | RING_DESC_ALIGN); |
574 | txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); |
575 | txring->next_to_use = 0; |
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	txring->bufinf = kcalloc(jme->tx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
582 | if (unlikely(!(txring->bufinf))) |
583 | goto err_free_txring; |
584 | |
585 | return 0; |
586 | |
587 | err_free_txring: |
	dma_free_coherent(&(jme->pdev->dev),
			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
			  txring->alloc,
			  txring->dmaalloc);
592 | |
593 | err_set_null: |
594 | txring->desc = NULL; |
595 | txring->dmaalloc = 0; |
596 | txring->dma = 0; |
597 | txring->bufinf = NULL; |
598 | |
599 | return -ENOMEM; |
600 | } |
601 | |
602 | static void |
603 | jme_free_tx_resources(struct jme_adapter *jme) |
604 | { |
605 | int i; |
606 | struct jme_ring *txring = &(jme->txring[0]); |
607 | struct jme_buffer_info *txbi; |
608 | |
609 | if (txring->alloc) { |
610 | if (txring->bufinf) { |
611 | for (i = 0 ; i < jme->tx_ring_size ; ++i) { |
612 | txbi = txring->bufinf + i; |
613 | if (txbi->skb) { |
614 | dev_kfree_skb(txbi->skb); |
615 | txbi->skb = NULL; |
616 | } |
617 | txbi->mapping = 0; |
618 | txbi->len = 0; |
619 | txbi->nr_desc = 0; |
620 | txbi->start_xmit = 0; |
621 | } |
			kfree(txring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);
629 | |
630 | txring->alloc = NULL; |
631 | txring->desc = NULL; |
632 | txring->dmaalloc = 0; |
633 | txring->dma = 0; |
634 | txring->bufinf = NULL; |
635 | } |
636 | txring->next_to_use = 0; |
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);
639 | } |
640 | |
641 | static inline void |
642 | jme_enable_tx_engine(struct jme_adapter *jme) |
643 | { |
644 | /* |
645 | * Select Queue 0 |
646 | */ |
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
648 | wmb(); |
649 | |
650 | /* |
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
661 | |
662 | /* |
663 | * Enable TX Engine |
664 | */ |
665 | wmb(); |
	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
669 | |
670 | /* |
671 | * Start clock for TX MAC Processor |
672 | */ |
673 | jme_mac_txclk_on(jme); |
674 | } |
675 | |
676 | static inline void |
677 | jme_disable_tx_engine(struct jme_adapter *jme) |
678 | { |
679 | int i; |
680 | u32 val; |
681 | |
682 | /* |
683 | * Disable TX Engine |
684 | */ |
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable TX engine timeout\n");
697 | |
698 | /* |
699 | * Stop clock for TX MAC Processor |
700 | */ |
701 | jme_mac_txclk_off(jme); |
702 | } |
703 | |
704 | static void |
705 | jme_set_clean_rxdesc(struct jme_adapter *jme, int i) |
706 | { |
707 | struct jme_ring *rxring = &(jme->rxring[0]); |
708 | register struct rxdesc *rxdesc = rxring->desc; |
709 | struct jme_buffer_info *rxbi = rxring->bufinf; |
710 | rxdesc += i; |
711 | rxbi += i; |
712 | |
713 | rxdesc->dw[0] = 0; |
714 | rxdesc->dw[1] = 0; |
715 | rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); |
716 | rxdesc->desc1.bufaddrl = cpu_to_le32( |
717 | (__u64)rxbi->mapping & 0xFFFFFFFFUL); |
718 | rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); |
719 | if (jme->dev->features & NETIF_F_HIGHDMA) |
720 | rxdesc->desc1.flags = RXFLAG_64BIT; |
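	/*
	 * Ensure the descriptor fields above are visible before the OWN
	 * bit hands the descriptor back to the hardware.
	 */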
721 | wmb(); |
722 | rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; |
723 | } |
724 | |
725 | static int |
726 | jme_make_new_rx_buf(struct jme_adapter *jme, int i) |
727 | { |
728 | struct jme_ring *rxring = &(jme->rxring[0]); |
729 | struct jme_buffer_info *rxbi = rxring->bufinf + i; |
730 | struct sk_buff *skb; |
731 | dma_addr_t mapping; |
732 | |
	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
735 | if (unlikely(!skb)) |
736 | return -ENOMEM; |
737 | |
738 | mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data), |
739 | offset_in_page(skb->data), skb_tailroom(skb), |
740 | DMA_FROM_DEVICE); |
741 | if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) { |
742 | dev_kfree_skb(skb); |
743 | return -ENOMEM; |
744 | } |
745 | |
746 | if (likely(rxbi->mapping)) |
747 | dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, |
748 | DMA_FROM_DEVICE); |
749 | |
750 | rxbi->skb = skb; |
751 | rxbi->len = skb_tailroom(skb); |
752 | rxbi->mapping = mapping; |
753 | return 0; |
754 | } |
755 | |
756 | static void |
757 | jme_free_rx_buf(struct jme_adapter *jme, int i) |
758 | { |
759 | struct jme_ring *rxring = &(jme->rxring[0]); |
760 | struct jme_buffer_info *rxbi = rxring->bufinf; |
761 | rxbi += i; |
762 | |
763 | if (rxbi->skb) { |
764 | dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, |
765 | DMA_FROM_DEVICE); |
766 | dev_kfree_skb(rxbi->skb); |
767 | rxbi->skb = NULL; |
768 | rxbi->mapping = 0; |
769 | rxbi->len = 0; |
770 | } |
771 | } |
772 | |
773 | static void |
774 | jme_free_rx_resources(struct jme_adapter *jme) |
775 | { |
776 | int i; |
777 | struct jme_ring *rxring = &(jme->rxring[0]); |
778 | |
779 | if (rxring->alloc) { |
780 | if (rxring->bufinf) { |
781 | for (i = 0 ; i < jme->rx_ring_size ; ++i) |
782 | jme_free_rx_buf(jme, i); |
			kfree(rxring->bufinf);
		}

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
790 | rxring->alloc = NULL; |
791 | rxring->desc = NULL; |
792 | rxring->dmaalloc = 0; |
793 | rxring->dma = 0; |
794 | rxring->bufinf = NULL; |
795 | } |
796 | rxring->next_to_use = 0; |
	atomic_set(&rxring->next_to_clean, 0);
798 | } |
799 | |
800 | static int |
801 | jme_setup_rx_resources(struct jme_adapter *jme) |
802 | { |
803 | int i; |
804 | struct jme_ring *rxring = &(jme->rxring[0]); |
805 | |
	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
					   &(rxring->dmaalloc),
					   GFP_ATOMIC);
810 | if (!rxring->alloc) |
811 | goto err_set_null; |
812 | |
813 | /* |
	 * 16-byte alignment
815 | */ |
816 | rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), |
817 | RING_DESC_ALIGN); |
818 | rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); |
819 | rxring->next_to_use = 0; |
	atomic_set(&rxring->next_to_clean, 0);

	rxring->bufinf = kcalloc(jme->rx_ring_size,
				 sizeof(struct jme_buffer_info),
				 GFP_ATOMIC);
825 | if (unlikely(!(rxring->bufinf))) |
826 | goto err_free_rxring; |
827 | |
828 | /* |
	 * Initialize Receive Descriptors
830 | */ |
831 | for (i = 0 ; i < jme->rx_ring_size ; ++i) { |
832 | if (unlikely(jme_make_new_rx_buf(jme, i))) { |
833 | jme_free_rx_resources(jme); |
834 | return -ENOMEM; |
835 | } |
836 | |
837 | jme_set_clean_rxdesc(jme, i); |
838 | } |
839 | |
840 | return 0; |
841 | |
842 | err_free_rxring: |
	dma_free_coherent(&(jme->pdev->dev),
			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
			  rxring->alloc,
			  rxring->dmaalloc);
847 | err_set_null: |
848 | rxring->desc = NULL; |
849 | rxring->dmaalloc = 0; |
850 | rxring->dma = 0; |
851 | rxring->bufinf = NULL; |
852 | |
853 | return -ENOMEM; |
854 | } |
855 | |
856 | static inline void |
857 | jme_enable_rx_engine(struct jme_adapter *jme) |
858 | { |
859 | /* |
860 | * Select Queue 0 |
861 | */ |
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
864 | wmb(); |
865 | |
866 | /* |
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
872 | |
873 | /* |
874 | * Setup RX Descriptor Count |
875 | */ |
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
877 | |
878 | /* |
879 | * Setup Unicast Filter |
880 | */ |
	jme_set_unicastaddr(jme->dev);
	jme_set_multi(jme->dev);
883 | |
884 | /* |
885 | * Enable RX Engine |
886 | */ |
887 | wmb(); |
	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
892 | |
893 | /* |
894 | * Start clock for RX MAC Processor |
895 | */ |
896 | jme_mac_rxclk_on(jme); |
897 | } |
898 | |
899 | static inline void |
900 | jme_restart_rx_engine(struct jme_adapter *jme) |
901 | { |
902 | /* |
903 | * Start RX Engine |
904 | */ |
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
909 | } |
910 | |
911 | static inline void |
912 | jme_disable_rx_engine(struct jme_adapter *jme) |
913 | { |
914 | int i; |
915 | u32 val; |
916 | |
917 | /* |
918 | * Disable RX Engine |
919 | */ |
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		pr_err("Disable RX engine timeout\n");
932 | |
933 | /* |
934 | * Stop clock for RX MAC Processor |
935 | */ |
936 | jme_mac_rxclk_off(jme); |
937 | } |
938 | |
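/*
 * Extract the UDP checksum field from an IPv4/UDP frame, returning
 * 0xFFFF if the headers cannot be parsed.  A result of zero means the
 * sender did not checksum the datagram, which lets the caller ignore a
 * hardware UDP-checksum error flag for such frames.
 */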
939 | static u16 |
940 | jme_udpsum(struct sk_buff *skb) |
941 | { |
942 | u16 csum = 0xFFFFu; |
943 | |
944 | if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) |
945 | return csum; |
946 | if (skb->protocol != htons(ETH_P_IP)) |
947 | return csum; |
948 | skb_set_network_header(skb, ETH_HLEN); |
949 | if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || |
950 | (skb->len < (ETH_HLEN + |
951 | (ip_hdr(skb)->ihl << 2) + |
952 | sizeof(struct udphdr)))) { |
953 | skb_reset_network_header(skb); |
954 | return csum; |
955 | } |
956 | skb_set_transport_header(skb, |
957 | ETH_HLEN + (ip_hdr(skb)->ihl << 2)); |
958 | csum = udp_hdr(skb)->check; |
959 | skb_reset_transport_header(skb); |
960 | skb_reset_network_header(skb); |
961 | |
962 | return csum; |
963 | } |
964 | |
965 | static int |
966 | jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) |
967 | { |
968 | if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) |
969 | return false; |
970 | |
971 | if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) |
972 | == RXWBFLAG_TCPON)) { |
973 | if (flags & RXWBFLAG_IPV4) |
			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
975 | return false; |
976 | } |
977 | |
978 | if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) |
979 | == RXWBFLAG_UDPON) && jme_udpsum(skb)) { |
980 | if (flags & RXWBFLAG_IPV4) |
			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
982 | return false; |
983 | } |
984 | |
985 | if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) |
986 | == RXWBFLAG_IPV4)) { |
		netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
988 | return false; |
989 | } |
990 | |
991 | return true; |
992 | } |
993 | |
994 | static void |
995 | jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) |
996 | { |
997 | struct jme_ring *rxring = &(jme->rxring[0]); |
998 | struct rxdesc *rxdesc = rxring->desc; |
999 | struct jme_buffer_info *rxbi = rxring->bufinf; |
1000 | struct sk_buff *skb; |
1001 | int framesize; |
1002 | |
1003 | rxdesc += idx; |
1004 | rxbi += idx; |
1005 | |
1006 | skb = rxbi->skb; |
	dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len,
				DMA_FROM_DEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping,
					   rxbi->len, DMA_FROM_DEVICE);
1013 | |
1014 | ++(NET_STAT(jme).rx_dropped); |
1015 | } else { |
1016 | framesize = le16_to_cpu(rxdesc->descwb.framesize) |
1017 | - RX_PREPAD_SIZE; |
1018 | |
1019 | skb_reserve(skb, RX_PREPAD_SIZE); |
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);
1022 | |
1023 | if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) |
1024 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1025 | else |
1026 | skb_checksum_none_assert(skb); |
1027 | |
1028 | if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { |
1029 | u16 vid = le16_to_cpu(rxdesc->descwb.vlan); |
1030 | |
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1032 | NET_STAT(jme).rx_bytes += 4; |
1033 | } |
1034 | jme->jme_rx(skb); |
1035 | |
1036 | if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == |
1037 | cpu_to_le16(RXWBFLAG_DEST_MUL)) |
1038 | ++(NET_STAT(jme).multicast); |
1039 | |
1040 | NET_STAT(jme).rx_bytes += framesize; |
1041 | ++(NET_STAT(jme).rx_packets); |
1042 | } |
1043 | |
	jme_set_clean_rxdesc(jme, idx);
}
1047 | |
1048 | static int |
1049 | jme_process_receive(struct jme_adapter *jme, int limit) |
1050 | { |
1051 | struct jme_ring *rxring = &(jme->rxring[0]); |
1052 | struct rxdesc *rxdesc; |
1053 | int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; |
1054 | |
1055 | if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) |
1056 | goto out_inc; |
1057 | |
1058 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1059 | goto out_inc; |
1060 | |
1061 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1062 | goto out_inc; |
1063 | |
	i = atomic_read(&rxring->next_to_clean);
1065 | while (limit > 0) { |
1066 | rxdesc = rxring->desc; |
1067 | rxdesc += i; |
1068 | |
1069 | if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || |
1070 | !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) |
1071 | goto out; |
1072 | --limit; |
1073 | |
1074 | rmb(); |
1075 | desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; |
1076 | |
1077 | if (unlikely(desccnt > 1 || |
1078 | rxdesc->descwb.errstat & RXWBERR_ALLERR)) { |
1079 | |
1080 | if (rxdesc->descwb.errstat & RXWBERR_CRCERR) |
1081 | ++(NET_STAT(jme).rx_crc_errors); |
1082 | else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) |
1083 | ++(NET_STAT(jme).rx_fifo_errors); |
1084 | else |
1085 | ++(NET_STAT(jme).rx_errors); |
1086 | |
1087 | if (desccnt > 1) |
1088 | limit -= desccnt - 1; |
1089 | |
1090 | for (j = i, ccnt = desccnt ; ccnt-- ; ) { |
				jme_set_clean_rxdesc(jme, j);
1092 | j = (j + 1) & (mask); |
1093 | } |
1094 | |
1095 | } else { |
			jme_alloc_and_feed_skb(jme, i);
1097 | } |
1098 | |
1099 | i = (i + desccnt) & (mask); |
1100 | } |
1101 | |
1102 | out: |
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;
}
1111 | |
1112 | static void |
1113 | jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) |
1114 | { |
1115 | if (likely(atmp == dpi->cur)) { |
1116 | dpi->cnt = 0; |
1117 | return; |
1118 | } |
1119 | |
1120 | if (dpi->attempt == atmp) { |
1121 | ++(dpi->cnt); |
1122 | } else { |
1123 | dpi->attempt = atmp; |
1124 | dpi->cnt = 0; |
1125 | } |
1126 | |
1127 | } |
1128 | |
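/*
 * Pick a packet completion coalescing (PCC) level from the RX byte,
 * packet and interrupt counts accumulated since the last PCC timer tick.
 */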
1129 | static void |
1130 | jme_dynamic_pcc(struct jme_adapter *jme) |
1131 | { |
1132 | register struct dynpcc_info *dpi = &(jme->dpi); |
1133 | |
	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
		 dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		if (dpi->attempt < dpi->cur)
			tasklet_schedule(&jme->rxclean_task);
		jme_set_rx_pcc(jme, dpi->attempt);
1146 | dpi->cur = dpi->attempt; |
1147 | dpi->cnt = 0; |
1148 | } |
1149 | } |
1150 | |
1151 | static void |
1152 | jme_start_pcc_timer(struct jme_adapter *jme) |
1153 | { |
1154 | struct dynpcc_info *dpi = &(jme->dpi); |
1155 | dpi->last_bytes = NET_STAT(jme).rx_bytes; |
1156 | dpi->last_pkts = NET_STAT(jme).rx_packets; |
1157 | dpi->intr_cnt = 0; |
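	/*
	 * The timer counts up toward 0xFFFFFF, so writing
	 * 0xFFFFFF - PCC_INTERVAL_US arms it to expire after roughly
	 * PCC_INTERVAL_US ticks.
	 */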
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
1160 | } |
1161 | |
1162 | static inline void |
1163 | jme_stop_pcc_timer(struct jme_adapter *jme) |
1164 | { |
	jwrite32(jme, JME_TMCSR, 0);
1166 | } |
1167 | |
1168 | static void |
1169 | jme_shutdown_nic(struct jme_adapter *jme) |
1170 | { |
1171 | u32 phylink; |
1172 | |
1173 | phylink = jme_linkstat_from_phy(jme); |
1174 | |
1175 | if (!(phylink & PHY_LINK_UP)) { |
1176 | /* |
1177 | * Disable all interrupt before issue timer |
1178 | */ |
1179 | jme_stop_irq(jme); |
1180 | jwrite32(jme, reg: JME_TIMER2, val: TMCSR_EN | 0xFFFFFE); |
1181 | } |
1182 | } |
1183 | |
1184 | static void |
1185 | jme_pcc_tasklet(struct tasklet_struct *t) |
1186 | { |
1187 | struct jme_adapter *jme = from_tasklet(jme, t, pcc_task); |
1188 | struct net_device *netdev = jme->dev; |
1189 | |
1190 | if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { |
1191 | jme_shutdown_nic(jme); |
1192 | return; |
1193 | } |
1194 | |
1195 | if (unlikely(!netif_carrier_ok(netdev) || |
1196 | (atomic_read(&jme->link_changing) != 1) |
1197 | )) { |
1198 | jme_stop_pcc_timer(jme); |
1199 | return; |
1200 | } |
1201 | |
1202 | if (!(test_bit(JME_FLAG_POLL, &jme->flags))) |
1203 | jme_dynamic_pcc(jme); |
1204 | |
1205 | jme_start_pcc_timer(jme); |
1206 | } |
1207 | |
1208 | static inline void |
1209 | jme_polling_mode(struct jme_adapter *jme) |
1210 | { |
	jme_set_rx_pcc(jme, PCC_OFF);
1212 | } |
1213 | |
1214 | static inline void |
1215 | jme_interrupt_mode(struct jme_adapter *jme) |
1216 | { |
	jme_set_rx_pcc(jme, PCC_P1);
1218 | } |
1219 | |
1220 | static inline int |
1221 | jme_pseudo_hotplug_enabled(struct jme_adapter *jme) |
1222 | { |
1223 | u32 apmc; |
	apmc = jread32(jme, JME_APMC);
1225 | return apmc & JME_APMC_PSEUDO_HP_EN; |
1226 | } |
1227 | |
1228 | static void |
1229 | jme_start_shutdown_timer(struct jme_adapter *jme) |
1230 | { |
1231 | u32 apmc; |
1232 | |
	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
1245 | } |
1246 | |
1247 | static void |
1248 | jme_stop_shutdown_timer(struct jme_adapter *jme) |
1249 | { |
1250 | u32 apmc; |
1251 | |
	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
1261 | } |
1262 | |
1263 | static void jme_link_change_work(struct work_struct *work) |
1264 | { |
1265 | struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task); |
1266 | struct net_device *netdev = jme->dev; |
1267 | int rc; |
1268 | |
	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
		while (atomic_read(&jme->link_changing) != 1)
			netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
1274 | } |
1275 | |
	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
1277 | goto out; |
1278 | |
1279 | jme->old_mtu = netdev->mtu; |
	netif_stop_queue(netdev);
1281 | if (jme_pseudo_hotplug_enabled(jme)) |
1282 | jme_stop_shutdown_timer(jme); |
1283 | |
1284 | jme_stop_pcc_timer(jme); |
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
1290 | jme_disable_rx_engine(jme); |
1291 | jme_disable_tx_engine(jme); |
1292 | jme_reset_mac_processor(jme); |
1293 | jme_free_rx_resources(jme); |
1294 | jme_free_tx_resources(jme); |
1295 | |
1296 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
1297 | jme_polling_mode(jme); |
1298 | |
		netif_carrier_off(netdev);
1300 | } |
1301 | |
	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			pr_err("Allocating resources for RX error, Device STOPPED!\n");
1307 | goto out_enable_tasklet; |
1308 | } |
1309 | |
1310 | rc = jme_setup_tx_resources(jme); |
1311 | if (rc) { |
			pr_err("Allocating resources for TX error, Device STOPPED!\n");
1313 | goto err_out_free_rx_resources; |
1314 | } |
1315 | |
1316 | jme_enable_rx_engine(jme); |
1317 | jme_enable_tx_engine(jme); |
1318 | |
		netif_start_queue(netdev);
1320 | |
1321 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
1322 | jme_interrupt_mode(jme); |
1323 | |
1324 | jme_start_pcc_timer(jme); |
1325 | } else if (jme_pseudo_hotplug_enabled(jme)) { |
1326 | jme_start_shutdown_timer(jme); |
1327 | } |
1328 | |
1329 | goto out_enable_tasklet; |
1330 | |
1331 | err_out_free_rx_resources: |
1332 | jme_free_rx_resources(jme); |
1333 | out_enable_tasklet: |
	tasklet_enable(&jme->txclean_task);
	tasklet_enable(&jme->rxclean_task);
	tasklet_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
1339 | } |
1340 | |
1341 | static void |
1342 | jme_rx_clean_tasklet(struct tasklet_struct *t) |
1343 | { |
1344 | struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task); |
1345 | struct dynpcc_info *dpi = &(jme->dpi); |
1346 | |
	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);
}
1351 | |
1352 | static int |
1353 | jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) |
1354 | { |
	struct jme_adapter *jme = jme_napi_priv(holder);
1356 | int rest; |
1357 | |
1358 | rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); |
1359 | |
	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
1366 | |
1367 | if (rest) { |
1368 | JME_RX_COMPLETE(netdev, holder); |
1369 | jme_interrupt_mode(jme); |
1370 | } |
1371 | |
1372 | JME_NAPI_WEIGHT_SET(budget, rest); |
1373 | return JME_NAPI_WEIGHT_VAL(budget) - rest; |
1374 | } |
1375 | |
1376 | static void |
1377 | jme_rx_empty_tasklet(struct tasklet_struct *t) |
1378 | { |
1379 | struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task); |
1380 | |
1381 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1382 | return; |
1383 | |
1384 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1385 | return; |
1386 | |
	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

	jme_rx_clean_tasklet(&jme->rxclean_task);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
1397 | } |
1398 | |
1399 | static void |
1400 | jme_wake_queue_if_stopped(struct jme_adapter *jme) |
1401 | { |
1402 | struct jme_ring *txring = &(jme->txring[0]); |
1403 | |
1404 | smp_wmb(); |
1405 | if (unlikely(netif_queue_stopped(jme->dev) && |
1406 | atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { |
		netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
		netif_wake_queue(jme->dev);
	}
}
1412 | |
1413 | static void jme_tx_clean_tasklet(struct tasklet_struct *t) |
1414 | { |
1415 | struct jme_adapter *jme = from_tasklet(jme, t, txclean_task); |
1416 | struct jme_ring *txring = &(jme->txring[0]); |
1417 | struct txdesc *txdesc = txring->desc; |
1418 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; |
1419 | int i, j, cnt = 0, max, err, mask; |
1420 | |
	tx_dbg(jme, "Into txclean\n");
1422 | |
1423 | if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) |
1424 | goto out; |
1425 | |
1426 | if (unlikely(atomic_read(&jme->link_changing) != 1)) |
1427 | goto out; |
1428 | |
1429 | if (unlikely(!netif_carrier_ok(jme->dev))) |
1430 | goto out; |
1431 | |
	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
1436 | |
1437 | ctxbi = txbi + i; |
1438 | |
1439 | if (likely(ctxbi->skb && |
1440 | !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { |
1441 | |
			tx_dbg(jme, "txclean: %d+%d@%lu\n",
			       i, ctxbi->nr_desc, jiffies);
1444 | |
1445 | err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; |
1446 | |
1447 | for (j = 1 ; j < ctxbi->nr_desc ; ++j) { |
1448 | ttxbi = txbi + ((i + j) & (mask)); |
1449 | txdesc[(i + j) & (mask)].dw[0] = 0; |
1450 | |
1451 | dma_unmap_page(&jme->pdev->dev, |
1452 | ttxbi->mapping, ttxbi->len, |
1453 | DMA_TO_DEVICE); |
1454 | |
1455 | ttxbi->mapping = 0; |
1456 | ttxbi->len = 0; |
1457 | } |
1458 | |
1459 | dev_kfree_skb(ctxbi->skb); |
1460 | |
1461 | cnt += ctxbi->nr_desc; |
1462 | |
1463 | if (unlikely(err)) { |
1464 | ++(NET_STAT(jme).tx_carrier_errors); |
1465 | } else { |
1466 | ++(NET_STAT(jme).tx_packets); |
1467 | NET_STAT(jme).tx_bytes += ctxbi->len; |
1468 | } |
1469 | |
1470 | ctxbi->skb = NULL; |
1471 | ctxbi->len = 0; |
1472 | ctxbi->start_xmit = 0; |
1473 | |
1474 | } else { |
1475 | break; |
1476 | } |
1477 | |
1478 | i = (i + ctxbi->nr_desc) & mask; |
1479 | |
1480 | ctxbi->nr_desc = 0; |
1481 | } |
1482 | |
	tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);
1486 | |
1487 | jme_wake_queue_if_stopped(jme); |
1488 | |
1489 | out: |
	atomic_inc(&jme->tx_cleaning);
1491 | } |
1492 | |
1493 | static void |
1494 | jme_intr_msi(struct jme_adapter *jme, u32 intrstat) |
1495 | { |
1496 | /* |
1497 | * Disable interrupt |
1498 | */ |
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
1500 | |
1501 | if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { |
1502 | /* |
1503 | * Link change event is critical |
1504 | * all other events are ignored |
1505 | */ |
		jwrite32(jme, JME_IEVE, intrstat);
		schedule_work(&jme->linkch_task);
1508 | goto out_reenable; |
1509 | } |
1510 | |
1511 | if (intrstat & INTR_TMINTR) { |
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
1514 | } |
1515 | |
1516 | if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { |
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
1519 | } |
1520 | |
1521 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { |
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
1526 | } |
1527 | |
1528 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { |
1529 | if (intrstat & INTR_RX0EMP) |
			atomic_inc(&jme->rx_empty);
1531 | |
1532 | if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { |
1533 | if (likely(JME_RX_SCHEDULE_PREP(jme))) { |
1534 | jme_polling_mode(jme); |
1535 | JME_RX_SCHEDULE(jme); |
1536 | } |
1537 | } |
1538 | } else { |
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
1545 | } |
1546 | |
1547 | out_reenable: |
1548 | /* |
1549 | * Re-enable interrupt |
1550 | */ |
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
1552 | } |
1553 | |
1554 | static irqreturn_t |
1555 | jme_intr(int irq, void *dev_id) |
1556 | { |
1557 | struct net_device *netdev = dev_id; |
	struct jme_adapter *jme = netdev_priv(netdev);
1559 | u32 intrstat; |
1560 | |
	intrstat = jread32(jme, JME_IEVE);
1562 | |
1563 | /* |
1564 | * Check if it's really an interrupt for us |
1565 | */ |
1566 | if (unlikely((intrstat & INTR_ENABLE) == 0)) |
1567 | return IRQ_NONE; |
1568 | |
1569 | /* |
1570 | * Check if the device still exist |
1571 | */ |
1572 | if (unlikely(intrstat == ~((typeof(intrstat))0))) |
1573 | return IRQ_NONE; |
1574 | |
1575 | jme_intr_msi(jme, intrstat); |
1576 | |
1577 | return IRQ_HANDLED; |
1578 | } |
1579 | |
1580 | static irqreturn_t |
1581 | jme_msi(int irq, void *dev_id) |
1582 | { |
1583 | struct net_device *netdev = dev_id; |
	struct jme_adapter *jme = netdev_priv(netdev);
1585 | u32 intrstat; |
1586 | |
	intrstat = jread32(jme, JME_IEVE);
1588 | |
1589 | jme_intr_msi(jme, intrstat); |
1590 | |
1591 | return IRQ_HANDLED; |
1592 | } |
1593 | |
1594 | static void |
1595 | jme_reset_link(struct jme_adapter *jme) |
1596 | { |
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1598 | } |
1599 | |
1600 | static void |
1601 | jme_restart_an(struct jme_adapter *jme) |
1602 | { |
1603 | u32 bmcr; |
1604 | |
	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
1610 | } |
1611 | |
1612 | static int |
1613 | jme_request_irq(struct jme_adapter *jme) |
1614 | { |
1615 | int rc; |
1616 | struct net_device *netdev = jme->dev; |
1617 | irq_handler_t handler = jme_intr; |
1618 | int irq_flags = IRQF_SHARED; |
1619 | |
	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			 netdev);
	if (rc) {
		netdev_err(netdev,
			   "Unable to request %s interrupt (return: %d)\n",
			   test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
			   rc);

		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
			pci_disable_msi(jme->pdev);
			clear_bit(JME_FLAG_MSI, &jme->flags);
1637 | } |
1638 | } else { |
1639 | netdev->irq = jme->pdev->irq; |
1640 | } |
1641 | |
1642 | return rc; |
1643 | } |
1644 | |
1645 | static void |
1646 | jme_free_irq(struct jme_adapter *jme) |
1647 | { |
1648 | free_irq(jme->pdev->irq, jme->dev); |
1649 | if (test_bit(JME_FLAG_MSI, &jme->flags)) { |
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
1652 | jme->dev->irq = jme->pdev->irq; |
1653 | } |
1654 | } |
1655 | |
1656 | static inline void |
1657 | jme_new_phy_on(struct jme_adapter *jme) |
1658 | { |
1659 | u32 reg; |
1660 | |
	reg = jread32(jme, JME_PHY_PWR);
	reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
		 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_ENBG;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1670 | } |
1671 | |
1672 | static inline void |
1673 | jme_new_phy_off(struct jme_adapter *jme) |
1674 | { |
1675 | u32 reg; |
1676 | |
	reg = jread32(jme, JME_PHY_PWR);
	reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
	       PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
	jwrite32(jme, JME_PHY_PWR, reg);

	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
	reg &= ~PE1_GPREG0_PBG;
	reg |= PE1_GPREG0_PDD3COLD;
	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1686 | } |
1687 | |
1688 | static inline void |
1689 | jme_phy_on(struct jme_adapter *jme) |
1690 | { |
1691 | u32 bmcr; |
1692 | |
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
1698 | jme_new_phy_on(jme); |
1699 | } |
1700 | |
1701 | static inline void |
1702 | jme_phy_off(struct jme_adapter *jme) |
1703 | { |
1704 | u32 bmcr; |
1705 | |
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= BMCR_PDOWN;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);

	if (new_phy_power_ctrl(jme->chip_main_rev))
1711 | jme_new_phy_off(jme); |
1712 | } |
1713 | |
1714 | static int |
1715 | jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) |
1716 | { |
1717 | u32 phy_addr; |
1718 | |
1719 | phy_addr = JM_PHY_SPEC_REG_READ | specreg; |
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
			     JM_PHY_SPEC_DATA_REG);
1724 | } |
1725 | |
1726 | static void |
1727 | jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) |
1728 | { |
1729 | u32 phy_addr; |
1730 | |
1731 | phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; |
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
		       phy_data);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
		       phy_addr);
1736 | } |
1737 | |
1738 | static int |
1739 | jme_phy_calibration(struct jme_adapter *jme) |
1740 | { |
1741 | u32 ctrl1000, phy_data; |
1742 | |
1743 | jme_phy_off(jme); |
1744 | jme_phy_on(jme); |
	/* Enable PHY test mode 1 */
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	ctrl1000 |= PHY_GAD_TEST_MODE_1;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1750 | |
1751 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); |
1752 | phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; |
1753 | phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | |
1754 | JM_PHY_EXT_COMM_2_CALI_ENABLE; |
1755 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); |
	msleep(20);
1757 | phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); |
1758 | phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | |
1759 | JM_PHY_EXT_COMM_2_CALI_MODE_0 | |
1760 | JM_PHY_EXT_COMM_2_CALI_LATCH); |
1761 | jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); |
1762 | |
1763 | /* Disable PHY test mode */ |
	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1767 | return 0; |
1768 | } |
1769 | |
1770 | static int |
1771 | jme_phy_setEA(struct jme_adapter *jme) |
1772 | { |
1773 | u32 phy_comm0 = 0, phy_comm1 = 0; |
1774 | u8 nic_ctrl; |
1775 | |
	pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1777 | if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) |
1778 | return 0; |
1779 | |
1780 | switch (jme->pdev->device) { |
1781 | case PCI_DEVICE_ID_JMICRON_JMC250: |
1782 | if (((jme->chip_main_rev == 5) && |
1783 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || |
1784 | (jme->chip_sub_rev == 3))) || |
1785 | (jme->chip_main_rev >= 6)) { |
1786 | phy_comm0 = 0x008A; |
1787 | phy_comm1 = 0x4109; |
1788 | } |
1789 | if ((jme->chip_main_rev == 3) && |
1790 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) |
1791 | phy_comm0 = 0xE088; |
1792 | break; |
1793 | case PCI_DEVICE_ID_JMICRON_JMC260: |
1794 | if (((jme->chip_main_rev == 5) && |
1795 | ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || |
1796 | (jme->chip_sub_rev == 3))) || |
1797 | (jme->chip_main_rev >= 6)) { |
1798 | phy_comm0 = 0x008A; |
1799 | phy_comm1 = 0x4109; |
1800 | } |
1801 | if ((jme->chip_main_rev == 3) && |
1802 | ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) |
1803 | phy_comm0 = 0xE088; |
1804 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) |
1805 | phy_comm0 = 0x608A; |
1806 | if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) |
1807 | phy_comm0 = 0x408A; |
1808 | break; |
1809 | default: |
1810 | return -ENODEV; |
1811 | } |
	if (phy_comm0)
		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
	if (phy_comm1)
		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1816 | |
1817 | return 0; |
1818 | } |
1819 | |
1820 | static int |
1821 | jme_open(struct net_device *netdev) |
1822 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
1824 | int rc; |
1825 | |
1826 | jme_clear_pm_disable_wol(jme); |
1827 | JME_NAPI_ENABLE(jme); |
1828 | |
	tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
	tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
	tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
1832 | |
1833 | rc = jme_request_irq(jme); |
1834 | if (rc) |
1835 | goto err_out; |
1836 | |
1837 | jme_start_irq(jme); |
1838 | |
1839 | jme_phy_on(jme); |
1840 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
		jme_set_link_ksettings(netdev, &jme->old_cmd);
1842 | else |
1843 | jme_reset_phy_processor(jme); |
1844 | jme_phy_calibration(jme); |
1845 | jme_phy_setEA(jme); |
1846 | jme_reset_link(jme); |
1847 | |
1848 | return 0; |
1849 | |
1850 | err_out: |
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
1853 | return rc; |
1854 | } |
1855 | |
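/*
 * Force the PHY to 100Mbps half-duplex with autonegotiation off and
 * program the MAC to match; used below when preparing the device for
 * wake-on-LAN power-down.
 */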
1856 | static void |
1857 | jme_set_100m_half(struct jme_adapter *jme) |
1858 | { |
1859 | u32 bmcr, tmp; |
1860 | |
1861 | jme_phy_on(jme); |
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1874 | } |
1875 | |
1876 | #define JME_WAIT_LINK_TIME 2000 /* 2000ms */ |
1877 | static void |
1878 | jme_wait_link(struct jme_adapter *jme) |
1879 | { |
1880 | u32 phylink, to = JME_WAIT_LINK_TIME; |
1881 | |
	msleep(1000);
	phylink = jme_linkstat_from_phy(jme);
	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
		usleep_range(10000, 11000);
1886 | phylink = jme_linkstat_from_phy(jme); |
1887 | } |
1888 | } |
1889 | |
1890 | static void |
1891 | jme_powersave_phy(struct jme_adapter *jme) |
1892 | { |
	if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
1894 | jme_set_100m_half(jme); |
1895 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) |
1896 | jme_wait_link(jme); |
1897 | jme_clear_pm_enable_wol(jme); |
1898 | } else { |
1899 | jme_phy_off(jme); |
1900 | } |
1901 | } |
1902 | |
1903 | static int |
1904 | jme_close(struct net_device *netdev) |
1905 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	cancel_work_sync(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);
1920 | |
1921 | jme_disable_rx_engine(jme); |
1922 | jme_disable_tx_engine(jme); |
1923 | jme_reset_mac_processor(jme); |
1924 | jme_free_rx_resources(jme); |
1925 | jme_free_tx_resources(jme); |
1926 | jme->phylink = 0; |
1927 | jme_phy_off(jme); |
1928 | |
1929 | return 0; |
1930 | } |
1931 | |
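/*
 * Reserve ring entries for one SKB: one control descriptor, one for
 * the linear data, and one per page fragment (nr_frags + 2 total).
 * Returns the start index, or -1 if the ring lacks enough free slots.
 */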
1932 | static int |
1933 | jme_alloc_txdesc(struct jme_adapter *jme, |
1934 | struct sk_buff *skb) |
1935 | { |
1936 | struct jme_ring *txring = &(jme->txring[0]); |
1937 | int idx, nr_alloc, mask = jme->tx_ring_mask; |
1938 | |
1939 | idx = txring->next_to_use; |
1940 | nr_alloc = skb_shinfo(skb)->nr_frags + 2; |
1941 | |
1942 | if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) |
1943 | return -1; |
1944 | |
	atomic_sub(nr_alloc, &txring->nr_free);
1946 | |
1947 | txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; |
1948 | |
1949 | return idx; |
1950 | } |
1951 | |
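/*
 * DMA-map one buffer and fill a single data descriptor with its
 * length and 64-bit bus address, recording the mapping in the buffer
 * info so it can be unmapped on TX completion.
 */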
1952 | static int |
1953 | jme_fill_tx_map(struct pci_dev *pdev, |
1954 | struct txdesc *txdesc, |
1955 | struct jme_buffer_info *txbi, |
1956 | struct page *page, |
1957 | u32 page_offset, |
1958 | u32 len, |
1959 | bool hidma) |
1960 | { |
1961 | dma_addr_t dmaaddr; |
1962 | |
1963 | dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, |
1964 | DMA_TO_DEVICE); |
1965 | |
1966 | if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) |
1967 | return -EINVAL; |
1968 | |
	dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE);
1970 | |
1971 | txdesc->dw[0] = 0; |
1972 | txdesc->dw[1] = 0; |
1973 | txdesc->desc2.flags = TXFLAG_OWN; |
1974 | txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0; |
1975 | txdesc->desc2.datalen = cpu_to_le16(len); |
1976 | txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); |
1977 | txdesc->desc2.bufaddrl = cpu_to_le32( |
1978 | (__u64)dmaaddr & 0xFFFFFFFFUL); |
1979 | |
1980 | txbi->mapping = dmaaddr; |
1981 | txbi->len = len; |
1982 | return 0; |
1983 | } |
1984 | |
1985 | static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) |
1986 | { |
1987 | struct jme_ring *txring = &(jme->txring[0]); |
1988 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; |
1989 | int mask = jme->tx_ring_mask; |
1990 | int j; |
1991 | |
1992 | for (j = 0 ; j < count ; j++) { |
1993 | ctxbi = txbi + ((startidx + j + 2) & (mask)); |
1994 | dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, |
1995 | DMA_TO_DEVICE); |
1996 | |
1997 | ctxbi->mapping = 0; |
1998 | ctxbi->len = 0; |
1999 | } |
2000 | } |
2001 | |
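/*
 * Map a whole SKB onto the ring: page fragments land at idx + 2
 * onward and the linear head at idx + 1 (idx itself is the control
 * descriptor). On a mapping failure, everything mapped so far is
 * unwound via jme_drop_tx_map().
 */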
2002 | static int |
2003 | jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) |
2004 | { |
2005 | struct jme_ring *txring = &(jme->txring[0]); |
2006 | struct txdesc *txdesc = txring->desc, *ctxdesc; |
2007 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; |
2008 | bool hidma = jme->dev->features & NETIF_F_HIGHDMA; |
2009 | int i, nr_frags = skb_shinfo(skb)->nr_frags; |
2010 | int mask = jme->tx_ring_mask; |
2011 | u32 len; |
2012 | int ret = 0; |
2013 | |
2014 | for (i = 0 ; i < nr_frags ; ++i) { |
2015 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2016 | |
2017 | ctxdesc = txdesc + ((idx + i + 2) & (mask)); |
2018 | ctxbi = txbi + ((idx + i + 2) & (mask)); |
2019 | |
		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
				      skb_frag_page(frag), skb_frag_off(frag),
				      skb_frag_size(frag), hidma);
		if (ret) {
			jme_drop_tx_map(jme, idx, i);
2025 | goto out; |
2026 | } |
2027 | } |
2028 | |
2029 | len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; |
2030 | ctxdesc = txdesc + ((idx + 1) & (mask)); |
2031 | ctxbi = txbi + ((idx + 1) & (mask)); |
	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			      offset_in_page(skb->data), len, hidma);
	if (ret)
		jme_drop_tx_map(jme, idx, i);
2036 | |
2037 | out: |
2038 | return ret; |
2039 | |
2040 | } |
2041 | |
2042 | |
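/*
 * If the SKB is GSO, enable large-send in the descriptor flags and
 * pre-compute the pseudo-header checksum the hardware expects for
 * TCP segmentation. Returns 0 when TSO is used, 1 otherwise so the
 * caller falls back to plain checksum offload.
 */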
2043 | static int |
2044 | jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) |
2045 | { |
2046 | *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); |
2047 | if (*mss) { |
2048 | *flags |= TXFLAG_LSEN; |
2049 | |
2050 | if (skb->protocol == htons(ETH_P_IP)) { |
2051 | struct iphdr *iph = ip_hdr(skb); |
2052 | |
2053 | iph->check = 0; |
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
2058 | } else { |
2059 | tcp_v6_gso_csum_prep(skb); |
2060 | } |
2061 | |
2062 | return 0; |
2063 | } |
2064 | |
2065 | return 1; |
2066 | } |
2067 | |
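/*
 * For CHECKSUM_PARTIAL SKBs, tell the hardware which checksum to
 * insert (TCP or UDP) based on the L3 protocol header.
 */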
2068 | static void |
2069 | jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) |
2070 | { |
2071 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2072 | u8 ip_proto; |
2073 | |
2074 | switch (skb->protocol) { |
2075 | case htons(ETH_P_IP): |
2076 | ip_proto = ip_hdr(skb)->protocol; |
2077 | break; |
2078 | case htons(ETH_P_IPV6): |
2079 | ip_proto = ipv6_hdr(skb)->nexthdr; |
2080 | break; |
2081 | default: |
2082 | ip_proto = 0; |
2083 | break; |
2084 | } |
2085 | |
2086 | switch (ip_proto) { |
2087 | case IPPROTO_TCP: |
2088 | *flags |= TXFLAG_TCPCS; |
2089 | break; |
2090 | case IPPROTO_UDP: |
2091 | *flags |= TXFLAG_UDPCS; |
2092 | break; |
2093 | default: |
			netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
2095 | break; |
2096 | } |
2097 | } |
2098 | } |
2099 | |
2100 | static inline void |
2101 | jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) |
2102 | { |
2103 | if (skb_vlan_tag_present(skb)) { |
2104 | *flags |= TXFLAG_TAGON; |
2105 | *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); |
2106 | } |
2107 | } |
2108 | |
2109 | static int |
2110 | jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) |
2111 | { |
2112 | struct jme_ring *txring = &(jme->txring[0]); |
2113 | struct txdesc *txdesc; |
2114 | struct jme_buffer_info *txbi; |
2115 | u8 flags; |
2116 | int ret = 0; |
2117 | |
2118 | txdesc = (struct txdesc *)txring->desc + idx; |
2119 | txbi = txring->bufinf + idx; |
2120 | |
2121 | txdesc->dw[0] = 0; |
2122 | txdesc->dw[1] = 0; |
2123 | txdesc->dw[2] = 0; |
2124 | txdesc->dw[3] = 0; |
2125 | txdesc->desc1.pktsize = cpu_to_le16(skb->len); |
2126 | /* |
2127 | * Set OWN bit at final. |
2128 | * When kernel transmit faster than NIC. |
2129 | * And NIC trying to send this descriptor before we tell |
2130 | * it to start sending this TX queue. |
2131 | * Other fields are already filled correctly. |
2132 | */ |
2133 | wmb(); |
2134 | flags = TXFLAG_OWN | TXFLAG_INT; |
2135 | /* |
2136 | * Set checksum flags while not tso |
2137 | */ |
2138 | if (jme_tx_tso(skb, mss: &txdesc->desc1.mss, flags: &flags)) |
2139 | jme_tx_csum(jme, skb, flags: &flags); |
2140 | jme_tx_vlan(skb, vlan: &txdesc->desc1.vlan, flags: &flags); |
2141 | ret = jme_map_tx_skb(jme, skb, idx); |
2142 | if (ret) |
2143 | return ret; |
2144 | |
2145 | txdesc->desc1.flags = flags; |
2146 | /* |
2147 | * Set tx buffer info after telling NIC to send |
2148 | * For better tx_clean timing |
2149 | */ |
2150 | wmb(); |
2151 | txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; |
2152 | txbi->skb = skb; |
2153 | txbi->len = skb->len; |
2154 | txbi->start_xmit = jiffies; |
2155 | if (!txbi->start_xmit) |
2156 | txbi->start_xmit = (0UL-1); |
2157 | |
2158 | return 0; |
2159 | } |
2160 | |
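/*
 * Pause the TX queue when fewer than MAX_SKB_FRAGS + 2 descriptors
 * remain (the worst case one SKB can consume), waking it again if
 * enough entries were freed in the meantime; also stop the queue if
 * the oldest in-flight descriptor has been pending past TX_TIMEOUT.
 */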
2161 | static void |
2162 | jme_stop_queue_if_full(struct jme_adapter *jme) |
2163 | { |
2164 | struct jme_ring *txring = &(jme->txring[0]); |
2165 | struct jme_buffer_info *txbi = txring->bufinf; |
	int idx = atomic_read(&txring->next_to_clean);
2167 | |
2168 | txbi += idx; |
2169 | |
2170 | smp_wmb(); |
2171 | if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { |
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
2179 | } |
2180 | } |
2181 | |
2182 | if (unlikely(txbi->start_xmit && |
2183 | time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) && |
2184 | txbi->skb)) { |
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev,
			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
2188 | } |
2189 | } |
2190 | |
2191 | /* |
2192 | * This function is already protected by netif_tx_lock() |
2193 | */ |
2194 | |
2195 | static netdev_tx_t |
2196 | jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) |
2197 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2199 | int idx; |
2200 | |
2201 | if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) { |
2202 | dev_kfree_skb_any(skb); |
2203 | ++(NET_STAT(jme).tx_dropped); |
2204 | return NETDEV_TX_OK; |
2205 | } |
2206 | |
2207 | idx = jme_alloc_txdesc(jme, skb); |
2208 | |
2209 | if (unlikely(idx < 0)) { |
		netif_stop_queue(netdev);
		netif_err(jme, tx_err, jme->dev,
			  "BUG! Tx ring full when queue awake!\n");
2213 | |
2214 | return NETDEV_TX_BUSY; |
2215 | } |
2216 | |
2217 | if (jme_fill_tx_desc(jme, skb, idx)) |
2218 | return NETDEV_TX_OK; |
2219 | |
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
2221 | TXCS_SELECT_QUEUE0 | |
2222 | TXCS_QUEUE0S | |
2223 | TXCS_ENABLE); |
2224 | |
	tx_dbg(jme, "xmit: %d+%d@%lu\n",
2226 | idx, skb_shinfo(skb)->nr_frags + 2, jiffies); |
2227 | jme_stop_queue_if_full(jme); |
2228 | |
2229 | return NETDEV_TX_OK; |
2230 | } |
2231 | |
2232 | static void |
2233 | jme_set_unicastaddr(struct net_device *netdev) |
2234 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2236 | u32 val; |
2237 | |
2238 | val = (netdev->dev_addr[3] & 0xff) << 24 | |
2239 | (netdev->dev_addr[2] & 0xff) << 16 | |
2240 | (netdev->dev_addr[1] & 0xff) << 8 | |
2241 | (netdev->dev_addr[0] & 0xff); |
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (netdev->dev_addr[5] & 0xff) << 8 |
	      (netdev->dev_addr[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
2246 | } |
2247 | |
2248 | static int |
2249 | jme_set_macaddr(struct net_device *netdev, void *p) |
2250 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	eth_hw_addr_set(netdev, addr->sa_data);
	jme_set_unicastaddr(netdev);
	spin_unlock_bh(&jme->macaddr_lock);
2261 | |
2262 | return 0; |
2263 | } |
2264 | |
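/*
 * Program the RX filter: promiscuous / all-multicast passthrough, or
 * a 64-bit multicast hash table indexed by the low 6 bits of the
 * CRC-32 of each multicast address.
 */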
2265 | static void |
2266 | jme_set_multi(struct net_device *netdev) |
2267 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};

	spin_lock_bh(&jme->rxmcs_lock);
2272 | |
2273 | jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; |
2274 | |
2275 | if (netdev->flags & IFF_PROMISC) { |
2276 | jme->reg_rxmcs |= RXMCS_ALLFRAME; |
2277 | } else if (netdev->flags & IFF_ALLMULTI) { |
2278 | jme->reg_rxmcs |= RXMCS_ALLMULFRAME; |
2279 | } else if (netdev->flags & IFF_MULTICAST) { |
2280 | struct netdev_hw_addr *ha; |
2281 | int bit_nr; |
2282 | |
2283 | jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; |
2284 | netdev_for_each_mc_addr(ha, netdev) { |
2285 | bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; |
2286 | mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); |
2287 | } |
2288 | |
		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
2297 | } |
2298 | |
2299 | static int |
2300 | jme_change_mtu(struct net_device *netdev, int new_mtu) |
2301 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	netdev_update_features(netdev);
2306 | |
2307 | jme_restart_rx_engine(jme); |
2308 | jme_reset_link(jme); |
2309 | |
2310 | return 0; |
2311 | } |
2312 | |
2313 | static void |
2314 | jme_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
2315 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->phylink = 0;
	jme_reset_phy_processor(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);

	/*
	 * Force the link to reset again
	 */
	jme_reset_link(jme);
2327 | } |
2328 | |
2329 | static void |
2330 | jme_get_drvinfo(struct net_device *netdev, |
2331 | struct ethtool_drvinfo *info) |
2332 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
2338 | } |
2339 | |
2340 | static int |
2341 | jme_get_regs_len(struct net_device *netdev) |
2342 | { |
2343 | return JME_REG_LEN; |
2344 | } |
2345 | |
2346 | static void |
2347 | mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) |
2348 | { |
2349 | int i; |
2350 | |
2351 | for (i = 0 ; i < len ; i += 4) |
		p[i >> 2] = jread32(jme, reg + i);
2353 | } |
2354 | |
2355 | static void |
2356 | mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) |
2357 | { |
2358 | int i; |
2359 | u16 *p16 = (u16 *)p; |
2360 | |
2361 | for (i = 0 ; i < reg_nr ; ++i) |
		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2363 | } |
2364 | |
2365 | static void |
2366 | jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) |
2367 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2387 | } |
2388 | |
2389 | static int jme_get_coalesce(struct net_device *netdev, |
2390 | struct ethtool_coalesce *ecmd, |
2391 | struct kernel_ethtool_coalesce *kernel_coal, |
2392 | struct netlink_ext_ack *extack) |
2393 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2395 | |
2396 | ecmd->tx_coalesce_usecs = PCC_TX_TO; |
2397 | ecmd->tx_max_coalesced_frames = PCC_TX_CNT; |
2398 | |
2399 | if (test_bit(JME_FLAG_POLL, &jme->flags)) { |
2400 | ecmd->use_adaptive_rx_coalesce = false; |
2401 | ecmd->rx_coalesce_usecs = 0; |
2402 | ecmd->rx_max_coalesced_frames = 0; |
2403 | return 0; |
2404 | } |
2405 | |
2406 | ecmd->use_adaptive_rx_coalesce = true; |
2407 | |
2408 | switch (jme->dpi.cur) { |
2409 | case PCC_P1: |
2410 | ecmd->rx_coalesce_usecs = PCC_P1_TO; |
2411 | ecmd->rx_max_coalesced_frames = PCC_P1_CNT; |
2412 | break; |
2413 | case PCC_P2: |
2414 | ecmd->rx_coalesce_usecs = PCC_P2_TO; |
2415 | ecmd->rx_max_coalesced_frames = PCC_P2_CNT; |
2416 | break; |
2417 | case PCC_P3: |
2418 | ecmd->rx_coalesce_usecs = PCC_P3_TO; |
2419 | ecmd->rx_max_coalesced_frames = PCC_P3_CNT; |
2420 | break; |
2421 | default: |
2422 | break; |
2423 | } |
2424 | |
2425 | return 0; |
2426 | } |
2427 | |
2428 | static int jme_set_coalesce(struct net_device *netdev, |
2429 | struct ethtool_coalesce *ecmd, |
2430 | struct kernel_ethtool_coalesce *kernel_coal, |
2431 | struct netlink_ext_ack *extack) |
2432 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce &&
	    test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		dpi->cur = PCC_P1;
		dpi->attempt = PCC_P1;
		dpi->cnt = 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
2451 | jme->jme_rx = netif_receive_skb; |
2452 | jme_interrupt_mode(jme); |
2453 | } |
2454 | |
2455 | return 0; |
2456 | } |
2457 | |
2458 | static void |
2459 | jme_get_pauseparam(struct net_device *netdev, |
2460 | struct ethtool_pauseparam *ecmd) |
2461 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);
2471 | |
2472 | ecmd->autoneg = |
2473 | (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; |
2474 | } |
2475 | |
2476 | static int |
2477 | jme_set_pauseparam(struct net_device *netdev, |
2478 | struct ethtool_pauseparam *ecmd) |
2479 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2481 | u32 val; |
2482 | |
2483 | if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ |
2484 | (ecmd->tx_pause != 0)) { |
2485 | |
2486 | if (ecmd->tx_pause) |
2487 | jme->reg_txpfc |= TXPFC_PF_EN; |
2488 | else |
2489 | jme->reg_txpfc &= ~TXPFC_PF_EN; |
2490 | |
		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
2495 | if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ |
2496 | (ecmd->rx_pause != 0)) { |
2497 | |
2498 | if (ecmd->rx_pause) |
2499 | jme->reg_rxmcs |= RXMCS_FLOWCTRL; |
2500 | else |
2501 | jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; |
2502 | |
		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2509 | if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ |
2510 | (ecmd->autoneg != 0)) { |
2511 | |
2512 | if (ecmd->autoneg) |
2513 | val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2514 | else |
2515 | val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
2516 | |
		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			       MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);
2521 | |
2522 | return 0; |
2523 | } |
2524 | |
2525 | static void |
2526 | jme_get_wol(struct net_device *netdev, |
2527 | struct ethtool_wolinfo *wol) |
2528 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2530 | |
2531 | wol->supported = WAKE_MAGIC | WAKE_PHY; |
2532 | |
2533 | wol->wolopts = 0; |
2534 | |
2535 | if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) |
2536 | wol->wolopts |= WAKE_PHY; |
2537 | |
2538 | if (jme->reg_pmcs & PMCS_MFEN) |
2539 | wol->wolopts |= WAKE_MAGIC; |
2540 | |
2541 | } |
2542 | |
2543 | static int |
2544 | jme_set_wol(struct net_device *netdev, |
2545 | struct ethtool_wolinfo *wol) |
2546 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2548 | |
2549 | if (wol->wolopts & (WAKE_MAGICSECURE | |
2550 | WAKE_UCAST | |
2551 | WAKE_MCAST | |
2552 | WAKE_BCAST | |
2553 | WAKE_ARP)) |
2554 | return -EOPNOTSUPP; |
2555 | |
2556 | jme->reg_pmcs = 0; |
2557 | |
2558 | if (wol->wolopts & WAKE_PHY) |
2559 | jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; |
2560 | |
2561 | if (wol->wolopts & WAKE_MAGIC) |
2562 | jme->reg_pmcs |= PMCS_MFEN; |
2563 | |
2564 | return 0; |
2565 | } |
2566 | |
2567 | static int |
2568 | jme_get_link_ksettings(struct net_device *netdev, |
2569 | struct ethtool_link_ksettings *cmd) |
2570 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->phy_lock);
	mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
	spin_unlock_bh(&jme->phy_lock);
2576 | return 0; |
2577 | } |
2578 | |
2579 | static int |
2580 | jme_set_link_ksettings(struct net_device *netdev, |
2581 | const struct ethtool_link_ksettings *cmd) |
2582 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2584 | int rc, fdc = 0; |
2585 | |
2586 | if (cmd->base.speed == SPEED_1000 && |
2587 | cmd->base.autoneg != AUTONEG_ENABLE) |
2588 | return -EINVAL; |
2589 | |
2590 | /* |
2591 | * Check If user changed duplex only while force_media. |
2592 | * Hardware would not generate link change interrupt. |
2593 | */ |
2594 | if (jme->mii_if.force_media && |
2595 | cmd->base.autoneg != AUTONEG_ENABLE && |
2596 | (jme->mii_if.full_duplex != cmd->base.duplex)) |
2597 | fdc = 1; |
2598 | |
	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
	spin_unlock_bh(&jme->phy_lock);
2602 | |
2603 | if (!rc) { |
2604 | if (fdc) |
2605 | jme_reset_link(jme); |
2606 | jme->old_cmd = *cmd; |
		set_bit(JME_FLAG_SSET, &jme->flags);
2608 | } |
2609 | |
2610 | return rc; |
2611 | } |
2612 | |
2613 | static int |
2614 | jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) |
2615 | { |
2616 | int rc; |
	struct jme_adapter *jme = netdev_priv(netdev);
2618 | struct mii_ioctl_data *mii_data = if_mii(rq); |
2619 | unsigned int duplex_chg; |
2620 | |
2621 | if (cmd == SIOCSMIIREG) { |
2622 | u16 val = mii_data->val_in; |
2623 | if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && |
2624 | (val & BMCR_SPEED1000)) |
2625 | return -EINVAL; |
2626 | } |
2627 | |
	spin_lock_bh(&jme->phy_lock);
	rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
	spin_unlock_bh(&jme->phy_lock);
2631 | |
2632 | if (!rc && (cmd == SIOCSMIIREG)) { |
2633 | if (duplex_chg) |
2634 | jme_reset_link(jme); |
		jme_get_link_ksettings(netdev, &jme->old_cmd);
		set_bit(JME_FLAG_SSET, &jme->flags);
2637 | } |
2638 | |
2639 | return rc; |
2640 | } |
2641 | |
2642 | static u32 |
2643 | jme_get_link(struct net_device *netdev) |
2644 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2647 | } |
2648 | |
2649 | static u32 |
2650 | jme_get_msglevel(struct net_device *netdev) |
2651 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2653 | return jme->msg_enable; |
2654 | } |
2655 | |
2656 | static void |
2657 | jme_set_msglevel(struct net_device *netdev, u32 value) |
2658 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2660 | jme->msg_enable = value; |
2661 | } |
2662 | |
2663 | static netdev_features_t |
2664 | jme_fix_features(struct net_device *netdev, netdev_features_t features) |
2665 | { |
2666 | if (netdev->mtu > 1900) |
2667 | features &= ~(NETIF_F_ALL_TSO | NETIF_F_CSUM_MASK); |
2668 | return features; |
2669 | } |
2670 | |
2671 | static int |
2672 | jme_set_features(struct net_device *netdev, netdev_features_t features) |
2673 | { |
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->rxmcs_lock);
	if (features & NETIF_F_RXCSUM)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_bh(&jme->rxmcs_lock);
2683 | |
2684 | return 0; |
2685 | } |
2686 | |
2687 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2688 | static void jme_netpoll(struct net_device *dev) |
2689 | { |
2690 | unsigned long flags; |
2691 | |
2692 | local_irq_save(flags); |
	jme_intr(dev->irq, dev);
2694 | local_irq_restore(flags); |
2695 | } |
2696 | #endif |
2697 | |
2698 | static int |
2699 | jme_nway_reset(struct net_device *netdev) |
2700 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2702 | jme_restart_an(jme); |
2703 | return 0; |
2704 | } |
2705 | |
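/*
 * EEPROM access goes through the SMBus interface: wait for SMBCSR to
 * go idle, issue the command via SMBINTF, then poll for command
 * completion. Either timeout is reported as a busy bus.
 */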
2706 | static u8 |
2707 | jme_smb_read(struct jme_adapter *jme, unsigned int addr) |
2708 | { |
2709 | u32 val; |
2710 | int to; |
2711 | |
	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		 SMBINTF_HWRWN_READ |
		 SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2736 | return 0xFF; |
2737 | } |
2738 | |
2739 | return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; |
2740 | } |
2741 | |
2742 | static void |
2743 | jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) |
2744 | { |
2745 | u32 val; |
2746 | int to; |
2747 | |
	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		 SMBINTF_HWRWN_WRITE |
		 SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2773 | return; |
2774 | } |
2775 | |
2776 | mdelay(2); |
2777 | } |
2778 | |
2779 | static int |
2780 | jme_get_eeprom_len(struct net_device *netdev) |
2781 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
2785 | return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; |
2786 | } |
2787 | |
2788 | static int |
2789 | jme_get_eeprom(struct net_device *netdev, |
2790 | struct ethtool_eeprom *eeprom, u8 *data) |
2791 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2793 | int i, offset = eeprom->offset, len = eeprom->len; |
2794 | |
2795 | /* |
2796 | * ethtool will check the boundary for us |
2797 | */ |
2798 | eeprom->magic = JME_EEPROM_MAGIC; |
2799 | for (i = 0 ; i < len ; ++i) |
		data[i] = jme_smb_read(jme, i + offset);
2801 | |
2802 | return 0; |
2803 | } |
2804 | |
2805 | static int |
2806 | jme_set_eeprom(struct net_device *netdev, |
2807 | struct ethtool_eeprom *eeprom, u8 *data) |
2808 | { |
	struct jme_adapter *jme = netdev_priv(netdev);
2810 | int i, offset = eeprom->offset, len = eeprom->len; |
2811 | |
2812 | if (eeprom->magic != JME_EEPROM_MAGIC) |
2813 | return -EINVAL; |
2814 | |
2815 | /* |
2816 | * ethtool will check the boundary for us |
2817 | */ |
2818 | for (i = 0 ; i < len ; ++i) |
		jme_smb_write(jme, i + offset, data[i]);
2820 | |
2821 | return 0; |
2822 | } |
2823 | |
2824 | static const struct ethtool_ops jme_ethtool_ops = { |
2825 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
2826 | ETHTOOL_COALESCE_MAX_FRAMES | |
2827 | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, |
2828 | .get_drvinfo = jme_get_drvinfo, |
2829 | .get_regs_len = jme_get_regs_len, |
2830 | .get_regs = jme_get_regs, |
2831 | .get_coalesce = jme_get_coalesce, |
2832 | .set_coalesce = jme_set_coalesce, |
2833 | .get_pauseparam = jme_get_pauseparam, |
2834 | .set_pauseparam = jme_set_pauseparam, |
2835 | .get_wol = jme_get_wol, |
2836 | .set_wol = jme_set_wol, |
2837 | .get_link = jme_get_link, |
2838 | .get_msglevel = jme_get_msglevel, |
2839 | .set_msglevel = jme_set_msglevel, |
2840 | .nway_reset = jme_nway_reset, |
2841 | .get_eeprom_len = jme_get_eeprom_len, |
2842 | .get_eeprom = jme_get_eeprom, |
2843 | .set_eeprom = jme_set_eeprom, |
2844 | .get_link_ksettings = jme_get_link_ksettings, |
2845 | .set_link_ksettings = jme_set_link_ksettings, |
2846 | }; |
2847 | |
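/*
 * Pick the widest usable DMA mask: try 64-bit then 40-bit on the
 * gigabit JMC250, falling back to 32-bit. Returns 1 when a >32-bit
 * mask was set (enables NETIF_F_HIGHDMA), 0 for 32-bit, -1 on error.
 */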
2848 | static int |
2849 | jme_pci_dma64(struct pci_dev *pdev) |
2850 | { |
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		return 1;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2860 | return 0; |
2861 | |
2862 | return -1; |
2863 | } |
2864 | |
2865 | static inline void |
2866 | jme_phy_init(struct jme_adapter *jme) |
2867 | { |
2868 | u16 reg26; |
2869 | |
	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2872 | } |
2873 | |
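/*
 * Decode the CHIPMODE register into FPGA version and chip revision;
 * the main/sub revisions are the low and high nibbles of the revision
 * field and later select the PHY calibration parameters.
 */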
2874 | static inline void |
2875 | jme_check_hw_ver(struct jme_adapter *jme) |
2876 | { |
2877 | u32 chipmode; |
2878 | |
	chipmode = jread32(jme, JME_CHIPMODE);
2880 | |
2881 | jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; |
2882 | jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; |
2883 | jme->chip_main_rev = jme->chiprev & 0xF; |
2884 | jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; |
2885 | } |
2886 | |
2887 | static const struct net_device_ops jme_netdev_ops = { |
2888 | .ndo_open = jme_open, |
2889 | .ndo_stop = jme_close, |
2890 | .ndo_validate_addr = eth_validate_addr, |
2891 | .ndo_eth_ioctl = jme_ioctl, |
2892 | .ndo_start_xmit = jme_start_xmit, |
2893 | .ndo_set_mac_address = jme_set_macaddr, |
2894 | .ndo_set_rx_mode = jme_set_multi, |
2895 | .ndo_change_mtu = jme_change_mtu, |
2896 | .ndo_tx_timeout = jme_tx_timeout, |
2897 | .ndo_fix_features = jme_fix_features, |
2898 | .ndo_set_features = jme_set_features, |
2899 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2900 | .ndo_poll_controller = jme_netpoll, |
2901 | #endif |
2902 | }; |
2903 | |
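/*
 * PCI probe: disable ASPM, set the DMA mask, map BAR 0, initialize
 * adapter defaults and the MII interface, reload the EEPROM for the
 * MAC address, and register the net device.
 */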
2904 | static int |
2905 | jme_init_one(struct pci_dev *pdev, |
2906 | const struct pci_device_id *ent) |
2907 | { |
2908 | int rc = 0, using_dac, i; |
2909 | struct net_device *netdev; |
2910 | struct jme_adapter *jme; |
2911 | u16 bmcr, bmsr; |
2912 | u32 apmc; |
2913 | |
2914 | /* |
2915 | * set up PCI device basics |
2916 | */ |
2917 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | |
2918 | PCIE_LINK_STATE_CLKPM); |
2919 | |
	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device\n");
2923 | goto err_out; |
2924 | } |
2925 | |
2926 | using_dac = jme_pci_dma64(pdev); |
2927 | if (using_dac < 0) { |
2928 | pr_err("Cannot set PCI DMA Mask\n" ); |
2929 | rc = -EIO; |
2930 | goto err_out_disable_pdev; |
2931 | } |
2932 | |
2933 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
2934 | pr_err("No PCI resource region found\n" ); |
2935 | rc = -ENOMEM; |
2936 | goto err_out_disable_pdev; |
2937 | } |
2938 | |
2939 | rc = pci_request_regions(pdev, DRV_NAME); |
2940 | if (rc) { |
2941 | pr_err("Cannot obtain PCI resource region\n" ); |
2942 | goto err_out_disable_pdev; |
2943 | } |
2944 | |
	pci_set_master(pdev);
2946 | |
2947 | /* |
2948 | * alloc and init net device |
2949 | */ |
2950 | netdev = alloc_etherdev(sizeof(*jme)); |
2951 | if (!netdev) { |
2952 | rc = -ENOMEM; |
2953 | goto err_out_release_regions; |
2954 | } |
2955 | netdev->netdev_ops = &jme_netdev_ops; |
2956 | netdev->ethtool_ops = &jme_ethtool_ops; |
2957 | netdev->watchdog_timeo = TX_TIMEOUT; |
2958 | netdev->hw_features = NETIF_F_IP_CSUM | |
2959 | NETIF_F_IPV6_CSUM | |
2960 | NETIF_F_SG | |
2961 | NETIF_F_TSO | |
2962 | NETIF_F_TSO6 | |
2963 | NETIF_F_RXCSUM; |
2964 | netdev->features = NETIF_F_IP_CSUM | |
2965 | NETIF_F_IPV6_CSUM | |
2966 | NETIF_F_SG | |
2967 | NETIF_F_TSO | |
2968 | NETIF_F_TSO6 | |
2969 | NETIF_F_HW_VLAN_CTAG_TX | |
2970 | NETIF_F_HW_VLAN_CTAG_RX; |
2971 | if (using_dac) |
2972 | netdev->features |= NETIF_F_HIGHDMA; |
2973 | |
2974 | /* MTU range: 1280 - 9202*/ |
2975 | netdev->min_mtu = IPV6_MIN_MTU; |
2976 | netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN; |
2977 | |
2978 | SET_NETDEV_DEV(netdev, &pdev->dev); |
	pci_set_drvdata(pdev, netdev);
2980 | |
2981 | /* |
2982 | * init adapter info |
2983 | */ |
	jme = netdev_priv(netdev);
2985 | jme->pdev = pdev; |
2986 | jme->dev = netdev; |
2987 | jme->jme_rx = netif_rx; |
2988 | jme->old_mtu = netdev->mtu = 1500; |
2989 | jme->phylink = 0; |
2990 | jme->tx_ring_size = 1 << 10; |
2991 | jme->tx_ring_mask = jme->tx_ring_size - 1; |
2992 | jme->tx_wake_threshold = 1 << 9; |
2993 | jme->rx_ring_size = 1 << 9; |
2994 | jme->rx_ring_mask = jme->rx_ring_size - 1; |
2995 | jme->msg_enable = JME_DEF_MSG_ENABLE; |
2996 | jme->regs = ioremap(pci_resource_start(pdev, 0), |
2997 | pci_resource_len(pdev, 0)); |
2998 | if (!(jme->regs)) { |
2999 | pr_err("Mapping PCI resource region error\n" ); |
3000 | rc = -ENOMEM; |
3001 | goto err_out_free_netdev; |
3002 | } |
3003 | |
3004 | if (no_pseudohp) { |
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
3010 | } |
3011 | |
	netif_napi_add(netdev, &jme->napi, jme_poll);
3013 | |
3014 | spin_lock_init(&jme->phy_lock); |
3015 | spin_lock_init(&jme->macaddr_lock); |
3016 | spin_lock_init(&jme->rxmcs_lock); |
3017 | |
	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
3024 | INIT_WORK(&jme->linkch_task, jme_link_change_work); |
3025 | jme->dpi.cur = PCC_P1; |
3026 | |
3027 | jme->reg_ghc = 0; |
3028 | jme->reg_rxcs = RXCS_DEFAULT; |
3029 | jme->reg_rxmcs = RXMCS_DEFAULT; |
3030 | jme->reg_txpfc = 0; |
3031 | jme->reg_pmcs = PMCS_MFEN; |
3032 | jme->reg_gpreg1 = GPREG1_DEFAULT; |
3033 | |
3034 | if (jme->reg_rxmcs & RXMCS_CHECKSUM) |
3035 | netdev->features |= NETIF_F_RXCSUM; |
3036 | |
3037 | /* |
3038 | * Get Max Read Req Size from PCI Config Space |
3039 | */ |
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
3041 | jme->mrrs &= PCI_DCSR_MRRS_MASK; |
3042 | switch (jme->mrrs) { |
3043 | case MRRS_128B: |
3044 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; |
3045 | break; |
3046 | case MRRS_256B: |
3047 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; |
3048 | break; |
3049 | default: |
3050 | jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; |
3051 | break; |
3052 | } |
3053 | |
3054 | /* |
3055 | * Must check before reset_mac_processor |
3056 | */ |
3057 | jme_check_hw_ver(jme); |
3058 | jme->mii_if.dev = netdev; |
3059 | if (jme->fpgaver) { |
3060 | jme->mii_if.phy_id = 0; |
3061 | for (i = 1 ; i < 32 ; ++i) { |
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3064 | if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { |
3065 | jme->mii_if.phy_id = i; |
3066 | break; |
3067 | } |
3068 | } |
3069 | |
3070 | if (!jme->mii_if.phy_id) { |
3071 | rc = -EIO; |
3072 | pr_err("Can not find phy_id\n" ); |
3073 | goto err_out_unmap; |
3074 | } |
3075 | |
3076 | jme->reg_ghc |= GHC_LINK_POLL; |
3077 | } else { |
3078 | jme->mii_if.phy_id = 1; |
3079 | } |
3080 | if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) |
3081 | jme->mii_if.supports_gmii = true; |
3082 | else |
3083 | jme->mii_if.supports_gmii = false; |
3084 | jme->mii_if.phy_id_mask = 0x1F; |
3085 | jme->mii_if.reg_num_mask = 0x1F; |
3086 | jme->mii_if.mdio_read = jme_mdio_read; |
3087 | jme->mii_if.mdio_write = jme_mdio_write; |
3088 | |
3089 | jme_clear_pm_disable_wol(jme); |
	device_init_wakeup(&pdev->dev, true);
3091 | |
3092 | jme_set_phyfifo_5level(jme); |
3093 | jme->pcirev = pdev->revision; |
3094 | if (!jme->fpgaver) |
3095 | jme_phy_init(jme); |
3096 | jme_phy_off(jme); |
3097 | |
3098 | /* |
3099 | * Reset MAC processor and reload EEPROM for MAC Address |
3100 | */ |
3101 | jme_reset_mac_processor(jme); |
3102 | rc = jme_reload_eeprom(jme); |
3103 | if (rc) { |
3104 | pr_err("Reload eeprom for reading MAC Address error\n" ); |
3105 | goto err_out_unmap; |
3106 | } |
3107 | jme_load_macaddr(netdev); |
3108 | |
3109 | /* |
3110 | * Tell stack that we are not ready to work until open() |
3111 | */ |
	netif_carrier_off(netdev);
3113 | |
	rc = register_netdev(netdev);
	if (rc) {
		pr_err("Cannot register net device\n");
3117 | goto err_out_unmap; |
3118 | } |
3119 | |
	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
3126 | (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, |
3127 | jme->pcirev, netdev->dev_addr); |
3128 | |
3129 | return 0; |
3130 | |
3131 | err_out_unmap: |
	iounmap(jme->regs);
err_out_free_netdev:
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
3139 | err_out: |
3140 | return rc; |
3141 | } |
3142 | |
3143 | static void |
3144 | jme_remove_one(struct pci_dev *pdev) |
3145 | { |
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
3155 | } |
3156 | |
3157 | static void |
3158 | jme_shutdown(struct pci_dev *pdev) |
3159 | { |
3160 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_powersave_phy(jme);
	pci_pme_active(pdev, true);
3165 | } |
3166 | |
3167 | #ifdef CONFIG_PM_SLEEP |
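/*
 * System sleep: quiesce the IRQ and tasklets, tear down the active
 * rings, then drop the PHY into its wake-on-LAN or powered-off state
 * via jme_powersave_phy().
 */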
3168 | static int |
3169 | jme_suspend(struct device *dev) |
3170 | { |
3171 | struct net_device *netdev = dev_get_drvdata(dev); |
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
3175 | return 0; |
3176 | |
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);
3186 | |
	if (netif_carrier_ok(netdev)) {
3188 | if (test_bit(JME_FLAG_POLL, &jme->flags)) |
3189 | jme_polling_mode(jme); |
3190 | |
3191 | jme_stop_pcc_timer(jme); |
3192 | jme_disable_rx_engine(jme); |
3193 | jme_disable_tx_engine(jme); |
3194 | jme_reset_mac_processor(jme); |
3195 | jme_free_rx_resources(jme); |
3196 | jme_free_tx_resources(jme); |
		netif_carrier_off(netdev);
3198 | jme->phylink = 0; |
3199 | } |
3200 | |
	tasklet_enable(&jme->txclean_task);
	tasklet_enable(&jme->rxclean_task);
	tasklet_enable(&jme->rxempty_task);
3204 | |
3205 | jme_powersave_phy(jme); |
3206 | |
3207 | return 0; |
3208 | } |
3209 | |
3210 | static int |
3211 | jme_resume(struct device *dev) |
3212 | { |
3213 | struct net_device *netdev = dev_get_drvdata(dev); |
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
3217 | return 0; |
3218 | |
3219 | jme_clear_pm_disable_wol(jme); |
3220 | jme_phy_on(jme); |
3221 | if (test_bit(JME_FLAG_SSET, &jme->flags)) |
		jme_set_link_ksettings(netdev, &jme->old_cmd);
3223 | else |
3224 | jme_reset_phy_processor(jme); |
3225 | jme_phy_calibration(jme); |
3226 | jme_phy_setEA(jme); |
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);
3230 | |
3231 | jme_reset_link(jme); |
3232 | |
3233 | jme_start_irq(jme); |
3234 | |
3235 | return 0; |
3236 | } |
3237 | |
3238 | static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); |
3239 | #define JME_PM_OPS (&jme_pm_ops) |
3240 | |
3241 | #else |
3242 | |
3243 | #define JME_PM_OPS NULL |
3244 | #endif |
3245 | |
3246 | static const struct pci_device_id jme_pci_tbl[] = { |
3247 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, |
3248 | { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, |
3249 | { } |
3250 | }; |
3251 | |
3252 | static struct pci_driver jme_driver = { |
3253 | .name = DRV_NAME, |
3254 | .id_table = jme_pci_tbl, |
3255 | .probe = jme_init_one, |
3256 | .remove = jme_remove_one, |
3257 | .shutdown = jme_shutdown, |
3258 | .driver.pm = JME_PM_OPS, |
3259 | }; |
3260 | |
3261 | static int __init |
3262 | jme_init_module(void) |
3263 | { |
3264 | pr_info("JMicron JMC2XX ethernet driver version %s\n" , DRV_VERSION); |
3265 | return pci_register_driver(&jme_driver); |
3266 | } |
3267 | |
3268 | static void __exit |
3269 | jme_cleanup_module(void) |
3270 | { |
	pci_unregister_driver(&jme_driver);
3272 | } |
3273 | |
3274 | module_init(jme_init_module); |
3275 | module_exit(jme_cleanup_module); |
3276 | |
MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
3280 | MODULE_VERSION(DRV_VERSION); |
3281 | MODULE_DEVICE_TABLE(pci, jme_pci_tbl); |
3282 | |