1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. |
3 | * |
4 | * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle |
5 | * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. |
6 | * |
7 | * References: |
8 | * o IOC3 ASIC specification 4.51, 1996-04-18 |
9 | * o IEEE 802.3 specification, 2000 edition |
10 | * o DP38840A Specification, National Semiconductor, March 1997 |
11 | * |
12 | * To do: |
13 | * |
14 | * o Use prefetching for large packets. What is a good lower limit for |
15 | * prefetching? |
16 | * o Use hardware checksums. |
 * o Which PHYs might possibly be attached to the IOC3 in real life,
18 | * which workarounds are required for them? Do we ever have Lucent's? |
19 | * o For the 2.5 branch kill the mii-tool ioctls. |
20 | */ |
21 | |
22 | #define IOC3_NAME "ioc3-eth" |
23 | #define IOC3_VERSION "2.6.3-4" |
24 | |
25 | #include <linux/delay.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/mm.h> |
28 | #include <linux/errno.h> |
29 | #include <linux/module.h> |
30 | #include <linux/init.h> |
31 | #include <linux/crc16.h> |
32 | #include <linux/crc32.h> |
33 | #include <linux/mii.h> |
34 | #include <linux/in.h> |
35 | #include <linux/io.h> |
36 | #include <linux/ip.h> |
37 | #include <linux/tcp.h> |
38 | #include <linux/udp.h> |
39 | #include <linux/gfp.h> |
40 | #include <linux/netdevice.h> |
41 | #include <linux/etherdevice.h> |
42 | #include <linux/ethtool.h> |
43 | #include <linux/skbuff.h> |
44 | #include <linux/dma-mapping.h> |
45 | #include <linux/platform_device.h> |
46 | #include <linux/nvmem-consumer.h> |
47 | |
48 | #include <net/ip.h> |
49 | |
50 | #include <asm/sn/ioc3.h> |
51 | #include <asm/pci/bridge.h> |
52 | |
53 | #define CRC16_INIT 0 |
54 | #define CRC16_VALID 0xb001 |
55 | |
56 | /* Number of RX buffers. This is tunable in the range of 16 <= x < 512. |
57 | * The value must be a power of two. |
58 | */ |
59 | #define RX_BUFFS 64 |
60 | #define RX_RING_ENTRIES 512 /* fixed in hardware */ |
61 | #define RX_RING_MASK (RX_RING_ENTRIES - 1) |
62 | #define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) |
63 | |
64 | /* 128 TX buffers (not tunable) */ |
65 | #define TX_RING_ENTRIES 128 |
66 | #define TX_RING_MASK (TX_RING_ENTRIES - 1) |
67 | #define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) |
68 | |
69 | /* IOC3 does dma transfers in 128 byte blocks */ |
70 | #define IOC3_DMA_XFER_LEN 128UL |
71 | |
72 | /* Every RX buffer starts with 8 byte descriptor data */ |
73 | #define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) |
74 | #define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) |
75 | |
76 | #define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) |
77 | #define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) |
78 | |
/* Private per NIC data of the driver. */
struct ioc3_private {
	struct ioc3_ethregs *regs;	/* mapped ethernet register block */
	struct device *dma_dev;		/* device used for DMA API calls */
	u32 *ssram;			/* mapped SSRAM buffer memory */
	unsigned long *rxr;		/* pointer to receiver ring */
	void *tx_ring;			/* unaligned TX ring alloc (kept so it can be freed) */
	struct ioc3_etxd *txr;		/* TX ring, 16k-aligned within tx_ring */
	dma_addr_t rxr_dma;		/* DMA handle of RX ring */
	dma_addr_t txr_dma;		/* DMA handle of aligned TX ring */
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];	/* skb owning each RX slot */
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];	/* skb owning each TX slot */
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;			/* packets currently queued for TX */
	u32 emcr, ehar_h, ehar_l;	/* cached EMCR and multicast hash registers */
	spinlock_t ioc3_lock;		/* protects MII access and the TX path */
	struct mii_if_info mii;

	/* Members used by autonegotiation */
	struct timer_list ioc3_timer;
};
103 | |
104 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
105 | static void ioc3_set_multicast_list(struct net_device *dev); |
106 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); |
107 | static void ioc3_timeout(struct net_device *dev, unsigned int txqueue); |
108 | static inline unsigned int ioc3_hash(const unsigned char *addr); |
109 | static void ioc3_start(struct ioc3_private *ip); |
110 | static inline void ioc3_stop(struct ioc3_private *ip); |
111 | static void ioc3_init(struct net_device *dev); |
112 | static int ioc3_alloc_rx_bufs(struct net_device *dev); |
113 | static void ioc3_free_rx_bufs(struct ioc3_private *ip); |
114 | static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); |
115 | |
116 | static const struct ethtool_ops ioc3_ethtool_ops; |
117 | |
118 | static inline unsigned long aligned_rx_skb_addr(unsigned long addr) |
119 | { |
120 | return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); |
121 | } |
122 | |
123 | static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, |
124 | struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) |
125 | { |
126 | struct sk_buff *new_skb; |
127 | dma_addr_t d; |
128 | int offset; |
129 | |
130 | new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); |
131 | if (!new_skb) |
132 | return -ENOMEM; |
133 | |
134 | /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ |
135 | offset = aligned_rx_skb_addr(addr: (unsigned long)new_skb->data); |
136 | if (offset) |
137 | skb_reserve(skb: new_skb, len: offset); |
138 | |
139 | d = dma_map_single(ip->dma_dev, new_skb->data, |
140 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
141 | |
142 | if (dma_mapping_error(dev: ip->dma_dev, dma_addr: d)) { |
143 | dev_kfree_skb_any(skb: new_skb); |
144 | return -ENOMEM; |
145 | } |
146 | *rxb_dma = d; |
147 | *rxb = (struct ioc3_erxbuf *)new_skb->data; |
148 | skb_reserve(new_skb, RX_OFFSET); |
149 | *skb = new_skb; |
150 | |
151 | return 0; |
152 | } |
153 | |
#ifdef CONFIG_PCI_XTALK_BRIDGE
/* Fold xtalk-bridge PCI64 attribute bits into a DMA address. */
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return (addr & ~PCI64_ATTR_BAR) | attr;
}

/* RX barrier attribute written to the ERBAR register. */
#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
/* Without the xtalk bridge DMA addresses are used unmodified. */
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return addr;
}

#define ERBAR_VAL 0
#endif
169 | |
170 | static int ioc3eth_nvmem_match(struct device *dev, const void *data) |
171 | { |
172 | const char *name = dev_name(dev); |
173 | const char *prefix = data; |
174 | int prefix_len; |
175 | |
176 | prefix_len = strlen(prefix); |
177 | if (strlen(name) < (prefix_len + 3)) |
178 | return 0; |
179 | |
180 | if (memcmp(p: prefix, q: name, size: prefix_len) != 0) |
181 | return 0; |
182 | |
183 | /* found nvmem device which is attached to our ioc3 |
184 | * now check for one wire family code 09, 89 and 91 |
185 | */ |
186 | if (memcmp(p: name + prefix_len, q: "09-" , size: 3) == 0) |
187 | return 1; |
188 | if (memcmp(p: name + prefix_len, q: "89-" , size: 3) == 0) |
189 | return 1; |
190 | if (memcmp(p: name + prefix_len, q: "91-" , size: 3) == 0) |
191 | return 1; |
192 | |
193 | return 0; |
194 | } |
195 | |
196 | static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6]) |
197 | { |
198 | struct nvmem_device *nvmem; |
199 | char prefix[24]; |
200 | u8 prom[16]; |
201 | int ret; |
202 | int i; |
203 | |
204 | snprintf(buf: prefix, size: sizeof(prefix), fmt: "ioc3-%012llx-" , |
205 | res->start & ~0xffff); |
206 | |
207 | nvmem = nvmem_device_find(data: prefix, match: ioc3eth_nvmem_match); |
208 | if (IS_ERR(ptr: nvmem)) |
209 | return PTR_ERR(ptr: nvmem); |
210 | |
211 | ret = nvmem_device_read(nvmem, offset: 0, bytes: 16, buf: prom); |
212 | nvmem_device_put(nvmem); |
213 | if (ret < 0) |
214 | return ret; |
215 | |
216 | /* check, if content is valid */ |
217 | if (prom[0] != 0x0a || |
218 | crc16(CRC16_INIT, buffer: prom, len: 13) != CRC16_VALID) |
219 | return -EINVAL; |
220 | |
221 | for (i = 0; i < 6; i++) |
222 | mac_addr[i] = prom[10 - i]; |
223 | |
224 | return 0; |
225 | } |
226 | |
227 | static void __ioc3_set_mac_address(struct net_device *dev) |
228 | { |
229 | struct ioc3_private *ip = netdev_priv(dev); |
230 | |
231 | writel(val: (dev->dev_addr[5] << 8) | |
232 | dev->dev_addr[4], |
233 | addr: &ip->regs->emar_h); |
234 | writel(val: (dev->dev_addr[3] << 24) | |
235 | (dev->dev_addr[2] << 16) | |
236 | (dev->dev_addr[1] << 8) | |
237 | dev->dev_addr[0], |
238 | addr: &ip->regs->emar_l); |
239 | } |
240 | |
241 | static int ioc3_set_mac_address(struct net_device *dev, void *addr) |
242 | { |
243 | struct ioc3_private *ip = netdev_priv(dev); |
244 | struct sockaddr *sa = addr; |
245 | |
246 | eth_hw_addr_set(dev, addr: sa->sa_data); |
247 | |
248 | spin_lock_irq(lock: &ip->ioc3_lock); |
249 | __ioc3_set_mac_address(dev); |
250 | spin_unlock_irq(lock: &ip->ioc3_lock); |
251 | |
252 | return 0; |
253 | } |
254 | |
255 | /* Caller must hold the ioc3_lock ever for MII readers. This is also |
256 | * used to protect the transmitter side but it's low contention. |
257 | */ |
258 | static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) |
259 | { |
260 | struct ioc3_private *ip = netdev_priv(dev); |
261 | struct ioc3_ethregs *regs = ip->regs; |
262 | |
263 | while (readl(addr: ®s->micr) & MICR_BUSY) |
264 | ; |
265 | writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, |
266 | ®s->micr); |
267 | while (readl(addr: ®s->micr) & MICR_BUSY) |
268 | ; |
269 | |
270 | return readl(addr: ®s->midr_r) & MIDR_DATA_MASK; |
271 | } |
272 | |
273 | static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) |
274 | { |
275 | struct ioc3_private *ip = netdev_priv(dev); |
276 | struct ioc3_ethregs *regs = ip->regs; |
277 | |
278 | while (readl(addr: ®s->micr) & MICR_BUSY) |
279 | ; |
280 | writel(val: data, addr: ®s->midr_w); |
281 | writel((phy << MICR_PHYADDR_SHIFT) | reg, ®s->micr); |
282 | while (readl(addr: ®s->micr) & MICR_BUSY) |
283 | ; |
284 | } |
285 | |
286 | static int ioc3_mii_init(struct ioc3_private *ip); |
287 | |
288 | static struct net_device_stats *ioc3_get_stats(struct net_device *dev) |
289 | { |
290 | struct ioc3_private *ip = netdev_priv(dev); |
291 | struct ioc3_ethregs *regs = ip->regs; |
292 | |
293 | dev->stats.collisions += readl(addr: ®s->etcdc) & ETCDC_COLLCNT_MASK; |
294 | return &dev->stats; |
295 | } |
296 | |
/* Verify the hardware checksum for an IPv4 TCP/UDP frame and mark the
 * skb CHECKSUM_UNNECESSARY when it checks out.  @hwsum is the checksum
 * field taken from the RX descriptor (a 1's complement sum over the
 * frame); @len is the frame length excluding the trailing FCS.
 */
static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	unsigned int proto;
	unsigned char *cp;
	struct iphdr *ih;
	u32 csum, ehsum;
	u16 *ew;

	/* Did hardware handle the checksum at all? The cases we can handle
	 * are:
	 *
	 * - TCP and UDP checksums of IPv4 only.
	 * - IPv6 would be doable but we keep that for later ...
	 * - Only unfragmented packets. Did somebody already tell you
	 *   fragmentation is evil?
	 * - don't care about packet size. Worst case when processing a
	 *   malformed packet we'll try to access the packet at ip header +
	 *   64 bytes which is still inside the skb. Even in the unlikely
	 *   case where the checksum is right the higher layers will still
	 *   drop the packet as appropriate.
	 */
	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *)((char *)eh + ETH_HLEN);
	if (ip_is_fragment(iph: ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as tx - compute csum of pseudo header:
	 * payload length, protocol and source/destination addresses.
	 */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((u16)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up ethernet dest addr, src addr and protocol */
	ew = (u16 *)eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	/* Fold carries back into the low 16 bits (twice, since the first
	 * fold can itself carry).
	 */
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	/* Subtract the header sum (1's complement subtraction). */
	csum += 0xffff ^ ehsum;

	/* In the next step we also subtract the 1's complement
	 * checksum of the trailing ethernet CRC.
	 */
	cp = (char *)eh + len;	/* points at trailing CRC */
	if (len & 1) {
		csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	/* A valid 1's complement checksum folds to all ones. */
	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
364 | |
/* Receive path: drain completed RX descriptors, hand good frames to the
 * network stack and refill the ring.  Called from the interrupt handler.
 */
static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	dma_addr_t d;
	u32 w0, err;

	rxr = ip->rxr;		/* Ring base */
	rx_entry = ip->rx_ci;	/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	/* ERXBUF_V marks descriptors the hardware has completed. */
	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);	/* It's valid ... */
		if (err & ERXBUF_GOODPKT) {
			/* Byte count includes the 4-byte FCS; strip it. */
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			if (ioc3_alloc_skb(ip, skb: &new_skb, rxb: &rxb, rxb_dma: &d)) {
				/* Ouch, drop packet and just recycle packet
				 * to keep the ring filled.
				 */
				dev->stats.rx_dropped++;
				new_skb = skb;
				d = rxr[rx_entry];
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
						     hwsum: w0 & ERXBUF_IPCKSUM_MASK,
						     len);

			dma_unmap_single(ip->dma_dev, rxr[rx_entry],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison */

			dev->stats.rx_packets++;	/* Statistics */
			dev->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			 * reached the network layer so we can just
			 * recycle it.
			 */
			new_skb = skb;
			d = rxr[rx_entry];
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;

next:
		/* Hand a buffer (fresh or recycled) back to the hardware
		 * at the producer slot.
		 */
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & RX_RING_MASK;	/* Update erpir */

		/* Now go on to the next ring entry. */
		rx_entry = (rx_entry + 1) & RX_RING_MASK;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	/* Publish the new producer index and re-arm the RX interrupt. */
	writel(val: (n_entry << 3) | ERPIR_ARM, addr: &ip->regs->erpir);
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}
444 | |
/* TX completion: reclaim transmitted skbs up to the chip's consume
 * index, update statistics and wake the queue if it was stopped.
 * Called from the interrupt handler.
 */
static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long packets, bytes;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(lock: &ip->ioc3_lock);
	etcir = readl(addr: &regs->etcir);

	/* Bits 7.. of ETCIR hold the hardware consume index. */
	tx_entry = (etcir >> 7) & TX_RING_MASK;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	/* NOTE(review): reclaimed skbs are not DMA-unmapped here
	 * (ioc3_tx_unmap() is only called from ioc3_clean_tx_ring()) —
	 * verify against the mapping done in the xmit path.
	 */
	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_consume_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & TX_RING_MASK;	/* Next */

		etcir = readl(addr: &regs->etcir);	/* More pkts sent? */
		tx_entry = (etcir >> 7) & TX_RING_MASK;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	/* Restart the queue once there is room in the ring again. */
	if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(lock: &ip->ioc3_lock);
}
485 | |
/* Deal with fatal IOC3 errors. This condition might be caused by a hard or
 * software problems, so we should try to recover
 * more gracefully if this ever happens. In theory we might be flooded
 * with such error interrupts if something really goes wrong, so we might
 * also consider to take the interface down.
 */
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock(lock: &ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		net_err_ratelimited("%s: RX overflow.\n", dev->name);
	if (eisr & EISR_RXBUFOFLO)
		net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
	if (eisr & EISR_RXMEMERR)
		net_err_ratelimited("%s: RX PCI error.\n", dev->name);
	if (eisr & EISR_RXPARERR)
		net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
	if (eisr & EISR_TXBUFUFLO)
		net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
	if (eisr & EISR_TXMEMERR)
		net_err_ratelimited("%s: TX PCI error.\n", dev->name);

	/* Full restart: quiesce the chip, drop all buffers, then bring
	 * the interface back up from scratch.
	 */
	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	/* NOTE(review): ioc3_init() calls del_timer_sync() while we hold
	 * ioc3_lock, and the timer handler takes the same lock via
	 * ioc3_setup_duplex() — verify this cannot deadlock if the timer
	 * is running concurrently.
	 */
	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, format: "%s: rx buffer allocation failed\n", __func__);
		spin_unlock(lock: &ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(lock: &ip->ioc3_lock);
}
528 | |
529 | /* The interrupt handler does all of the Rx thread work and cleans up |
530 | * after the Tx thread. |
531 | */ |
532 | static irqreturn_t ioc3_interrupt(int irq, void *dev_id) |
533 | { |
534 | struct ioc3_private *ip = netdev_priv(dev: dev_id); |
535 | struct ioc3_ethregs *regs = ip->regs; |
536 | u32 eisr; |
537 | |
538 | eisr = readl(addr: ®s->eisr); |
539 | writel(val: eisr, addr: ®s->eisr); |
540 | readl(addr: ®s->eisr); /* Flush */ |
541 | |
542 | if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | |
543 | EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) |
544 | ioc3_error(dev: dev_id, eisr); |
545 | if (eisr & EISR_RXTIMERINT) |
546 | ioc3_rx(dev: dev_id); |
547 | if (eisr & EISR_TXEXPLICIT) |
548 | ioc3_tx(dev: dev_id); |
549 | |
550 | return IRQ_HANDLED; |
551 | } |
552 | |
553 | static inline void ioc3_setup_duplex(struct ioc3_private *ip) |
554 | { |
555 | struct ioc3_ethregs *regs = ip->regs; |
556 | |
557 | spin_lock_irq(lock: &ip->ioc3_lock); |
558 | |
559 | if (ip->mii.full_duplex) { |
560 | writel(ETCSR_FD, ®s->etcsr); |
561 | ip->emcr |= EMCR_DUPLEX; |
562 | } else { |
563 | writel(ETCSR_HD, ®s->etcsr); |
564 | ip->emcr &= ~EMCR_DUPLEX; |
565 | } |
566 | writel(val: ip->emcr, addr: ®s->emcr); |
567 | |
568 | spin_unlock_irq(lock: &ip->ioc3_lock); |
569 | } |
570 | |
571 | static void ioc3_timer(struct timer_list *t) |
572 | { |
573 | struct ioc3_private *ip = from_timer(ip, t, ioc3_timer); |
574 | |
575 | /* Print the link status if it has changed */ |
576 | mii_check_media(mii: &ip->mii, ok_to_print: 1, init_media: 0); |
577 | ioc3_setup_duplex(ip); |
578 | |
579 | ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ |
580 | add_timer(timer: &ip->ioc3_timer); |
581 | } |
582 | |
583 | /* Try to find a PHY. There is no apparent relation between the MII addresses |
584 | * in the SGI documentation and what we find in reality, so we simply probe |
585 | * for the PHY. |
586 | */ |
587 | static int ioc3_mii_init(struct ioc3_private *ip) |
588 | { |
589 | u16 word; |
590 | int i; |
591 | |
592 | for (i = 0; i < 32; i++) { |
593 | word = ioc3_mdio_read(dev: ip->mii.dev, phy: i, MII_PHYSID1); |
594 | |
595 | if (word != 0xffff && word != 0x0000) { |
596 | ip->mii.phy_id = i; |
597 | return 0; |
598 | } |
599 | } |
600 | ip->mii.phy_id = -1; |
601 | return -ENODEV; |
602 | } |
603 | |
604 | static void ioc3_mii_start(struct ioc3_private *ip) |
605 | { |
606 | ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */ |
607 | add_timer(timer: &ip->ioc3_timer); |
608 | } |
609 | |
610 | static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) |
611 | { |
612 | struct ioc3_etxd *desc; |
613 | u32 cmd, bufcnt, len; |
614 | |
615 | desc = &ip->txr[entry]; |
616 | cmd = be32_to_cpu(desc->cmd); |
617 | bufcnt = be32_to_cpu(desc->bufcnt); |
618 | if (cmd & ETXD_B1V) { |
619 | len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; |
620 | dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), |
621 | len, DMA_TO_DEVICE); |
622 | } |
623 | if (cmd & ETXD_B2V) { |
624 | len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; |
625 | dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), |
626 | len, DMA_TO_DEVICE); |
627 | } |
628 | } |
629 | |
630 | static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) |
631 | { |
632 | struct sk_buff *skb; |
633 | int i; |
634 | |
635 | for (i = 0; i < TX_RING_ENTRIES; i++) { |
636 | skb = ip->tx_skbs[i]; |
637 | if (skb) { |
638 | ioc3_tx_unmap(ip, entry: i); |
639 | ip->tx_skbs[i] = NULL; |
640 | dev_kfree_skb_any(skb); |
641 | } |
642 | ip->txr[i].cmd = 0; |
643 | } |
644 | ip->tx_pi = 0; |
645 | ip->tx_ci = 0; |
646 | } |
647 | |
648 | static void ioc3_free_rx_bufs(struct ioc3_private *ip) |
649 | { |
650 | int rx_entry, n_entry; |
651 | struct sk_buff *skb; |
652 | |
653 | n_entry = ip->rx_ci; |
654 | rx_entry = ip->rx_pi; |
655 | |
656 | while (n_entry != rx_entry) { |
657 | skb = ip->rx_skbs[n_entry]; |
658 | if (skb) { |
659 | dma_unmap_single(ip->dma_dev, |
660 | be64_to_cpu(ip->rxr[n_entry]), |
661 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
662 | dev_kfree_skb_any(skb); |
663 | } |
664 | n_entry = (n_entry + 1) & RX_RING_MASK; |
665 | } |
666 | } |
667 | |
668 | static int ioc3_alloc_rx_bufs(struct net_device *dev) |
669 | { |
670 | struct ioc3_private *ip = netdev_priv(dev); |
671 | struct ioc3_erxbuf *rxb; |
672 | dma_addr_t d; |
673 | int i; |
674 | |
675 | /* Now the rx buffers. The RX ring may be larger but |
676 | * we only allocate 16 buffers for now. Need to tune |
677 | * this for performance and memory later. |
678 | */ |
679 | for (i = 0; i < RX_BUFFS; i++) { |
680 | if (ioc3_alloc_skb(ip, skb: &ip->rx_skbs[i], rxb: &rxb, rxb_dma: &d)) |
681 | return -ENOMEM; |
682 | |
683 | rxb->w0 = 0; /* Clear valid flag */ |
684 | ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); |
685 | } |
686 | ip->rx_ci = 0; |
687 | ip->rx_pi = RX_BUFFS; |
688 | |
689 | return 0; |
690 | } |
691 | |
692 | static inline void ioc3_ssram_disc(struct ioc3_private *ip) |
693 | { |
694 | struct ioc3_ethregs *regs = ip->regs; |
695 | u32 *ssram0 = &ip->ssram[0x0000]; |
696 | u32 *ssram1 = &ip->ssram[0x4000]; |
697 | u32 pattern = 0x5555; |
698 | |
699 | /* Assume the larger size SSRAM and enable parity checking */ |
700 | writel(readl(®s->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), ®s->emcr); |
701 | readl(addr: ®s->emcr); /* Flush */ |
702 | |
703 | writel(val: pattern, addr: ssram0); |
704 | writel(~pattern & IOC3_SSRAM_DM, ssram1); |
705 | |
706 | if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || |
707 | (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { |
708 | /* set ssram size to 64 KB */ |
709 | ip->emcr |= EMCR_RAMPAR; |
710 | writel(readl(®s->emcr) & ~EMCR_BUFSIZ, ®s->emcr); |
711 | } else { |
712 | ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; |
713 | } |
714 | } |
715 | |
716 | static void ioc3_init(struct net_device *dev) |
717 | { |
718 | struct ioc3_private *ip = netdev_priv(dev); |
719 | struct ioc3_ethregs *regs = ip->regs; |
720 | |
721 | del_timer_sync(timer: &ip->ioc3_timer); /* Kill if running */ |
722 | |
723 | writel(EMCR_RST, ®s->emcr); /* Reset */ |
724 | readl(addr: ®s->emcr); /* Flush WB */ |
725 | udelay(4); /* Give it time ... */ |
726 | writel(val: 0, addr: ®s->emcr); |
727 | readl(addr: ®s->emcr); |
728 | |
729 | /* Misc registers */ |
730 | writel(ERBAR_VAL, addr: ®s->erbar); |
731 | readl(addr: ®s->etcdc); /* Clear on read */ |
732 | writel(val: 15, addr: ®s->ercsr); /* RX low watermark */ |
733 | writel(val: 0, addr: ®s->ertr); /* Interrupt immediately */ |
734 | __ioc3_set_mac_address(dev); |
735 | writel(val: ip->ehar_h, addr: ®s->ehar_h); |
736 | writel(val: ip->ehar_l, addr: ®s->ehar_l); |
737 | writel(val: 42, addr: ®s->ersr); /* XXX should be random */ |
738 | } |
739 | |
740 | static void ioc3_start(struct ioc3_private *ip) |
741 | { |
742 | struct ioc3_ethregs *regs = ip->regs; |
743 | unsigned long ring; |
744 | |
745 | /* Now the rx ring base, consume & produce registers. */ |
746 | ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); |
747 | writel(val: ring >> 32, addr: ®s->erbr_h); |
748 | writel(val: ring & 0xffffffff, addr: ®s->erbr_l); |
749 | writel(val: ip->rx_ci << 3, addr: ®s->ercir); |
750 | writel((ip->rx_pi << 3) | ERPIR_ARM, ®s->erpir); |
751 | |
752 | ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); |
753 | |
754 | ip->txqlen = 0; /* nothing queued */ |
755 | |
756 | /* Now the tx ring base, consume & produce registers. */ |
757 | writel(val: ring >> 32, addr: ®s->etbr_h); |
758 | writel(val: ring & 0xffffffff, addr: ®s->etbr_l); |
759 | writel(val: ip->tx_pi << 7, addr: ®s->etpir); |
760 | writel(val: ip->tx_ci << 7, addr: ®s->etcir); |
761 | readl(addr: ®s->etcir); /* Flush */ |
762 | |
763 | ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | |
764 | EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; |
765 | writel(val: ip->emcr, addr: ®s->emcr); |
766 | writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | |
767 | EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | |
768 | EISR_TXEXPLICIT | EISR_TXMEMERR, ®s->eier); |
769 | readl(addr: ®s->eier); |
770 | } |
771 | |
772 | static inline void ioc3_stop(struct ioc3_private *ip) |
773 | { |
774 | struct ioc3_ethregs *regs = ip->regs; |
775 | |
776 | writel(val: 0, addr: ®s->emcr); /* Shutup */ |
777 | writel(val: 0, addr: ®s->eier); /* Disable interrupts */ |
778 | readl(addr: ®s->eier); /* Flush */ |
779 | } |
780 | |
781 | static int ioc3_open(struct net_device *dev) |
782 | { |
783 | struct ioc3_private *ip = netdev_priv(dev); |
784 | |
785 | ip->ehar_h = 0; |
786 | ip->ehar_l = 0; |
787 | |
788 | ioc3_init(dev); |
789 | if (ioc3_alloc_rx_bufs(dev)) { |
790 | netdev_err(dev, format: "%s: rx buffer allocation failed\n" , __func__); |
791 | return -ENOMEM; |
792 | } |
793 | ioc3_start(ip); |
794 | ioc3_mii_start(ip); |
795 | |
796 | netif_start_queue(dev); |
797 | return 0; |
798 | } |
799 | |
800 | static int ioc3_close(struct net_device *dev) |
801 | { |
802 | struct ioc3_private *ip = netdev_priv(dev); |
803 | |
804 | del_timer_sync(timer: &ip->ioc3_timer); |
805 | |
806 | netif_stop_queue(dev); |
807 | |
808 | ioc3_stop(ip); |
809 | |
810 | ioc3_free_rx_bufs(ip); |
811 | ioc3_clean_tx_ring(ip); |
812 | |
813 | return 0; |
814 | } |
815 | |
/* net_device callbacks; installed on the netdev in ioc3eth_probe(). */
static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open = ioc3_open,
	.ndo_stop = ioc3_close,
	.ndo_start_xmit = ioc3_start_xmit,
	.ndo_tx_timeout = ioc3_timeout,
	.ndo_get_stats = ioc3_get_stats,
	.ndo_set_rx_mode = ioc3_set_multicast_list,
	.ndo_eth_ioctl = ioc3_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ioc3_set_mac_address,
};
827 | |
828 | static int ioc3eth_probe(struct platform_device *pdev) |
829 | { |
830 | u32 sw_physid1, sw_physid2, vendor, model, rev; |
831 | struct ioc3_private *ip; |
832 | struct net_device *dev; |
833 | struct resource *regs; |
834 | u8 mac_addr[6]; |
835 | int err; |
836 | |
837 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
838 | if (!regs) { |
839 | dev_err(&pdev->dev, "Invalid resource\n" ); |
840 | return -EINVAL; |
841 | } |
842 | /* get mac addr from one wire prom */ |
843 | if (ioc3eth_get_mac_addr(res: regs, mac_addr)) |
844 | return -EPROBE_DEFER; /* not available yet */ |
845 | |
846 | dev = alloc_etherdev(sizeof(struct ioc3_private)); |
847 | if (!dev) |
848 | return -ENOMEM; |
849 | |
850 | SET_NETDEV_DEV(dev, &pdev->dev); |
851 | |
852 | ip = netdev_priv(dev); |
853 | ip->dma_dev = pdev->dev.parent; |
854 | ip->regs = devm_platform_ioremap_resource(pdev, index: 0); |
855 | if (IS_ERR(ptr: ip->regs)) { |
856 | err = PTR_ERR(ptr: ip->regs); |
857 | goto out_free; |
858 | } |
859 | |
860 | ip->ssram = devm_platform_ioremap_resource(pdev, index: 1); |
861 | if (IS_ERR(ptr: ip->ssram)) { |
862 | err = PTR_ERR(ptr: ip->ssram); |
863 | goto out_free; |
864 | } |
865 | |
866 | dev->irq = platform_get_irq(pdev, 0); |
867 | if (dev->irq < 0) { |
868 | err = dev->irq; |
869 | goto out_free; |
870 | } |
871 | |
872 | if (devm_request_irq(dev: &pdev->dev, irq: dev->irq, handler: ioc3_interrupt, |
873 | IRQF_SHARED, devname: "ioc3-eth" , dev_id: dev)) { |
874 | dev_err(&pdev->dev, "Can't get irq %d\n" , dev->irq); |
875 | err = -ENODEV; |
876 | goto out_free; |
877 | } |
878 | |
879 | spin_lock_init(&ip->ioc3_lock); |
880 | timer_setup(&ip->ioc3_timer, ioc3_timer, 0); |
881 | |
882 | ioc3_stop(ip); |
883 | |
884 | /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ |
885 | ip->rxr = dma_alloc_coherent(dev: ip->dma_dev, RX_RING_SIZE, dma_handle: &ip->rxr_dma, |
886 | GFP_KERNEL); |
887 | if (!ip->rxr) { |
888 | pr_err("ioc3-eth: rx ring allocation failed\n" ); |
889 | err = -ENOMEM; |
890 | goto out_stop; |
891 | } |
892 | |
893 | /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */ |
894 | ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, |
895 | &ip->txr_dma, GFP_KERNEL); |
896 | if (!ip->tx_ring) { |
897 | pr_err("ioc3-eth: tx ring allocation failed\n" ); |
898 | err = -ENOMEM; |
899 | goto out_stop; |
900 | } |
901 | /* Align TX ring */ |
902 | ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); |
903 | ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); |
904 | |
905 | ioc3_init(dev); |
906 | |
907 | ip->mii.phy_id_mask = 0x1f; |
908 | ip->mii.reg_num_mask = 0x1f; |
909 | ip->mii.dev = dev; |
910 | ip->mii.mdio_read = ioc3_mdio_read; |
911 | ip->mii.mdio_write = ioc3_mdio_write; |
912 | |
913 | ioc3_mii_init(ip); |
914 | |
915 | if (ip->mii.phy_id == -1) { |
916 | netdev_err(dev, format: "Didn't find a PHY, goodbye.\n" ); |
917 | err = -ENODEV; |
918 | goto out_stop; |
919 | } |
920 | |
921 | ioc3_mii_start(ip); |
922 | ioc3_ssram_disc(ip); |
923 | eth_hw_addr_set(dev, addr: mac_addr); |
924 | |
925 | /* The IOC3-specific entries in the device structure. */ |
926 | dev->watchdog_timeo = 5 * HZ; |
927 | dev->netdev_ops = &ioc3_netdev_ops; |
928 | dev->ethtool_ops = &ioc3_ethtool_ops; |
929 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; |
930 | dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; |
931 | |
932 | sw_physid1 = ioc3_mdio_read(dev, phy: ip->mii.phy_id, MII_PHYSID1); |
933 | sw_physid2 = ioc3_mdio_read(dev, phy: ip->mii.phy_id, MII_PHYSID2); |
934 | |
935 | err = register_netdev(dev); |
936 | if (err) |
937 | goto out_stop; |
938 | |
939 | mii_check_media(mii: &ip->mii, ok_to_print: 1, init_media: 1); |
940 | ioc3_setup_duplex(ip); |
941 | |
942 | vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); |
943 | model = (sw_physid2 >> 4) & 0x3f; |
944 | rev = sw_physid2 & 0xf; |
945 | netdev_info(dev, format: "Using PHY %d, vendor 0x%x, model %d, rev %d.\n" , |
946 | ip->mii.phy_id, vendor, model, rev); |
947 | netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n" , |
948 | ip->emcr & EMCR_BUFSIZ ? 128 : 64); |
949 | |
950 | return 0; |
951 | |
952 | out_stop: |
953 | del_timer_sync(timer: &ip->ioc3_timer); |
954 | if (ip->rxr) |
955 | dma_free_coherent(dev: ip->dma_dev, RX_RING_SIZE, cpu_addr: ip->rxr, |
956 | dma_handle: ip->rxr_dma); |
957 | if (ip->tx_ring) |
958 | dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, |
959 | ip->txr_dma); |
960 | out_free: |
961 | free_netdev(dev); |
962 | return err; |
963 | } |
964 | |
965 | static void ioc3eth_remove(struct platform_device *pdev) |
966 | { |
967 | struct net_device *dev = platform_get_drvdata(pdev); |
968 | struct ioc3_private *ip = netdev_priv(dev); |
969 | |
970 | dma_free_coherent(dev: ip->dma_dev, RX_RING_SIZE, cpu_addr: ip->rxr, dma_handle: ip->rxr_dma); |
971 | dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); |
972 | |
973 | unregister_netdev(dev); |
974 | del_timer_sync(timer: &ip->ioc3_timer); |
975 | free_netdev(dev); |
976 | } |
977 | |
978 | |
979 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
980 | { |
981 | struct ioc3_private *ip = netdev_priv(dev); |
982 | struct ioc3_etxd *desc; |
983 | unsigned long data; |
984 | unsigned int len; |
985 | int produce; |
986 | u32 w0 = 0; |
987 | |
988 | /* IOC3 has a fairly simple minded checksumming hardware which simply |
989 | * adds up the 1's complement checksum for the entire packet and |
990 | * inserts it at an offset which can be specified in the descriptor |
991 | * into the transmit packet. This means we have to compensate for the |
992 | * MAC header which should not be summed and the TCP/UDP pseudo headers |
993 | * manually. |
994 | */ |
995 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
996 | const struct iphdr *ih = ip_hdr(skb); |
997 | const int proto = ntohs(ih->protocol); |
998 | unsigned int csoff; |
999 | u32 csum, ehsum; |
1000 | u16 *eh; |
1001 | |
1002 | /* The MAC header. skb->mac seem the logic approach |
1003 | * to find the MAC header - except it's a NULL pointer ... |
1004 | */ |
1005 | eh = (u16 *)skb->data; |
1006 | |
1007 | /* Sum up dest addr, src addr and protocol */ |
1008 | ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; |
1009 | |
1010 | /* Skip IP header; it's sum is always zero and was |
1011 | * already filled in by ip_output.c |
1012 | */ |
1013 | csum = csum_tcpudp_nofold(saddr: ih->saddr, daddr: ih->daddr, |
1014 | len: ih->tot_len - (ih->ihl << 2), |
1015 | proto, sum: csum_fold(sum: ehsum)); |
1016 | |
1017 | csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ |
1018 | csum = (csum & 0xffff) + (csum >> 16); |
1019 | |
1020 | csoff = ETH_HLEN + (ih->ihl << 2); |
1021 | if (proto == IPPROTO_UDP) { |
1022 | csoff += offsetof(struct udphdr, check); |
1023 | udp_hdr(skb)->check = csum; |
1024 | } |
1025 | if (proto == IPPROTO_TCP) { |
1026 | csoff += offsetof(struct tcphdr, check); |
1027 | tcp_hdr(skb)->check = csum; |
1028 | } |
1029 | |
1030 | w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); |
1031 | } |
1032 | |
1033 | spin_lock_irq(lock: &ip->ioc3_lock); |
1034 | |
1035 | data = (unsigned long)skb->data; |
1036 | len = skb->len; |
1037 | |
1038 | produce = ip->tx_pi; |
1039 | desc = &ip->txr[produce]; |
1040 | |
1041 | if (len <= 104) { |
1042 | /* Short packet, let's copy it directly into the ring. */ |
1043 | skb_copy_from_linear_data(skb, to: desc->data, len: skb->len); |
1044 | if (len < ETH_ZLEN) { |
1045 | /* Very short packet, pad with zeros at the end. */ |
1046 | memset(desc->data + len, 0, ETH_ZLEN - len); |
1047 | len = ETH_ZLEN; |
1048 | } |
1049 | desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); |
1050 | desc->bufcnt = cpu_to_be32(len); |
1051 | } else if ((data ^ (data + len - 1)) & 0x4000) { |
1052 | unsigned long b2 = (data | 0x3fffUL) + 1UL; |
1053 | unsigned long s1 = b2 - data; |
1054 | unsigned long s2 = data + len - b2; |
1055 | dma_addr_t d1, d2; |
1056 | |
1057 | desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | |
1058 | ETXD_B1V | ETXD_B2V | w0); |
1059 | desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | |
1060 | (s2 << ETXD_B2CNT_SHIFT)); |
1061 | d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); |
1062 | if (dma_mapping_error(dev: ip->dma_dev, dma_addr: d1)) |
1063 | goto drop_packet; |
1064 | d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); |
1065 | if (dma_mapping_error(dev: ip->dma_dev, dma_addr: d2)) { |
1066 | dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); |
1067 | goto drop_packet; |
1068 | } |
1069 | desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); |
1070 | desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); |
1071 | } else { |
1072 | dma_addr_t d; |
1073 | |
1074 | /* Normal sized packet that doesn't cross a page boundary. */ |
1075 | desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); |
1076 | desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); |
1077 | d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); |
1078 | if (dma_mapping_error(dev: ip->dma_dev, dma_addr: d)) |
1079 | goto drop_packet; |
1080 | desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); |
1081 | } |
1082 | |
1083 | mb(); /* make sure all descriptor changes are visible */ |
1084 | |
1085 | ip->tx_skbs[produce] = skb; /* Remember skb */ |
1086 | produce = (produce + 1) & TX_RING_MASK; |
1087 | ip->tx_pi = produce; |
1088 | writel(val: produce << 7, addr: &ip->regs->etpir); /* Fire ... */ |
1089 | |
1090 | ip->txqlen++; |
1091 | |
1092 | if (ip->txqlen >= (TX_RING_ENTRIES - 1)) |
1093 | netif_stop_queue(dev); |
1094 | |
1095 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1096 | |
1097 | return NETDEV_TX_OK; |
1098 | |
1099 | drop_packet: |
1100 | dev_kfree_skb_any(skb); |
1101 | dev->stats.tx_dropped++; |
1102 | |
1103 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1104 | |
1105 | return NETDEV_TX_OK; |
1106 | } |
1107 | |
1108 | static void ioc3_timeout(struct net_device *dev, unsigned int txqueue) |
1109 | { |
1110 | struct ioc3_private *ip = netdev_priv(dev); |
1111 | |
1112 | netdev_err(dev, format: "transmit timed out, resetting\n" ); |
1113 | |
1114 | spin_lock_irq(lock: &ip->ioc3_lock); |
1115 | |
1116 | ioc3_stop(ip); |
1117 | ioc3_free_rx_bufs(ip); |
1118 | ioc3_clean_tx_ring(ip); |
1119 | |
1120 | ioc3_init(dev); |
1121 | if (ioc3_alloc_rx_bufs(dev)) { |
1122 | netdev_err(dev, format: "%s: rx buffer allocation failed\n" , __func__); |
1123 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1124 | return; |
1125 | } |
1126 | ioc3_start(ip); |
1127 | ioc3_mii_init(ip); |
1128 | ioc3_mii_start(ip); |
1129 | |
1130 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1131 | |
1132 | netif_wake_queue(dev); |
1133 | } |
1134 | |
1135 | /* Given a multicast ethernet address, this routine calculates the |
1136 | * address's bit index in the logical address filter mask |
1137 | */ |
1138 | static inline unsigned int ioc3_hash(const unsigned char *addr) |
1139 | { |
1140 | unsigned int temp = 0; |
1141 | int bits; |
1142 | u32 crc; |
1143 | |
1144 | crc = ether_crc_le(ETH_ALEN, addr); |
1145 | |
1146 | crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ |
1147 | for (bits = 6; --bits >= 0; ) { |
1148 | temp <<= 1; |
1149 | temp |= (crc & 0x1); |
1150 | crc >>= 1; |
1151 | } |
1152 | |
1153 | return temp; |
1154 | } |
1155 | |
1156 | static void ioc3_get_drvinfo(struct net_device *dev, |
1157 | struct ethtool_drvinfo *info) |
1158 | { |
1159 | strscpy(p: info->driver, IOC3_NAME, size: sizeof(info->driver)); |
1160 | strscpy(p: info->version, IOC3_VERSION, size: sizeof(info->version)); |
1161 | strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)), |
1162 | sizeof(info->bus_info)); |
1163 | } |
1164 | |
1165 | static int ioc3_get_link_ksettings(struct net_device *dev, |
1166 | struct ethtool_link_ksettings *cmd) |
1167 | { |
1168 | struct ioc3_private *ip = netdev_priv(dev); |
1169 | |
1170 | spin_lock_irq(lock: &ip->ioc3_lock); |
1171 | mii_ethtool_get_link_ksettings(mii: &ip->mii, cmd); |
1172 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1173 | |
1174 | return 0; |
1175 | } |
1176 | |
1177 | static int ioc3_set_link_ksettings(struct net_device *dev, |
1178 | const struct ethtool_link_ksettings *cmd) |
1179 | { |
1180 | struct ioc3_private *ip = netdev_priv(dev); |
1181 | int rc; |
1182 | |
1183 | spin_lock_irq(lock: &ip->ioc3_lock); |
1184 | rc = mii_ethtool_set_link_ksettings(mii: &ip->mii, cmd); |
1185 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1186 | |
1187 | return rc; |
1188 | } |
1189 | |
1190 | static int ioc3_nway_reset(struct net_device *dev) |
1191 | { |
1192 | struct ioc3_private *ip = netdev_priv(dev); |
1193 | int rc; |
1194 | |
1195 | spin_lock_irq(lock: &ip->ioc3_lock); |
1196 | rc = mii_nway_restart(mii: &ip->mii); |
1197 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1198 | |
1199 | return rc; |
1200 | } |
1201 | |
1202 | static u32 ioc3_get_link(struct net_device *dev) |
1203 | { |
1204 | struct ioc3_private *ip = netdev_priv(dev); |
1205 | int rc; |
1206 | |
1207 | spin_lock_irq(lock: &ip->ioc3_lock); |
1208 | rc = mii_link_ok(mii: &ip->mii); |
1209 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1210 | |
1211 | return rc; |
1212 | } |
1213 | |
/* ethtool operations: drvinfo, link state and MII-backed link settings. */
static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
	.get_link_ksettings	= ioc3_get_link_ksettings,
	.set_link_ksettings	= ioc3_set_link_ksettings,
};
1221 | |
1222 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1223 | { |
1224 | struct ioc3_private *ip = netdev_priv(dev); |
1225 | int rc; |
1226 | |
1227 | spin_lock_irq(lock: &ip->ioc3_lock); |
1228 | rc = generic_mii_ioctl(mii_if: &ip->mii, mii_data: if_mii(rq), cmd, NULL); |
1229 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1230 | |
1231 | return rc; |
1232 | } |
1233 | |
1234 | static void ioc3_set_multicast_list(struct net_device *dev) |
1235 | { |
1236 | struct ioc3_private *ip = netdev_priv(dev); |
1237 | struct ioc3_ethregs *regs = ip->regs; |
1238 | struct netdev_hw_addr *ha; |
1239 | u64 ehar = 0; |
1240 | |
1241 | spin_lock_irq(lock: &ip->ioc3_lock); |
1242 | |
1243 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
1244 | ip->emcr |= EMCR_PROMISC; |
1245 | writel(val: ip->emcr, addr: ®s->emcr); |
1246 | readl(addr: ®s->emcr); |
1247 | } else { |
1248 | ip->emcr &= ~EMCR_PROMISC; |
1249 | writel(val: ip->emcr, addr: ®s->emcr); /* Clear promiscuous. */ |
1250 | readl(addr: ®s->emcr); |
1251 | |
1252 | if ((dev->flags & IFF_ALLMULTI) || |
1253 | (netdev_mc_count(dev) > 64)) { |
1254 | /* Too many for hashing to make sense or we want all |
1255 | * multicast packets anyway, so skip computing all the |
1256 | * hashes and just accept all packets. |
1257 | */ |
1258 | ip->ehar_h = 0xffffffff; |
1259 | ip->ehar_l = 0xffffffff; |
1260 | } else { |
1261 | netdev_for_each_mc_addr(ha, dev) { |
1262 | ehar |= (1UL << ioc3_hash(addr: ha->addr)); |
1263 | } |
1264 | ip->ehar_h = ehar >> 32; |
1265 | ip->ehar_l = ehar & 0xffffffff; |
1266 | } |
1267 | writel(val: ip->ehar_h, addr: ®s->ehar_h); |
1268 | writel(val: ip->ehar_l, addr: ®s->ehar_l); |
1269 | } |
1270 | |
1271 | spin_unlock_irq(lock: &ip->ioc3_lock); |
1272 | } |
1273 | |
/* Platform driver glue; the IOC3 MFD core instantiates the "ioc3-eth"
 * platform device this driver binds to.
 */
static struct platform_driver ioc3eth_driver = {
	.probe  = ioc3eth_probe,
	.remove_new = ioc3eth_remove,
	.driver = {
		.name = "ioc3-eth" ,
	}
};
1281 | |
/* Standard module boilerplate: register the platform driver at module
 * init and unregister it on exit.
 */
module_platform_driver(ioc3eth_driver);

MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>" );
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver" );
MODULE_LICENSE("GPL" );
1287 | |