// SPDX-License-Identifier: GPL-1.0+
/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly in to sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.
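
   As a worked illustration of that swap (not from the original text):
   a structure at bus address 0x12345678 must be written into any
   pointer field the chip reads as 0x56781234, i.e. with its two
   16-bit halves exchanged; the WSWAPxxx macros below do exactly this.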


   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
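
/*
 * Usage sketch (illustrative): DEB(DEB_INTS, printk(KERN_DEBUG "..."))
 * only executes the printk when the DEB_INTS bit is set in i596_debug.
 */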


#if IS_ENABLED(CONFIG_MVME16x_NET)
#define ENABLE_MVME16x_NET
#endif
#if IS_ENABLED(CONFIG_BVME6000_NET)
#define ENABLE_BVME6000_NET
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#else
#error 82596.c: unknown architecture
#endif
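
/*
 * Worked example (illustrative): with x == 0x12345678,
 * (x<<16)|(x>>16) gives 0x56781234, matching the i596's idea of
 * big-endian storage described in the header comment.  ISCP_BUSY is
 * the same trick applied to the 16-bit flag 0x0001, which therefore
 * appears as 0x00010000 when written as a swapped 32-bit word
 * (compare the unswapped Intel value 0x0001 kept for reference below).
 */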

/*
 * These were the intel versions, left here for reference. There
 * are currently no x86 users of this legacy i82596 chip.
 */
#if 0
#define WSWAPrfd(x)	((struct i596_rfd *)((long)x))
#define WSWAPrbd(x)	((struct i596_rbd *)((long)x))
#define WSWAPiscp(x)	((struct i596_iscp *)((long)x))
#define WSWAPscb(x)	((struct i596_scb *)((long)x))
#define WSWAPcmd(x)	((struct i596_cmd *)((long)x))
#define WSWAPtbd(x)	((struct i596_tbd *)((long)x))
#define WSWAPchar(x)	((char *)((long)x))
#define ISCP_BUSY	0x0001
#endif

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET	0x00	/* reset 82596 */
#define PORT_SELFTEST	0x01	/* selftest */
#define PORT_ALTSCP	0x02	/* alternate SCB address */
#define PORT_ALTDUMP	0x03	/* Alternate DUMP address */
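
/*
 * Usage sketch (an assumption based on how MPU_PORT() below combines
 * its arguments; the alignment requirement is general 82596 chip
 * knowledge, not from the original text): the 4-bit opcode is ORed
 * into the low bits of a 16-byte aligned bus address, e.g.
 *
 *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
 *
 * as done in init_i596_mem() to point the chip at our SCP.
 */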

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");

module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
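
/*
 * The trade-off, worked through (illustrative, mirrors i596_rx()): a
 * 60-byte ARP frame is copied into a small freshly allocated skb and
 * the ring buffer is reused, while a 1500-byte frame is passed up in
 * place and a new PKT_BUF_SZ buffer is hooked into the ring instead.
 */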

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set when the command has completed */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};
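
/*
 * Illustrative linkage (this mirrors what i596_add_cmd() below does):
 * appending command B after command A sets
 *
 *	A->v_next = B;					- CPU's view
 *	A->b_next = WSWAPcmd(virt_to_bus(&B->status));	- chip's view
 *
 * so the chip's pointer lands on B's status word, skipping v_next.
 */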

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
	unsigned short count;
	unsigned short zero1;
	struct i596_rbd *b_next;
	unsigned char *b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	struct i596_rbd *b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};
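
/*
 * Note (inferred from i82596_probe(), not stated in the original
 * comments): the whole of this structure lives in a single page
 * obtained with __get_free_pages() and mapped non-cached on m68k, so
 * the SCP, ISCP, SCB and all the rings are visible to the chip
 * without snooping; its size must therefore stay within one page.
 */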

static char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,		/* don't save bad frames */
#else
	0x80,		/* don't save bad frames */
#endif
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
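
/*
 * Sanity check (arithmetic, not from the original comments): the low
 * nibble of the first byte, 0x8E, encodes a 14-byte configure block,
 * which matches both the 14 initialisers above and the
 * memcpy(lp->cf_cmd.i596_config, init_setup, 14) in init_i596_mem().
 */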

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;


static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
}
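
/*
 * CA() strobes the 82596's Channel Attention line - a register write
 * on MVME16x, a dummy read of the base address on BVME6000.  Either
 * way it tells the chip to go and look at the SCB (general 82596
 * behaviour, stated here for context rather than from the original).
 */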


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}
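
/*
 * Worked timing example (arithmetic only): each iteration of the wait
 * loops above delays 10us, so the delcnt of 1000 used in
 * init_i596_mem() bounds the busy-wait at roughly 10ms, and the
 * delcnt of 100 used elsewhere at roughly 1ms.
 */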


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
	       " over %lx, rcvdt %lx, short %lx\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
		       " count %04x\n",
		       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
		       rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
		rbd->skb = NULL;
	}
}

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

		if (skb == NULL) {
			remove_rx_bufs(dev);
			return -ENOMEM;
		}

		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	return 0;
}
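
/*
 * Resulting topology (a summary inferred from the code above): both
 * lists are circular - rbds[i] links to rbds[i+1], wrapping at the
 * end, and likewise for rfds[]; only rfds[0] carries a pointer to the
 * rbd list, and the last rfd is marked CMD_EOL so the chip stops
 * there until i596_rx() advances the end-of-list marker.
 */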


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	CA(dev);

	if (wait_istat(dev, lp, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave(&lp->lock, flags);

	if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore(&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore(&lp->lock, flags);

	if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			} else {
				skb = netdev_alloc_skb(dev, pkt_len + 2);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
				}
				skb->protocol = eth_type_trans(skb, dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN, printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
			res = -EAGAIN;
			goto err_irq_dev;
		}
	}
#endif
	res = init_rx_bufs(dev);
	if (res)
		goto err_irq_56;

	netif_start_queue(dev);

	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		goto err_queue;
	}

	return 0;

err_queue:
	netif_stop_queue(dev);
	remove_rx_bufs(dev);
err_irq_56:
#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
err_irq_dev:
#endif
	free_irq(dev->irq, dev);

	return res;
}

static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}

static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct net_device * __init i82596_probe(void)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE, printk(" %2.2X", eth_addr[i]));
	eth_hw_addr_set(dev, eth_addr);

	DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE, printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page((u32)(dev->mem_start));
out1:
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock(&lp->lock);

	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS, printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;
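
	/*
	 * The four bits kept in ack_cmd are the chip's interrupt causes
	 * (named here from general i82596 background, not the original
	 * comments): 0x8000 CX command done, 0x4000 FR frame received,
	 * 0x2000 CNA command unit went inactive, 0x1000 RNR receive
	 * unit went not-ready.  Writing them back to scb.command at the
	 * end of this handler acknowledges them.
	 */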

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS, printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS, printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS, printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_consume_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR, printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR, printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS, printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS, printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	CA(dev);

	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock(&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp, 100, "close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev, lp, 100, "close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
#endif
	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}
1436 | |
1437 | /* |
1438 | * Set or clear the multicast filter for this adaptor. |
1439 | */ |
1440 | |
1441 | static void set_multicast_list(struct net_device *dev) |
1442 | { |
1443 | struct i596_private *lp = dev->ml_priv; |
1444 | int config = 0, cnt; |
1445 | |
1446 | DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n" , |
1447 | dev->name, netdev_mc_count(dev), |
1448 | dev->flags & IFF_PROMISC ? "ON" : "OFF" , |
1449 | dev->flags & IFF_ALLMULTI ? "ON" : "OFF" )); |
1450 | |
1451 | if (wait_cfg(dev, cmd: &lp->cf_cmd.cmd, delcnt: 1000, str: "config change request timed out" )) |
1452 | return; |
1453 | |
1454 | if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { |
1455 | lp->cf_cmd.i596_config[8] |= 0x01; |
1456 | config = 1; |
1457 | } |
1458 | if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { |
1459 | lp->cf_cmd.i596_config[8] &= ~0x01; |
1460 | config = 1; |
1461 | } |
1462 | if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { |
1463 | lp->cf_cmd.i596_config[11] &= ~0x20; |
1464 | config = 1; |
1465 | } |
1466 | if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { |
1467 | lp->cf_cmd.i596_config[11] |= 0x20; |
1468 | config = 1; |
1469 | } |
1470 | if (config) { |
1471 | lp->cf_cmd.cmd.command = CmdConfigure; |
1472 | i596_add_cmd(dev, cmd: &lp->cf_cmd.cmd); |
1473 | } |
1474 | |
1475 | cnt = netdev_mc_count(dev); |
1476 | if (cnt > MAX_MC_CNT) |
1477 | { |
1478 | cnt = MAX_MC_CNT; |
1479 | printk(KERN_ERR "%s: Only %d multicast addresses supported" , |
1480 | dev->name, cnt); |
1481 | } |
1482 | |
1483 | if (!netdev_mc_empty(dev)) { |
1484 | struct netdev_hw_addr *ha; |
1485 | unsigned char *cp; |
1486 | struct mc_cmd *cmd; |
1487 | |
1488 | if (wait_cfg(dev, cmd: &lp->mc_cmd.cmd, delcnt: 1000, str: "multicast list change request timed out" )) |
1489 | return; |
1490 | cmd = &lp->mc_cmd; |
1491 | cmd->cmd.command = CmdMulticastList; |
1492 | cmd->mc_cnt = cnt * ETH_ALEN; |
1493 | cp = cmd->mc_addrs; |
1494 | netdev_for_each_mc_addr(ha, dev) { |
1495 | if (!cnt--) |
1496 | break; |
1497 | memcpy(cp, ha->addr, ETH_ALEN); |
1498 | if (i596_debug > 1) |
1499 | DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n" , |
1500 | dev->name, cp)); |
1501 | cp += ETH_ALEN; |
1502 | } |
1503 | i596_add_cmd(dev, cmd: &cmd->cmd); |
1504 | } |
1505 | } |

static struct net_device *dev_82596;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

static int __init i82596_init(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe();
	return PTR_ERR_OR_ZERO(dev_82596);
}
module_init(i82596_init);

static void __exit i82596_cleanup(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page((u32)(dev_82596->mem_start));
	free_netdev(dev_82596);
}
module_exit(i82596_cleanup);