/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>


/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and,
 * without TX_COAL_INTS_ONLY, more IRQs).
 */
#define USE_TX_COAL_NOW	0

/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects a big-endian format.  For 32-bit systems the
 * upper 32 bits of the address are simply ignored (zero); however, on
 * little-endian 64-bit systems (e.g. Alpha) this looks odd, with the
 * two halves of the address word appearing swapped.
 *
 * The addresses are split into two 32-bit words on all architectures,
 * as some of them live in PCI shared memory and it is necessary to use
 * readl/writel to access them.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */

struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 */
	u32	LocalCtrl;

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	PciState;

	u32	pad3[2];	/* 0x60 */

	u32	WinBase;
	u32	WinData;

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	AssistState;

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 */
	u32	Pc;

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */
	u32	SramData;

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 */
	u32	PcB;

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */
	u32	SramBData;

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	u32	Mb0Hi;		/* 0x500 */
	u32	Mb0Lo;
	u32	Mb1Hi;
	u32	CmdPrd;
	u32	Mb2Hi;
	u32	TxPrd;
	u32	Mb3Hi;
	u32	RxStdPrd;
	u32	Mb4Hi;
	u32	RxJumboPrd;
	u32	Mb5Hi;
	u32	RxMiniPrd;
	u32	Mb6Hi;
	u32	Mb6Lo;
	u32	Mb7Hi;
	u32	Mb7Lo;
	u32	Mb8Hi;
	u32	Mb8Lo;
	u32	Mb9Hi;
	u32	Mb9Lo;
	u32	MbAHi;
	u32	MbALo;
	u32	MbBHi;
	u32	MbBLo;
	u32	MbCHi;
	u32	MbCLo;
	u32	MbDHi;
	u32	MbDLo;
	u32	MbEHi;
	u32	MbELo;
	u32	MbFHi;
	u32	MbFLo;

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MacAddrLo;
	u32	InfoPtrHi;
	u32	InfoPtrLo;
	u32	MultiCastHi;	/* 0x610 */
	u32	MultiCastLo;
	u32	ModeStat;
	u32	DmaReadCfg;
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TxBufRat;
	u32	EvtCsm;
	u32	CmdCsm;
	u32	TuneRxCoalTicks;/* 0x630 */
	u32	TuneTxCoalTicks;
	u32	TuneStatTicks;
	u32	TuneMaxTxDesc;
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TuneTrace;
	u32	TuneLink;
	u32	TuneFastLink;
	u32	TracePtr;	/* 0x650 */
	u32	TraceStrt;
	u32	TraceLen;
	u32	IfIdx;
	u32	IfMtu;		/* 0x660 */
	u32	MaskInt;
	u32	GigLnkState;
	u32	FastLnkState;
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 */
	u32	Window[0x200];
};


typedef struct {
	u32 addrhi;
	u32 addrlo;
} aceaddr;
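
/*
 * Illustrative sketch only, not used by the driver: following the
 * addressing rules above, a 64-bit dma_addr_t is split into a
 * big-endian pair of 32-bit words, and register-resident words are
 * only touched through writel().  The hypothetical helper below
 * assumes <asm/io.h> has been included (as acenic.c does); the
 * driver's real helper for host-resident aceaddr words is
 * set_aceaddr() near the end of this file.
 */
#if 0
static inline void example_write_addr_pair(void __iomem *hi_reg,
					   void __iomem *lo_reg,
					   dma_addr_t dma)
{
	writel((u64) dma >> 32, hi_reg);	/* upper 32 bits first */
	writel((u64) dma & 0xffffffff, lo_reg);	/* then the lower 32 bits */
}
#endif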


#define ACE_WINDOW_SIZE		0x800

#define ACE_JUMBO_MTU		9000
#define ACE_STD_MTU		1500

#define ACE_TRACE_SIZE		0x8000

/*
 * Host control register bits.
 */

#define IN_INT		0x01
#define CLR_INT		0x02
#define HW_RESET	0x08
#define BYTE_SWAP	0x10
#define WORD_SWAP	0x20
#define MASK_INTS	0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN		0x800000
#define EEPROM_DATA_OUT		0x400000
#define EEPROM_WRITE_ENABLE	0x200000
#define EEPROM_CLK_OUT		0x100000

#define EEPROM_BASE		0xa0000000

#define EEPROM_WRITE_SELECT	0xa0
#define EEPROM_READ_SELECT	0xa1

#define SRAM_BANK_512K		0x200


/*
 * udelay() values for when clocking the eeprom
 */
#define ACE_SHORT_DELAY		2
#define ACE_LONG_DELAY		4
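
/*
 * Sketch only (the real bit-banging lives in acenic.c): a single EEPROM
 * clock pulse toggles EEPROM_CLK_OUT in LocalCtrl, pausing by the
 * udelay() values above between edges.  The helper name is made up for
 * illustration and assumes <linux/delay.h> and <asm/io.h>.
 */
#if 0
static inline void example_eeprom_clock_pulse(struct ace_regs __iomem *regs)
{
	u32 local = readl(&regs->LocalCtrl);

	local |= EEPROM_CLK_OUT;		/* raise the clock line */
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);		/* flush the posted write */
	udelay(ACE_SHORT_DELAY);

	local &= ~EEPROM_CLK_OUT;		/* drop it again */
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
}
#endif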


/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING	0x100000


/*
 * CPU state bits.
 */

#define CPU_RESET		0x01
#define CPU_TRACE		0x02
#define CPU_PROM_FAILED		0x10
#define CPU_HALT		0x00010000
#define CPU_HALTED		0xffff0000


/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4		0x04
#define DMA_READ_MAX_16		0x08
#define DMA_READ_MAX_32		0x0c
#define DMA_READ_MAX_64		0x10
#define DMA_READ_MAX_128	0x14
#define DMA_READ_MAX_256	0x18
#define DMA_READ_MAX_1K		0x1c
#define DMA_WRITE_MAX_4		0x20
#define DMA_WRITE_MAX_16	0x40
#define DMA_WRITE_MAX_32	0x60
#define DMA_WRITE_MAX_64	0x80
#define DMA_WRITE_MAX_128	0xa0
#define DMA_WRITE_MAX_256	0xc0
#define DMA_WRITE_MAX_1K	0xe0
#define DMA_READ_WRITE_MASK	0xfc
#define MEM_READ_MULTIPLE	0x00020000
#define PCI_66MHZ		0x00080000
#define PCI_32BIT		0x00100000
#define DMA_WRITE_ALL_ALIGN	0x00800000
#define READ_CMD_MEM		0x06000000
#define WRITE_CMD_MEM		0x70000000


/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD	0x02
#define ACE_WORD_SWAP_BD	0x04	/* not actually used */
#define ACE_WARN		0x08
#define ACE_BYTE_SWAP_DMA	0x10
#define ACE_NO_JUMBO_FRAG	0x200
#define ACE_FATAL		0x40000000


/*
 * DMA config
 */

#define DMA_THRESH_1W		0x10
#define DMA_THRESH_2W		0x20
#define DMA_THRESH_4W		0x40
#define DMA_THRESH_8W		0x80
#define DMA_THRESH_16W		0x100
#define DMA_THRESH_32W		0x0	/* not described in doc, but exists. */


/*
 * Tuning parameters
 */

#define TICKS_PER_SEC		1000000
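
/*
 * Illustration only: with TICKS_PER_SEC == 1000000 a coalescing tick is
 * one microsecond, so a microsecond-based setting maps directly onto the
 * TuneRxCoalTicks/TuneTxCoalTicks registers.  The helper name below is
 * hypothetical and assumes <asm/io.h>.
 */
#if 0
static inline void example_set_rx_coal_usecs(struct ace_regs __iomem *regs,
					     u32 usecs)
{
	/* 1 tick == 1 us when TICKS_PER_SEC is 1000000 */
	writel(usecs * (TICKS_PER_SEC / 1000000), &regs->TuneRxCoalTicks);
}
#endif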


/*
 * Link bits
 */

#define LNK_PREF		0x00008000
#define LNK_10MB		0x00010000
#define LNK_100MB		0x00020000
#define LNK_1000MB		0x00040000
#define LNK_FULL_DUPLEX		0x00080000
#define LNK_HALF_DUPLEX		0x00100000
#define LNK_TX_FLOW_CTL_Y	0x00200000
#define LNK_NEG_ADVANCED	0x00400000
#define LNK_RX_FLOW_CTL_Y	0x00800000
#define LNK_NIC			0x01000000
#define LNK_JAM			0x02000000
#define LNK_JUMBO		0x04000000
#define LNK_ALTEON		0x08000000
#define LNK_NEG_FCTL		0x10000000
#define LNK_NEGOTIATE		0x20000000
#define LNK_ENABLE		0x40000000
#define LNK_UP			0x80000000
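
/*
 * Example only (the driver builds its own link word in acenic.c): the
 * LNK_* bits are OR'ed together and written to TuneLink (or TuneFastLink)
 * to request a particular negotiation policy.  The exact combination
 * below is illustrative, not the driver's default.
 */
#if 0
static inline void example_request_autoneg(struct ace_regs __iomem *regs)
{
	u32 link = LNK_ENABLE | LNK_NEGOTIATE |
		   LNK_1000MB | LNK_100MB | LNK_10MB |
		   LNK_FULL_DUPLEX | LNK_RX_FLOW_CTL_Y;

	writel(link, &regs->TuneLink);
}
#endif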


/*
 * Event definitions
 */

#define EVT_RING_ENTRIES	256
#define EVT_RING_SIZE	(EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
	u32	pad;
};


/*
 * Events
 */

#define E_FW_RUNNING		0x01
#define E_STATS_UPDATED		0x04

#define E_STATS_UPDATE		0x04

#define E_LNK_STATE		0x06
#define E_C_LINK_UP		0x01
#define E_C_LINK_DOWN		0x02
#define E_C_LINK_10_100		0x03

#define E_ERROR			0x07
#define E_C_ERR_INVAL_CMD	0x01
#define E_C_ERR_UNIMP_CMD	0x02
#define E_C_ERR_BAD_CFG		0x03

#define E_MCAST_LIST		0x08
#define E_C_MCAST_ADDR_ADD	0x01
#define E_C_MCAST_ADDR_DEL	0x02

#define E_RESET_JUMBO_RNG	0x09
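
/*
 * Sketch of how event ring entries might be consumed (the real handler
 * lives in acenic.c): the host walks its consumer index towards the
 * producer index exported by the NIC and switches on the evt field.
 * Names other than the E_* codes are illustrative.
 */
#if 0
static inline u32 example_drain_events(struct event *ring, u32 csm, u32 prd)
{
	while (csm != prd) {
		switch (ring[csm].evt) {
		case E_FW_RUNNING:
			/* firmware is up; bring the interface online */
			break;
		case E_STATS_UPDATED:
			/* the stats block in the info area was refreshed */
			break;
		case E_LNK_STATE:
			/* ring[csm].code is one of the E_C_LINK_* values */
			break;
		default:
			break;
		}
		csm = (csm + 1) % EVT_RING_ENTRIES;
	}
	return csm;	/* new consumer index, to be written to EvtCsm */
}
#endif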


/*
 * Commands
 */

#define CMD_RING_ENTRIES	64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};


#define C_HOST_STATE		0x01
#define C_C_STACK_UP		0x01
#define C_C_STACK_DOWN		0x02

#define C_FDR_FILTERING		0x02
#define C_C_FDR_FILT_ENABLE	0x01
#define C_C_FDR_FILT_DISABLE	0x02

#define C_SET_RX_PRD_IDX	0x03
#define C_UPDATE_STATS		0x04
#define C_RESET_JUMBO_RNG	0x05
#define C_ADD_MULTICAST_ADDR	0x08
#define C_DEL_MULTICAST_ADDR	0x09

#define C_SET_PROMISC_MODE	0x0a
#define C_C_PROMISC_ENABLE	0x01
#define C_C_PROMISC_DISABLE	0x02

#define C_LNK_NEGOTIATION	0x0b
#define C_C_NEGOTIATE_BOTH	0x00
#define C_C_NEGOTIATE_GIG	0x01
#define C_C_NEGOTIATE_10_100	0x02

#define C_SET_MAC_ADDR		0x0c
#define C_CLEAR_PROFILE		0x0d

#define C_SET_MULTICAST_MODE	0x0e
#define C_C_MCAST_ENABLE	0x01
#define C_C_MCAST_DISABLE	0x02

#define C_CLEAR_STATS		0x0f
#define C_SET_RX_JUMBO_PRD_IDX	0x10
#define C_REFRESH_STATS		0x11
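
/*
 * Sketch of how a command might be issued (acenic.c has its own helper
 * for this): the 32-bit struct cmd is written into the CmdRng[] slot
 * selected by the CmdPrd producer index, and the index is then advanced
 * modulo CMD_RING_ENTRIES.  The helper names and the example command are
 * illustrative only.
 */
#if 0
static inline void example_issue_cmd(struct ace_regs __iomem *regs,
				     struct cmd *cmd)
{
	u32 idx = readl(&regs->CmdPrd);

	writel(*(u32 *) cmd, &regs->CmdRng[idx]);
	writel((idx + 1) % CMD_RING_ENTRIES, &regs->CmdPrd);
}

static inline void example_stack_up(struct ace_regs __iomem *regs)
{
	/* e.g. telling the firmware that the host stack is up */
	struct cmd cmd = { .evt = C_HOST_STATE, .code = C_C_STACK_UP, .idx = 0 };

	example_issue_cmd(regs, &cmd);
}
#endif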


/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM	0x01
#define BD_FLG_IP_SUM		0x02
#define BD_FLG_END		0x04
#define BD_FLG_MORE		0x08
#define BD_FLG_JUMBO		0x10
#define BD_FLG_UCAST		0x20
#define BD_FLG_MCAST		0x40
#define BD_FLG_BCAST		0x60
#define BD_FLG_TYP_MASK		0x60
#define BD_FLG_IP_FRAG		0x80
#define BD_FLG_IP_FRAG_END	0x100
#define BD_FLG_VLAN_TAG		0x200
#define BD_FLG_FRAME_ERROR	0x400
#define BD_FLG_COAL_NOW		0x800
#define BD_FLG_MINI		0x1000


/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM	0x01
#define RCB_FLG_IP_SUM		0x02
#define RCB_FLG_NO_PSEUDO_HDR	0x08
#define RCB_FLG_VLAN_ASSIST	0x10
#define RCB_FLG_COAL_INT_ONLY	0x20
#define RCB_FLG_TX_HOST_RING	0x40
#define RCB_FLG_IEEE_SNAP_SUM	0x80
#define RCB_FLG_EXT_RX_BD	0x100
#define RCB_FLG_RNG_DISABLE	0x200


/*
 * TX ring - the maximum number of TX ring entries for the Tigon I is 128
 */
#define MAX_TX_RING_ENTRIES	256
#define TIGON_I_TX_RING_ENTRIES	128
#define TX_RING_SIZE	(MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE	0x3800

struct tx_desc {
	aceaddr	addr;
	u32	flagsize;
#if 0
/*
 * This is in PCI shared memory and must be accessed with readl/writel;
 * the real layout is:
 */
#if __LITTLE_ENDIAN
	u16	flags;
	u16	size;
	u16	vlan;
	u16	reserved;
#else
	u16	size;
	u16	flags;
	u16	reserved;
	u16	vlan;
#endif
#endif
	u32	vlanres;
};
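
/*
 * Sketch of filling a TX descriptor (the real code is in acenic.c, which
 * also uses writel() for Tigon I descriptors that live in NIC memory):
 * flagsize packs the frame length and the BD_FLG_* bits into the two
 * 16-bit halves shown in the layout above.  Helper and parameter names
 * are made up for illustration.
 */
#if 0
static inline void example_fill_tx_desc(struct tx_desc *desc, dma_addr_t map,
					u32 len, u32 flags, u32 vlan_tag)
{
	desc->addr.addrhi = (u64) map >> 32;
	desc->addr.addrlo = (u64) map & 0xffffffff;
	desc->flagsize = (len << 16) | flags;	/* e.g. flags = BD_FLG_END */
	desc->vlanres = vlan_tag;
}
#endif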


#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))

struct rx_desc {
	aceaddr	addr;
#ifdef __LITTLE_ENDIAN
	u16	size;
	u16	idx;
#else
	u16	idx;
	u16	size;
#endif
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	type;
#else
	u16	type;
	u16	flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16	tcp_udp_csum;
	u16	ip_csum;
#else
	u16	ip_csum;
	u16	tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16	vlan;
	u16	err_flags;
#else
	u16	err_flags;
	u16	vlan;
#endif
	u32	reserved;
	u32	opague;
};
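
/*
 * Sketch of loading a receive buffer descriptor into one of the RX rings
 * (the real refill functions are in acenic.c): the host fills in the DMA
 * address, buffer size, and its own index so the matching entry on the
 * return ring can be tied back to the right sk_buff.  Helper and
 * parameter names are illustrative; set_aceaddr() is defined later in
 * this file.
 */
#if 0
static inline void example_load_std_rxd(struct rx_desc *rd, dma_addr_t map,
					u16 bufsize, u16 idx)
{
	set_aceaddr(&rd->addr, map);	/* split into big-endian hi/lo words */
	rd->size = bufsize;		/* e.g. enough for ACE_STD_MTU + headers */
	rd->idx = idx;			/* position in rx_std_skbuff[] */
}
#endif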


/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr	rngptr;
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	max_len;
#else
	u16	max_len;
	u16	flags;
#endif
	u32	pad;
};
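
/*
 * Sketch of initializing a ring control block in the shared info area
 * (acenic.c does this for each ring during initialization): the block
 * points the firmware at the ring, tells it how many entries there are,
 * and sets the RCB_FLG_* options.  The helper name and flag combination
 * shown are illustrative; set_aceaddr() is defined later in this file.
 */
#if 0
static inline void example_init_tx_rcb(struct ring_ctrl *rcb,
				       dma_addr_t ring_dma, u16 entries)
{
	set_aceaddr(&rcb->rngptr, ring_dma);
	rcb->max_len = entries;		/* e.g. TIGON_I_TX_RING_ENTRIES */
	rcb->flags = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR |
		     RCB_FLG_VLAN_ASSIST;
}
#endif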


struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};


struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl	evt_ctrl;
	struct ring_ctrl	cmd_ctrl;
	struct ring_ctrl	tx_ctrl;
	struct ring_ctrl	rx_std_ctrl;
	struct ring_ctrl	rx_jumbo_ctrl;
	struct ring_ctrl	rx_mini_ctrl;
	struct ring_ctrl	rx_return_ctrl;
	aceaddr	evt_prd_ptr;
	aceaddr	rx_ret_prd_ptr;
	aceaddr	tx_csm_ptr;
	aceaddr	stats2_ptr;
};


struct ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};


/*
 * Oddly, as soon as we add maplen on Alpha, things start to run much
 * slower.  Perhaps because the struct no longer fits in one cache line?
 * Hence tx_ring_info is kept separate from ring_info.
 */
struct tx_ring_info {
	struct sk_buff		*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};


/*
 * struct ace_skb holds the rings of skb pointers.  This is an awful lot
 * of pointers, but there is no obviously smarter way to do this
 * efficiently ;-(
 */
struct ace_skb
{
	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};


/*
 * Private struct for the AceNIC.
 *
 * Elements are grouped so that variables used by the TX handling go
 * together and end up in the same cache lines, in order to avoid cache
 * line contention between the RX and TX handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct net_device	*ndev;		/* backpointer */
	struct ace_info		*info;
	struct ace_regs	__iomem	*regs;		/* register base */
	struct ace_skb		*skb;
	dma_addr_t		info_dma;	/* 32/64 bit */

	int			version, link;
	int			promisc, mcast_all;

	/*
	 * TX elements
	 */
	struct tx_desc		*tx_ring;
	u32			tx_prd;
	volatile u32		tx_ret_csm;
	int			tx_ring_entries;

	/*
	 * RX elements
	 */
	unsigned long		std_refill_busy
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long		mini_refill_busy, jumbo_refill_busy;
	atomic_t		cur_rx_bufs;
	atomic_t		cur_mini_bufs;
	atomic_t		cur_jumbo_bufs;
	u32			rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32			cur_rx;

	struct rx_desc		*rx_std_ring;
	struct rx_desc		*rx_jumbo_ring;
	struct rx_desc		*rx_mini_ring;
	struct rx_desc		*rx_return_ring;

	int			tasklet_pending, jumbo;
	struct tasklet_struct	ace_tasklet;

	struct event		*evt_ring;

	volatile u32		*evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t		tx_ring_dma;	/* 32/64 bit */
	dma_addr_t		rx_ring_base_dma;
	dma_addr_t		evt_ring_dma;
	dma_addr_t		evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

	unsigned char		*trace_buf;
	struct pci_dev		*pdev;
	struct net_device	*next;
	volatile int		fw_running;
	int			board_idx;
	u16			pci_command;
	u8			pci_latency;
	const char		*name;
#ifdef INDEX_DEBUG
	spinlock_t		debug_lock
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32			last_tx, last_std_rx, last_mini_rx;
#endif
	u8			firmware_major;
	u8			firmware_minor;
	u8			firmware_fix;
	u32			firmware_start;
};


#define TX_RESERVED	MAX_SKB_FRAGS

/* Number of free slots between the consumer (csm) and producer (prd) indices. */
static inline int tx_space(struct ace_private *ap, u32 csm, u32 prd)
{
	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap)		tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd)	(tx_space(ap, csm, prd) <= TX_RESERVED)
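
/*
 * Illustration (a hedged sketch, not the driver's actual xmit path):
 * before queueing a frame, the ring is checked for space so that
 * TX_RESERVED slots remain available for a maximally fragmented skb.
 */
#if 0
static inline int example_tx_has_room(struct ace_private *ap)
{
	return !tx_ring_full(ap, ap->tx_ret_csm, ap->tx_prd);
}
#endif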

static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;

	aa->addrlo = baddr & 0xffffffff;
	aa->addrhi = baddr >> 32;
	wmb();	/* make sure the address words are written out before any
		 * later store that makes them visible to the NIC */
}


static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}


static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}


static inline void ace_unmask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(0, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}


/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(struct tasklet_struct *t);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);

#endif /* _ACENIC_H_ */
