#ifndef _ACENIC_H_
#define _ACENIC_H_


/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and more
 * IRQs without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW	0

/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects them in big-endian format. On 32-bit systems
 * the upper 32 bits of the address are simply ignored (zero), while on
 * little-endian 64-bit systems (Alpha) this looks odd, with the two
 * halves of the address word being swapped.
 *
 * The addresses are split into two 32-bit words on all architectures,
 * as some of them live in PCI shared memory and must be accessed with
 * readl/writel.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */

struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 */
	u32	LocalCtrl;

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	PciState;

	u32	pad3[2];	/* 0x60 */

	u32	WinBase;
	u32	WinData;

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	AssistState;

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 */
	u32	Pc;

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */
	u32	SramData;

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 */
	u32	PcB;

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */
	u32	SramBData;

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	u32	Mb0Hi;		/* 0x500 */
	u32	Mb0Lo;
	u32	Mb1Hi;
	u32	CmdPrd;
	u32	Mb2Hi;
	u32	TxPrd;
	u32	Mb3Hi;
	u32	RxStdPrd;
	u32	Mb4Hi;
	u32	RxJumboPrd;
	u32	Mb5Hi;
	u32	RxMiniPrd;
	u32	Mb6Hi;
	u32	Mb6Lo;
	u32	Mb7Hi;
	u32	Mb7Lo;
	u32	Mb8Hi;
	u32	Mb8Lo;
	u32	Mb9Hi;
	u32	Mb9Lo;
	u32	MbAHi;
	u32	MbALo;
	u32	MbBHi;
	u32	MbBLo;
	u32	MbCHi;
	u32	MbCLo;
	u32	MbDHi;
	u32	MbDLo;
	u32	MbEHi;
	u32	MbELo;
	u32	MbFHi;
	u32	MbFLo;

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MacAddrLo;
	u32	InfoPtrHi;
	u32	InfoPtrLo;
	u32	MultiCastHi;	/* 0x610 */
	u32	MultiCastLo;
	u32	ModeStat;
	u32	DmaReadCfg;
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TxBufRat;
	u32	EvtCsm;
	u32	CmdCsm;
	u32	TuneRxCoalTicks;/* 0x630 */
	u32	TuneTxCoalTicks;
	u32	TuneStatTicks;
	u32	TuneMaxTxDesc;
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TuneTrace;
	u32	TuneLink;
	u32	TuneFastLink;
	u32	TracePtr;	/* 0x650 */
	u32	TraceStrt;
	u32	TraceLen;
	u32	IfIdx;
	u32	IfMtu;		/* 0x660 */
	u32	MaskInt;
	u32	GigLnkState;
	u32	FastLnkState;
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 */
	u32	Window[0x200];
};


typedef struct {
	u32 addrhi;
	u32 addrlo;
} aceaddr;
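
/*
 * Illustrative sketch only, not part of the driver API: this shows how
 * a 64-bit DMA address is written into one of the split Hi/Lo register
 * pairs above (here InfoPtrHi/InfoPtrLo) with writel(), as required by
 * the addressing comment. The real initialization lives in acenic.c;
 * the function name and parameters are examples.
 */
static inline void ace_example_write_infoptr(struct ace_regs __iomem *regs,
					     dma_addr_t dma)
{
	u64 baddr = (u64) dma;

	writel(baddr >> 32, &regs->InfoPtrHi);	/* zero on 32-bit hosts */
	writel(baddr & 0xffffffff, &regs->InfoPtrLo);
}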


#define ACE_WINDOW_SIZE	0x800

#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500

#define ACE_TRACE_SIZE 0x8000

/*
 * Host control register bits.
 */

#define IN_INT		0x01
#define CLR_INT		0x02
#define HW_RESET	0x08
#define BYTE_SWAP	0x10
#define WORD_SWAP	0x20
#define MASK_INTS	0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN		0x800000
#define EEPROM_DATA_OUT		0x400000
#define EEPROM_WRITE_ENABLE	0x200000
#define EEPROM_CLK_OUT		0x100000

#define EEPROM_BASE		0xa0000000

#define EEPROM_WRITE_SELECT	0xa0
#define EEPROM_READ_SELECT	0xa1

#define SRAM_BANK_512K		0x200


/*
 * udelay() values used when clocking the EEPROM
 */
#define ACE_SHORT_DELAY		2
#define ACE_LONG_DELAY		4


/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING	0x100000


/*
 * CPU state bits.
 */

#define CPU_RESET		0x01
#define CPU_TRACE		0x02
#define CPU_PROM_FAILED		0x10
#define CPU_HALT		0x00010000
#define CPU_HALTED		0xffff0000


/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4		0x04
#define DMA_READ_MAX_16		0x08
#define DMA_READ_MAX_32		0x0c
#define DMA_READ_MAX_64		0x10
#define DMA_READ_MAX_128	0x14
#define DMA_READ_MAX_256	0x18
#define DMA_READ_MAX_1K		0x1c
#define DMA_WRITE_MAX_4		0x20
#define DMA_WRITE_MAX_16	0x40
#define DMA_WRITE_MAX_32	0x60
#define DMA_WRITE_MAX_64	0x80
#define DMA_WRITE_MAX_128	0xa0
#define DMA_WRITE_MAX_256	0xc0
#define DMA_WRITE_MAX_1K	0xe0
#define DMA_READ_WRITE_MASK	0xfc
#define MEM_READ_MULTIPLE	0x00020000
#define PCI_66MHZ		0x00080000
#define PCI_32BIT		0x00100000
#define DMA_WRITE_ALL_ALIGN	0x00800000
#define READ_CMD_MEM		0x06000000
#define WRITE_CMD_MEM		0x70000000
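
/*
 * Illustrative sketch only (the driver's actual PCI setup lives in
 * acenic.c): the DMA_READ_MAX_xxx and DMA_WRITE_MAX_xxx values are
 * burst-size fields inside PciState, so selecting new burst sizes
 * means clearing DMA_READ_WRITE_MASK first and OR-ing in the desired
 * values. The function name and the chosen sizes are examples.
 */
static inline void ace_example_set_dma_burst(struct ace_regs __iomem *regs)
{
	u32 tmp = readl(&regs->PciState);

	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64 | DMA_WRITE_MAX_64;
	writel(tmp, &regs->PciState);
}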


/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD	0x02
#define ACE_WORD_SWAP_BD	0x04		/* not actually used */
#define ACE_WARN		0x08
#define ACE_BYTE_SWAP_DMA	0x10
#define ACE_NO_JUMBO_FRAG	0x200
#define ACE_FATAL		0x40000000


/*
 * DMA config
 */

#define DMA_THRESH_1W		0x10
#define DMA_THRESH_2W		0x20
#define DMA_THRESH_4W		0x40
#define DMA_THRESH_8W		0x80
#define DMA_THRESH_16W		0x100
#define DMA_THRESH_32W		0x0	/* not described in doc, but exists. */


/*
 * Tuning parameters
 */

#define TICKS_PER_SEC		1000000
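
/*
 * With TICKS_PER_SEC at 1000000 a tick corresponds to one microsecond,
 * so the Tune*Ticks registers are effectively programmed in
 * microseconds (assuming the firmware honours this rate). Illustrative
 * sketch only; the real tuning is done in acenic.c and the function
 * name and parameter are examples.
 */
static inline void ace_example_set_rx_coal(struct ace_regs __iomem *regs,
					   u32 usecs)
{
	writel(usecs, &regs->TuneRxCoalTicks);	/* 1 tick == 1 us */
}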


/*
 * Link bits
 */

#define LNK_PREF		0x00008000
#define LNK_10MB		0x00010000
#define LNK_100MB		0x00020000
#define LNK_1000MB		0x00040000
#define LNK_FULL_DUPLEX		0x00080000
#define LNK_HALF_DUPLEX		0x00100000
#define LNK_TX_FLOW_CTL_Y	0x00200000
#define LNK_NEG_ADVANCED	0x00400000
#define LNK_RX_FLOW_CTL_Y	0x00800000
#define LNK_NIC			0x01000000
#define LNK_JAM			0x02000000
#define LNK_JUMBO		0x04000000
#define LNK_ALTEON		0x08000000
#define LNK_NEG_FCTL		0x10000000
#define LNK_NEGOTIATE		0x20000000
#define LNK_ENABLE		0x40000000
#define LNK_UP			0x80000000


/*
 * Event definitions
 */

#define EVT_RING_ENTRIES	256
#define EVT_RING_SIZE	(EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
	u32	pad;
};


/*
 * Events
 */

#define E_FW_RUNNING		0x01
#define E_STATS_UPDATED		0x04

#define E_STATS_UPDATE		0x04

#define E_LNK_STATE		0x06
#define E_C_LINK_UP		0x01
#define E_C_LINK_DOWN		0x02
#define E_C_LINK_10_100		0x03

#define E_ERROR			0x07
#define E_C_ERR_INVAL_CMD	0x01
#define E_C_ERR_UNIMP_CMD	0x02
#define E_C_ERR_BAD_CFG		0x03

#define E_MCAST_LIST		0x08
#define E_C_MCAST_ADDR_ADD	0x01
#define E_C_MCAST_ADDR_DEL	0x02

#define E_RESET_JUMBO_RNG	0x09
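
/*
 * Illustrative sketch of how the event ring is consumed (the driver's
 * real event handler lives in acenic.c): the host advances its
 * consumer index towards the producer index supplied by the NIC and
 * dispatches on the evt code of each entry. The function name and
 * parameters are examples, not part of the driver API.
 */
static inline u32 ace_example_drain_events(struct event *ring, u32 csm,
					   u32 prd)
{
	while (csm != prd) {
		switch (ring[csm].evt) {
		case E_FW_RUNNING:
			/* firmware is up and running */
			break;
		case E_LNK_STATE:
			/* ring[csm].code is one of E_C_LINK_UP/DOWN/10_100 */
			break;
		case E_ERROR:
			/* ring[csm].code holds one of the E_C_ERR_* codes */
			break;
		default:
			break;
		}
		csm = (csm + 1) % EVT_RING_ENTRIES;
	}
	return csm;		/* new consumer index, to be written to EvtCsm */
}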


/*
 * Commands
 */

#define CMD_RING_ENTRIES	64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;
	u32	code:12;
	u32	evt:8;
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};


#define C_HOST_STATE		0x01
#define C_C_STACK_UP		0x01
#define C_C_STACK_DOWN		0x02

#define C_FDR_FILTERING		0x02
#define C_C_FDR_FILT_ENABLE	0x01
#define C_C_FDR_FILT_DISABLE	0x02

#define C_SET_RX_PRD_IDX	0x03
#define C_UPDATE_STATS		0x04
#define C_RESET_JUMBO_RNG	0x05
#define C_ADD_MULTICAST_ADDR	0x08
#define C_DEL_MULTICAST_ADDR	0x09

#define C_SET_PROMISC_MODE	0x0a
#define C_C_PROMISC_ENABLE	0x01
#define C_C_PROMISC_DISABLE	0x02

#define C_LNK_NEGOTIATION	0x0b
#define C_C_NEGOTIATE_BOTH	0x00
#define C_C_NEGOTIATE_GIG	0x01
#define C_C_NEGOTIATE_10_100	0x02

#define C_SET_MAC_ADDR		0x0c
#define C_CLEAR_PROFILE		0x0d

#define C_SET_MULTICAST_MODE	0x0e
#define C_C_MCAST_ENABLE	0x01
#define C_C_MCAST_DISABLE	0x02

#define C_CLEAR_STATS		0x0f
#define C_SET_RX_JUMBO_PRD_IDX	0x10
#define C_REFRESH_STATS		0x11
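
/*
 * Illustrative sketch of how a command is handed to the firmware (the
 * driver's real helper lives in acenic.c): the 32-bit command word is
 * written into the CmdRng[] array in shared memory at the current
 * command producer index, and CmdPrd is then advanced. The function
 * name is an example.
 */
static inline void ace_example_issue_cmd(struct ace_regs __iomem *regs,
					 struct cmd *cmd)
{
	u32 idx = readl(&regs->CmdPrd);

	writel(*(u32 *) cmd, &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;
	writel(idx, &regs->CmdPrd);
}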


/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM	0x01
#define BD_FLG_IP_SUM		0x02
#define BD_FLG_END		0x04
#define BD_FLG_MORE		0x08
#define BD_FLG_JUMBO		0x10
#define BD_FLG_UCAST		0x20
#define BD_FLG_MCAST		0x40
#define BD_FLG_BCAST		0x60
#define BD_FLG_TYP_MASK		0x60
#define BD_FLG_IP_FRAG		0x80
#define BD_FLG_IP_FRAG_END	0x100
#define BD_FLG_VLAN_TAG		0x200
#define BD_FLG_FRAME_ERROR	0x400
#define BD_FLG_COAL_NOW		0x800
#define BD_FLG_MINI		0x1000


/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM	0x01
#define RCB_FLG_IP_SUM		0x02
#define RCB_FLG_NO_PSEUDO_HDR	0x08
#define RCB_FLG_VLAN_ASSIST	0x10
#define RCB_FLG_COAL_INT_ONLY	0x20
#define RCB_FLG_TX_HOST_RING	0x40
#define RCB_FLG_IEEE_SNAP_SUM	0x80
#define RCB_FLG_EXT_RX_BD	0x100
#define RCB_FLG_RNG_DISABLE	0x200


/*
 * TX ring - the Tigon I supports at most 128 TX ring entries
 */
#define MAX_TX_RING_ENTRIES	256
#define TIGON_I_TX_RING_ENTRIES	128
#define TX_RING_SIZE		(MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE		0x3800

struct tx_desc {
	aceaddr	addr;
	u32	flagsize;
	u32	vlanres;
};


#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))

struct rx_desc {
	aceaddr	addr;
#ifdef __LITTLE_ENDIAN
	u16	size;
	u16	idx;
#else
	u16	idx;
	u16	size;
#endif
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	type;
#else
	u16	type;
	u16	flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16	tcp_udp_csum;
	u16	ip_csum;
#else
	u16	ip_csum;
	u16	tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16	vlan;
	u16	err_flags;
#else
	u16	err_flags;
	u16	vlan;
#endif
	u32	reserved;
	u32	opague;
};
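
/*
 * Illustrative sketch only (the real RX path lives in acenic.c and the
 * function name and parameter are examples): descriptors returned on
 * the RX return ring carry the producer-ring index of the buffer in
 * idx, the received length in size, and BD_FLG_* bits in flags.
 */
static inline int ace_example_rx_frame_bad(const struct rx_desc *rxdesc)
{
	/* frame-level error reported by the NIC */
	return (rxdesc->flags & BD_FLG_FRAME_ERROR) != 0;
}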


/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr	rngptr;
#ifdef __LITTLE_ENDIAN
	u16	flags;
	u16	max_len;
#else
	u16	max_len;
	u16	flags;
#endif
	u32	pad;
};


struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};


struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl	evt_ctrl;
	struct ring_ctrl	cmd_ctrl;
	struct ring_ctrl	tx_ctrl;
	struct ring_ctrl	rx_std_ctrl;
	struct ring_ctrl	rx_jumbo_ctrl;
	struct ring_ctrl	rx_mini_ctrl;
	struct ring_ctrl	rx_return_ctrl;
	aceaddr	evt_prd_ptr;
	aceaddr	rx_ret_prd_ptr;
	aceaddr	tx_csm_ptr;
	aceaddr	stats2_ptr;
};


struct ring_info {
	struct sk_buff		*skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};


/*
 * Oddly enough, as soon as we add maplen on Alpha, things get
 * noticeably slower, presumably because the struct no longer fits
 * in a single cache line. Hence tx_ring_info is kept separate.
 */
struct tx_ring_info {
	struct sk_buff		*skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};


/*
 * struct ace_skb holds the rings of skb's. This is an awful lot of
 * pointers, but there does not seem to be a smarter way to do it
 * efficiently ;-(
 */
struct ace_skb
{
	struct tx_ring_info	tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info	rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info	rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info	rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};


/*
 * Private struct for the AceNIC.
 *
 * Elements are grouped so that variables used by the TX handling go
 * together and end up in the same cache lines, in order to avoid
 * cache line contention between the RX and TX handling on SMP.
 *
 * Frequently accessed variables are placed at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
	struct ace_info		*info;
	struct ace_regs	__iomem	*regs;		/* register base */
	struct ace_skb		*skb;
	dma_addr_t		info_dma;	/* 32/64 bit */

	int			version, link;
	int			promisc, mcast_all;

	/*
	 * TX elements
	 */
	struct tx_desc		*tx_ring;
	u32			tx_prd;
	volatile u32		tx_ret_csm;
	int			tx_ring_entries;

	/*
	 * RX elements
	 */
	unsigned long		std_refill_busy
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long		mini_refill_busy, jumbo_refill_busy;
	atomic_t		cur_rx_bufs;
	atomic_t		cur_mini_bufs;
	atomic_t		cur_jumbo_bufs;
	u32			rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32			cur_rx;

	struct rx_desc		*rx_std_ring;
	struct rx_desc		*rx_jumbo_ring;
	struct rx_desc		*rx_mini_ring;
	struct rx_desc		*rx_return_ring;

#if ACENIC_DO_VLAN
	struct vlan_group	*vlgrp;
#endif

	int			tasklet_pending, jumbo;
	struct tasklet_struct	ace_tasklet;

	struct event		*evt_ring;

	volatile u32		*evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t		tx_ring_dma;	/* 32/64 bit */
	dma_addr_t		rx_ring_base_dma;
	dma_addr_t		evt_ring_dma;
	dma_addr_t		evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

	unsigned char		*trace_buf;
	struct pci_dev		*pdev;
	struct net_device	*next;
	volatile int		fw_running;
	int			board_idx;
	u16			pci_command;
	u8			pci_latency;
	const char		*name;
#ifdef INDEX_DEBUG
	spinlock_t		debug_lock
				__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32			last_tx, last_std_rx, last_mini_rx;
#endif
	struct net_device_stats stats;
	int			pci_using_dac;
};


#define TX_RESERVED	MAX_SKB_FRAGS

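/*
 * Free slots in the TX ring: the ring size is a power of two, so the
 * producer/consumer difference can be reduced with a simple mask; the
 * "- 1" keeps one slot unused so a full ring can be told apart from an
 * empty one.
 */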
static inline int tx_space(struct ace_private *ap, u32 csm, u32 prd)
{
	return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap)		tx_space(ap, (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd)	(tx_space(ap, csm, prd) <= TX_RESERVED)

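/*
 * Fill in an aceaddr (living in host memory shared with the NIC, e.g.
 * in the ace_info block or a descriptor) with a DMA address split into
 * its high and low 32-bit halves; the wmb() makes the address visible
 * before any subsequent index update that refers to it.
 */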
static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
	u64 baddr = (u64) addr;
	aa->addrlo = baddr & 0xffffffff;
	aa->addrhi = baddr >> 32;
	wmb();
}


static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	wmb();
}


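/*
 * Interrupt masking differs between the two chip generations: the
 * Tigon I has a dedicated MaskInt register, while the Tigon II masks
 * interrupts via the MASK_INTS bit in HostCtrl.
 */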
static inline void ace_mask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(1, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

	ace_sync_irq(dev->irq);
}


static inline void ace_unmask_irq(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	if (ACE_IS_TIGON_I(ap))
		writel(0, &regs->MaskInt);
	else
		writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}


/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
#endif

#endif /* _ACENIC_H_ */