1/*	$OpenBSD: if_wb.c,v 1.37 2007/05/25 21:27:15 krw Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
35 */
36
37/*
38 * Winbond fast ethernet PCI NIC driver
39 *
40 * Supports various cheap network adapters based on the Winbond W89C840F
41 * fast ethernet controller chip. This includes adapters manufactured by
42 * Winbond itself and some made by Linksys.
43 *
44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49/*
50 * The Winbond W89C840F chip is a bus master; in some ways it resembles
51 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
52 * one major difference which is that while the registers do many of
53 * the same things as a tulip adapter, the offsets are different: where
54 * tulip registers are typically spaced 8 bytes apart, the Winbond
55 * registers are spaced 4 bytes apart. The receiver filter is also
56 * programmed differently.
57 *
58 * Like the tulip, the Winbond chip uses small descriptors containing
59 * a status word, a control word and 32-bit areas that can either be used
60 * to point to two external data blocks, or to point to a single block
61 * and another descriptor in a linked list. Descriptors can be grouped
62 * together in blocks to form fixed length rings or can be chained
63 * together in linked lists. A single packet may be spread out over
64 * several descriptors if necessary.
65 *
66 * For the receive ring, this driver uses a linked list of descriptors,
67 * each pointing to a single mbuf cluster buffer, which is large enough
68 * to hold an entire packet. The linked list is looped back to create a
69 * closed ring.
70 *
71 * For transmission, the driver creates a linked list of 'super descriptors'
72 * which each contain several individual descriptors linked together.
73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
74 * abuse as fragment pointers. This allows us to use a buffer management
75 * scheme very similar to that used in the ThunderLAN and Etherlink XL
76 * drivers.
77 *
78 * Autonegotiation is performed using the external PHY via the MII bus.
79 * The sample boards I have all use a Davicom PHY.
80 *
81 * Note: the author of the Linux driver for the Winbond chip alludes
82 * to some sort of flaw in the chip's design that seems to mandate some
83 * drastic workaround which significantly impairs transmit performance.
84 * I have no idea what he's on about: transmit performance with all
85 * three of my test boards seems fine.
86 */
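
/*
 * As a rough sketch only (the authoritative layout lives in if_wbreg.h),
 * the descriptors manipulated throughout this file look more or less
 * like the tulip style:
 *
 *	struct wb_desc {
 *		u_int32_t	wb_status;	- OWN bit, error/length bits
 *		u_int32_t	wb_ctl;		- control flags, buffer length
 *		u_int32_t	wb_next;	- phys addr of next descriptor
 *		u_int32_t	wb_data;	- phys addr of data buffer
 *	};
 *
 * wb_list_rx_init() strings WB_RX_LIST_CNT of these into a ring through
 * wb_next, one buffer per descriptor, while each TX list entry carries
 * an array wb_frag[WB_MAXFRAGS] of them so that wb_encap() can map the
 * fragments of a single outbound mbuf chain.
 */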
87
88#include "bpfilter.h"
89
90#include <sys/param.h>
91#include <sys/systm.h>
92#include <sys/sockio.h>
93#include <sys/mbuf.h>
94#include <sys/malloc.h>
95#include <sys/kernel.h>
96#include <sys/socket.h>
97#include <sys/device.h>
98#include <sys/queue.h>
99#include <sys/timeout.h>
100
101#include <net/if.h>
102#include <net/if_dl.h>
103#include <net/if_types.h>
104
105#ifdef INET
106#include <netinet/in.h>
107#include <netinet/in_systm.h>
108#include <netinet/in_var.h>
109#include <netinet/ip.h>
110#include <netinet/if_ether.h>
111#endif
112
113#include <net/if_media.h>
114
115#if NBPFILTER > 0
116#include <net/bpf.h>
117#endif
118
119#include <uvm/uvm_extern.h>		/* for vtophys */
120#define	VTOPHYS(v)	vtophys((vaddr_t)(v))
121
122#include <dev/mii/mii.h>
123#include <dev/mii/miivar.h>
124#include <dev/pci/pcireg.h>
125#include <dev/pci/pcivar.h>
126#include <dev/pci/pcidevs.h>
127
128#define WB_USEIOSPACE
129
130/* #define WB_BACKGROUND_AUTONEG */
131
132#include <dev/pci/if_wbreg.h>
133
134int wb_probe(struct device *, void *, void *);
135void wb_attach(struct device *, struct device *, void *);
136
137void wb_bfree(caddr_t, u_int, void *);
138int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *,
139    struct mbuf *);
140int wb_encap(struct wb_softc *, struct wb_chain *,
141    struct mbuf *);
142
143void wb_rxeof(struct wb_softc *);
144void wb_rxeoc(struct wb_softc *);
145void wb_txeof(struct wb_softc *);
146void wb_txeoc(struct wb_softc *);
147int wb_intr(void *);
148void wb_tick(void *);
149void wb_start(struct ifnet *);
150int wb_ioctl(struct ifnet *, u_long, caddr_t);
151void wb_init(void *);
152void wb_stop(struct wb_softc *);
153void wb_watchdog(struct ifnet *);
154void wb_shutdown(void *);
155int wb_ifmedia_upd(struct ifnet *);
156void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157
158void wb_eeprom_putbyte(struct wb_softc *, int);
159void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
160void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
161void wb_mii_sync(struct wb_softc *);
162void wb_mii_send(struct wb_softc *, u_int32_t, int);
163int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
164int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
165
166void wb_setcfg(struct wb_softc *, u_int32_t);
167u_int8_t wb_calchash(caddr_t);
168void wb_setmulti(struct wb_softc *);
169void wb_reset(struct wb_softc *);
170void wb_fixmedia(struct wb_softc *);
171int wb_list_rx_init(struct wb_softc *);
172int wb_list_tx_init(struct wb_softc *);
173
174int wb_miibus_readreg(struct device *, int, int);
175void wb_miibus_writereg(struct device *, int, int, int);
176void wb_miibus_statchg(struct device *);
177
178#define WB_SETBIT(sc, reg, x)				\
179	CSR_WRITE_4(sc, reg,				\
180		CSR_READ_4(sc, reg) | x)
181
182#define WB_CLRBIT(sc, reg, x)				\
183	CSR_WRITE_4(sc, reg,				\
184		CSR_READ_4(sc, reg) & ~x)
185
186#define SIO_SET(x)					\
187	CSR_WRITE_4(sc, WB_SIO,				\
188		CSR_READ_4(sc, WB_SIO) | x)
189
190#define SIO_CLR(x)					\
191	CSR_WRITE_4(sc, WB_SIO,				\
192		CSR_READ_4(sc, WB_SIO) & ~x)
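
/*
 * The SIO_SET()/SIO_CLR() helpers (and the WB_SETBIT()/WB_CLRBIT() forms
 * that take the softc explicitly) are simple read-modify-write wrappers
 * around CSR_READ_4()/CSR_WRITE_4(); SIO_SET() and SIO_CLR() assume a
 * variable 'sc' is in scope.  For example,
 *
 *	SIO_SET(WB_SIO_MII_CLK);
 *
 * expands to
 *
 *	CSR_WRITE_4(sc, WB_SIO, CSR_READ_4(sc, WB_SIO) | WB_SIO_MII_CLK);
 *
 * which is how the EEPROM and MII bit-banging code below toggles
 * individual control lines.
 */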
193
194/*
195 * Send a read command and address to the EEPROM, check for ACK.
196 */
197void wb_eeprom_putbyte(sc, addr)
198	struct wb_softc		*sc;
199	int			addr;
200{
201	int			d, i;
202
203	d = addr | WB_EECMD_READ;
204
205	/*
206	 * Feed in each bit and strobe the clock.
207	 */
208	for (i = 0x400; i; i >>= 1) {
209		if (d & i) {
210			SIO_SET(WB_SIO_EE_DATAIN);
211		} else {
212			SIO_CLR(WB_SIO_EE_DATAIN);
213		}
214		DELAY(100);
215		SIO_SET(WB_SIO_EE_CLK);
216		DELAY(150);
217		SIO_CLR(WB_SIO_EE_CLK);
218		DELAY(100);
219	}
220
221	return;
222}
223
224/*
225 * Read a word of data stored in the EEPROM at address 'addr.'
226 */
227void wb_eeprom_getword(sc, addr, dest)
228	struct wb_softc		*sc;
229	int			addr;
230	u_int16_t		*dest;
231{
232	int			i;
233	u_int16_t		word = 0;
234
235	/* Enter EEPROM access mode. */
236	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
237
238	/*
239	 * Send address of word we want to read.
240	 */
241	wb_eeprom_putbyte(sc, addr);
242
243	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
244
245	/*
246	 * Start reading bits from EEPROM.
247	 */
248	for (i = 0x8000; i; i >>= 1) {
249		SIO_SET(WB_SIO_EE_CLK);
250		DELAY(100);
251		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
252			word |= i;
253		SIO_CLR(WB_SIO_EE_CLK);
254		DELAY(100);
255	}
256
257	/* Turn off EEPROM access mode. */
258	CSR_WRITE_4(sc, WB_SIO, 0);
259
260	*dest = word;
261
262	return;
263}
264
265/*
266 * Read a sequence of words from the EEPROM.
267 */
268void wb_read_eeprom(sc, dest, off, cnt, swap)
269	struct wb_softc		*sc;
270	caddr_t			dest;
271	int			off;
272	int			cnt;
273	int			swap;
274{
275	int			i;
276	u_int16_t		word = 0, *ptr;
277
278	for (i = 0; i < cnt; i++) {
279		wb_eeprom_getword(sc, off + i, &word);
280		ptr = (u_int16_t *)(dest + (i * 2));
281		if (swap)
282			*ptr = ntohs(word);
283		else
284			*ptr = word;
285	}
286
287	return;
288}
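
/*
 * The EEPROM is read 16 bits at a time; wb_read_eeprom() just loops
 * wb_eeprom_getword() over consecutive word offsets.  For example,
 * wb_attach() fetches the 6-byte station address with
 *
 *	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
 *
 * i.e. three 16-bit words starting at word offset 0, with no byte swap.
 */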
289
290/*
291 * Sync the PHYs by setting data bit and strobing the clock 32 times.
292 */
293void wb_mii_sync(sc)
294	struct wb_softc		*sc;
295{
296	int			i;
297
298	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
299
300	for (i = 0; i < 32; i++) {
301		SIO_SET(WB_SIO_MII_CLK);
302		DELAY(1);
303		SIO_CLR(WB_SIO_MII_CLK);
304		DELAY(1);
305	}
306
307	return;
308}
309
310/*
311 * Clock a series of bits through the MII.
312 */
313void wb_mii_send(sc, bits, cnt)
314	struct wb_softc		*sc;
315	u_int32_t		bits;
316	int			cnt;
317{
318	int			i;
319
320	SIO_CLR(WB_SIO_MII_CLK);
321
322	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
323		if (bits & i) {
324			SIO_SET(WB_SIO_MII_DATAIN);
325		} else {
326			SIO_CLR(WB_SIO_MII_DATAIN);
327		}
328		DELAY(1);
329		SIO_CLR(WB_SIO_MII_CLK);
330		DELAY(1);
331		SIO_SET(WB_SIO_MII_CLK);
332	}
333}
334
335/*
336 * Read a PHY register through the MII.
337 */
338int wb_mii_readreg(sc, frame)
339	struct wb_softc		*sc;
340	struct wb_mii_frame	*frame;
341
342{
343	int			i, ack, s;
344
345	s = splnet();
346
347	/*
348	 * Set up frame for RX.
349	 */
350	frame->mii_stdelim = WB_MII_STARTDELIM;
351	frame->mii_opcode = WB_MII_READOP;
352	frame->mii_turnaround = 0;
353	frame->mii_data = 0;
354
355	CSR_WRITE_4(sc, WB_SIO, 0);
356
357	/*
358 	 * Turn on data xmit.
359	 */
360	SIO_SET(WB_SIO_MII_DIR);
361
362	wb_mii_sync(sc);
363
364	/*
365	 * Send command/address info.
366	 */
367	wb_mii_send(sc, frame->mii_stdelim, 2);
368	wb_mii_send(sc, frame->mii_opcode, 2);
369	wb_mii_send(sc, frame->mii_phyaddr, 5);
370	wb_mii_send(sc, frame->mii_regaddr, 5);
371
372	/* Idle bit */
373	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
374	DELAY(1);
375	SIO_SET(WB_SIO_MII_CLK);
376	DELAY(1);
377
378	/* Turn off xmit. */
379	SIO_CLR(WB_SIO_MII_DIR);
380	/* Check for ack */
381	SIO_CLR(WB_SIO_MII_CLK);
382	DELAY(1);
383	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
384	SIO_SET(WB_SIO_MII_CLK);
385	DELAY(1);
386	SIO_CLR(WB_SIO_MII_CLK);
387	DELAY(1);
388	SIO_SET(WB_SIO_MII_CLK);
389	DELAY(1);
390
391	/*
392	 * Now try reading data bits. If the ack failed, we still
393	 * need to clock through 16 cycles to keep the PHY(s) in sync.
394	 */
395	if (ack) {
396		for(i = 0; i < 16; i++) {
397			SIO_CLR(WB_SIO_MII_CLK);
398			DELAY(1);
399			SIO_SET(WB_SIO_MII_CLK);
400			DELAY(1);
401		}
402		goto fail;
403	}
404
405	for (i = 0x8000; i; i >>= 1) {
406		SIO_CLR(WB_SIO_MII_CLK);
407		DELAY(1);
408		if (!ack) {
409			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
410				frame->mii_data |= i;
411			DELAY(1);
412		}
413		SIO_SET(WB_SIO_MII_CLK);
414		DELAY(1);
415	}
416
417fail:
418
419	SIO_CLR(WB_SIO_MII_CLK);
420	DELAY(1);
421	SIO_SET(WB_SIO_MII_CLK);
422	DELAY(1);
423
424	splx(s);
425
426	if (ack)
427		return(1);
428	return(0);
429}
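
/*
 * The read above bit-bangs a standard IEEE 802.3 clause 22 MDIO frame
 * through the WB_SIO register: wb_mii_sync() idles the bus with 32 set
 * bits, then the start delimiter (2 bits), read opcode (2 bits), PHY
 * address (5 bits) and register address (5 bits) are shifted out with
 * wb_mii_send(), the bus is turned around to sample the PHY's ack, and
 * finally 16 data bits are clocked in.  wb_miibus_readreg() below is
 * the thin wrapper the MII layer actually calls.
 */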
430
431/*
432 * Write to a PHY register through the MII.
433 */
434int wb_mii_writereg(sc, frame)
435	struct wb_softc		*sc;
436	struct wb_mii_frame	*frame;
437
438{
439	int			s;
440
441	s = splnet();
442	/*
443	 * Set up frame for TX.
444	 */
445
446	frame->mii_stdelim = WB_MII_STARTDELIM;
447	frame->mii_opcode = WB_MII_WRITEOP;
448	frame->mii_turnaround = WB_MII_TURNAROUND;
449
450	/*
451 	 * Turn on data output.
452	 */
453	SIO_SET(WB_SIO_MII_DIR);
454
455	wb_mii_sync(sc);
456
457	wb_mii_send(sc, frame->mii_stdelim, 2);
458	wb_mii_send(sc, frame->mii_opcode, 2);
459	wb_mii_send(sc, frame->mii_phyaddr, 5);
460	wb_mii_send(sc, frame->mii_regaddr, 5);
461	wb_mii_send(sc, frame->mii_turnaround, 2);
462	wb_mii_send(sc, frame->mii_data, 16);
463
464	/* Idle bit. */
465	SIO_SET(WB_SIO_MII_CLK);
466	DELAY(1);
467	SIO_CLR(WB_SIO_MII_CLK);
468	DELAY(1);
469
470	/*
471	 * Turn off xmit.
472	 */
473	SIO_CLR(WB_SIO_MII_DIR);
474
475	splx(s);
476
477	return(0);
478}
479
480int
481wb_miibus_readreg(dev, phy, reg)
482	struct device *dev;
483	int phy, reg;
484{
485	struct wb_softc *sc = (struct wb_softc *)dev;
486	struct wb_mii_frame frame;
487
488	bzero((char *)&frame, sizeof(frame));
489
490	frame.mii_phyaddr = phy;
491	frame.mii_regaddr = reg;
492	wb_mii_readreg(sc, &frame);
493
494	return(frame.mii_data);
495}
496
497void
498wb_miibus_writereg(dev, phy, reg, data)
499	struct device *dev;
500	int phy, reg, data;
501{
502	struct wb_softc *sc = (struct wb_softc *)dev;
503	struct wb_mii_frame frame;
504
505	bzero((char *)&frame, sizeof(frame));
506
507	frame.mii_phyaddr = phy;
508	frame.mii_regaddr = reg;
509	frame.mii_data = data;
510
511	wb_mii_writereg(sc, &frame);
512
513	return;
514}
515
516void
517wb_miibus_statchg(dev)
518	struct device *dev;
519{
520	struct wb_softc *sc = (struct wb_softc *)dev;
521
522	wb_setcfg(sc, sc->sc_mii.mii_media_active);
523}
524
525/*
526 * Program the 64-bit multicast hash filter.
527 */
528void wb_setmulti(sc)
529	struct wb_softc		*sc;
530{
531	struct ifnet		*ifp;
532	int			h = 0;
533	u_int32_t		hashes[2] = { 0, 0 };
534	struct arpcom		*ac = &sc->arpcom;
535	struct ether_multi	*enm;
536	struct ether_multistep	step;
537	u_int32_t		rxfilt;
538	int			mcnt = 0;
539
540	ifp = &sc->arpcom.ac_if;
541
542	rxfilt = CSR_READ_4(sc, WB_NETCFG);
543
544allmulti:
545	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
546		rxfilt |= WB_NETCFG_RX_MULTI;
547		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
548		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
549		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
550		return;
551	}
552
553	/* first, zot all the existing hash bits */
554	CSR_WRITE_4(sc, WB_MAR0, 0);
555	CSR_WRITE_4(sc, WB_MAR1, 0);
556
557	/* now program new ones */
558	ETHER_FIRST_MULTI(step, ac, enm);
559	while (enm != NULL) {
560		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
561			ifp->if_flags |= IFF_ALLMULTI;
562			goto allmulti;
563		}
564		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) & 0x3F;
565		if (h < 32)
566			hashes[0] |= (1 << h);
567		else
568			hashes[1] |= (1 << (h - 32));
569		mcnt++;
570		ETHER_NEXT_MULTI(step, enm);
571	}
572
573	if (mcnt)
574		rxfilt |= WB_NETCFG_RX_MULTI;
575	else
576		rxfilt &= ~WB_NETCFG_RX_MULTI;
577
578	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
579	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
580	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
581
582	return;
583}
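
/*
 * For each multicast address accepted above, the filter bit is derived
 * roughly as follows:
 *
 *	crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
 *	h = ~(crc >> 26) & 0x3F;	- 6-bit index, 0..63
 *
 * and bit h of the 64-bit filter is then set: bit h in WB_MAR0 when
 * h < 32, otherwise bit (h - 32) in WB_MAR1.
 */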
584
585/*
586 * The Winbond manual states that in order to fiddle with the
587 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
588 * first have to put the transmit and/or receive logic in the idle state.
589 */
590void
591wb_setcfg(sc, media)
592	struct wb_softc *sc;
593	u_int32_t media;
594{
595	int			i, restart = 0;
596
597	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
598		restart = 1;
599		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));
600
601		for (i = 0; i < WB_TIMEOUT; i++) {
602			DELAY(10);
603			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
604				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
605				break;
606		}
607
608		if (i == WB_TIMEOUT)
609			printf("%s: failed to force tx and "
610				"rx to idle state\n", sc->sc_dev.dv_xname);
611	}
612
613	if (IFM_SUBTYPE(media) == IFM_10_T)
614		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
615	else
616		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
617
618	if ((media & IFM_GMASK) == IFM_FDX)
619		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
620	else
621		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
622
623	if (restart)
624		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);
625
626	return;
627}
628
629void
630wb_reset(sc)
631	struct wb_softc *sc;
632{
633	int i;
634	struct mii_data *mii = &sc->sc_mii;
635
636	CSR_WRITE_4(sc, WB_NETCFG, 0);
637	CSR_WRITE_4(sc, WB_BUSCTL, 0);
638	CSR_WRITE_4(sc, WB_TXADDR, 0);
639	CSR_WRITE_4(sc, WB_RXADDR, 0);
640
641	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
643
644	for (i = 0; i < WB_TIMEOUT; i++) {
645		DELAY(10);
646		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
647			break;
648	}
649	if (i == WB_TIMEOUT)
650		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
651
652	/* Wait a little while for the chip to get its brains in order. */
653	DELAY(1000);
654
655	if (mii->mii_instance) {
656		struct mii_softc *miisc;
657		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
658			mii_phy_reset(miisc);
659	}
660}
661
662void
663wb_fixmedia(sc)
664	struct wb_softc *sc;
665{
666	struct mii_data *mii = &sc->sc_mii;
667	u_int32_t media;
668
669	if (LIST_FIRST(&mii->mii_phys) == NULL)
670		return;
671
672	mii_pollstat(mii);
673	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
674		media = mii->mii_media_active & ~IFM_10_T;
675		media |= IFM_100_TX;
676	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
677		media = mii->mii_media_active & ~IFM_100_TX;
678		media |= IFM_10_T;
679	} else
680		return;
681
682	ifmedia_set(&mii->mii_media, media);
683}
684
685const struct pci_matchid wb_devices[] = {
686	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
687	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
688};
689
690/*
691 * Probe for a Winbond chip. Check the PCI vendor and device
692 * IDs against our list and return a device name if we find a match.
693 */
694int
695wb_probe(parent, match, aux)
696	struct device *parent;
697	void *match, *aux;
698{
699	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
700	    sizeof(wb_devices)/sizeof(wb_devices[0])));
701}
702
703/*
704 * Attach the interface. Allocate softc structures, do ifmedia
705 * setup and ethernet/BPF attach.
706 */
707void
708wb_attach(parent, self, aux)
709	struct device *parent, *self;
710	void *aux;
711{
712	struct wb_softc *sc = (struct wb_softc *)self;
713	struct pci_attach_args *pa = aux;
714	pci_chipset_tag_t pc = pa->pa_pc;
715	pci_intr_handle_t ih;
716	const char *intrstr = NULL;
717	struct ifnet *ifp = &sc->arpcom.ac_if;
718	bus_size_t size;
719	int rseg;
720	pcireg_t command;
721	bus_dma_segment_t seg;
722	bus_dmamap_t dmamap;
723	caddr_t kva;
724
725	/*
726	 * Handle power management nonsense.
727	 */
728
729	command = pci_conf_read(pc, pa->pa_tag, WB_PCI_CAPID) & 0x000000FF;
730	if (command == 0x01) {
731
732		command = pci_conf_read(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL);
733		if (command & WB_PSTATE_MASK) {
734			u_int32_t		io, mem, irq;
735
736			/* Save important PCI config data. */
737			io = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOIO);
738			mem = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOMEM);
739			irq = pci_conf_read(pc, pa->pa_tag, WB_PCI_INTLINE);
740
741			/* Reset the power state. */
742			printf("%s: chip is in D%d power mode "
743			    "-- setting to D0\n", sc->sc_dev.dv_xname,
744			    command & WB_PSTATE_MASK);
745			command &= 0xFFFFFFFC;
746			pci_conf_write(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL,
747			    command);
748
749			/* Restore PCI config data. */
750			pci_conf_write(pc, pa->pa_tag, WB_PCI_LOIO, io);
751			pci_conf_write(pc, pa->pa_tag, WB_PCI_LOMEM, mem);
752			pci_conf_write(pc, pa->pa_tag, WB_PCI_INTLINE, irq);
753		}
754	}
755
756	/*
757	 * Map control/status registers.
758	 */
759
760#ifdef WB_USEIOSPACE
761	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
762	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
763		printf(": can't map i/o space\n");
764		return;
765	}
766#else
767	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
768	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
769		printf(": can't map mem space\n");
770		return;
771	}
772#endif
773
774	/* Allocate interrupt */
775	if (pci_intr_map(pa, &ih)) {
776		printf(": couldn't map interrupt\n");
777		goto fail_1;
778	}
779	intrstr = pci_intr_string(pc, ih);
780	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
781	    self->dv_xname);
782	if (sc->sc_ih == NULL) {
783		printf(": couldn't establish interrupt");
784		if (intrstr != NULL)
785			printf(" at %s", intrstr);
786		printf("\n");
787		goto fail_1;
788	}
789	printf(": %s", intrstr);
790
791	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;
792
793	/* Reset the adapter. */
794	wb_reset(sc);
795
796	/*
797	 * Get station address from the EEPROM.
798	 */
799	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
800	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
801
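	/*
	 * Allocate DMA-safe memory for the RX/TX descriptor lists using
	 * the usual bus_dma(9) sequence: bus_dmamem_alloc() a segment,
	 * bus_dmamem_map() it into kernel VA, bus_dmamap_create() a map
	 * for it and bus_dmamap_load() that map with the mapped kva.
	 */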
802	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
803	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
804		printf(": can't alloc list data\n");
805		goto fail_2;
806	}
807	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
808	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
809		printf(": can't map list data, size %d\n",
810		    sizeof(struct wb_list_data));
811		goto fail_3;
812	}
813	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
814	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
815		printf(": can't create dma map\n");
816		goto fail_4;
817	}
818	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
819	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
820		printf(": can't load dma map\n");
821		goto fail_5;
822	}
823	sc->wb_ldata = (struct wb_list_data *)kva;
824	bzero(sc->wb_ldata, sizeof(struct wb_list_data));
825
826	ifp->if_softc = sc;
827	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
828	ifp->if_ioctl = wb_ioctl;
829	ifp->if_start = wb_start;
830	ifp->if_watchdog = wb_watchdog;
831	ifp->if_baudrate = 10000000;
832	IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1);
833	IFQ_SET_READY(&ifp->if_snd);
834
835	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
836
837	/*
838	 * Do ifmedia setup.
839	 */
840	wb_stop(sc);
841
842	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
843	sc->sc_mii.mii_ifp = ifp;
844	sc->sc_mii.mii_readreg = wb_miibus_readreg;
845	sc->sc_mii.mii_writereg = wb_miibus_writereg;
846	sc->sc_mii.mii_statchg = wb_miibus_statchg;
847	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
848	    0);
849	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
850		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
851		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
852	} else
853		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
854
855	/*
856	 * Call MI attach routines.
857	 */
858	if_attach(ifp);
859	ether_ifattach(ifp);
860
861	shutdownhook_establish(wb_shutdown, sc);
862	return;
863
864fail_5:
865	bus_dmamap_destroy(pa->pa_dmat, dmamap);
866
867fail_4:
868	bus_dmamem_unmap(pa->pa_dmat, kva,
869	    sizeof(struct wb_list_data));
870
871fail_3:
872	bus_dmamem_free(pa->pa_dmat, &seg, rseg);
873
874fail_2:
875	pci_intr_disestablish(pc, sc->sc_ih);
876
877fail_1:
878	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
879}
880
881/*
882 * Initialize the transmit descriptors.
883 */
884int wb_list_tx_init(sc)
885	struct wb_softc		*sc;
886{
887	struct wb_chain_data	*cd;
888	struct wb_list_data	*ld;
889	int			i;
890
891	cd = &sc->wb_cdata;
892	ld = sc->wb_ldata;
893
894	for (i = 0; i < WB_TX_LIST_CNT; i++) {
895		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
896		if (i == (WB_TX_LIST_CNT - 1)) {
897			cd->wb_tx_chain[i].wb_nextdesc =
898				&cd->wb_tx_chain[0];
899		} else {
900			cd->wb_tx_chain[i].wb_nextdesc =
901				&cd->wb_tx_chain[i + 1];
902		}
903	}
904
905	cd->wb_tx_free = &cd->wb_tx_chain[0];
906	cd->wb_tx_tail = cd->wb_tx_head = NULL;
907
908	return(0);
909}
910
911
912/*
913 * Initialize the RX descriptors and allocate mbufs for them. Note that
914 * we arrange the descriptors in a closed ring, so that the last descriptor
915 * points back to the first.
916 */
917int wb_list_rx_init(sc)
918	struct wb_softc		*sc;
919{
920	struct wb_chain_data	*cd;
921	struct wb_list_data	*ld;
922	int			i;
923
924	cd = &sc->wb_cdata;
925	ld = sc->wb_ldata;
926
927	for (i = 0; i < WB_RX_LIST_CNT; i++) {
928		cd->wb_rx_chain[i].wb_ptr =
929			(struct wb_desc *)&ld->wb_rx_list[i];
930		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
931		if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS)
932			return(ENOBUFS);
933		if (i == (WB_RX_LIST_CNT - 1)) {
934			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
935			ld->wb_rx_list[i].wb_next =
936					VTOPHYS(&ld->wb_rx_list[0]);
937		} else {
938			cd->wb_rx_chain[i].wb_nextdesc =
939					&cd->wb_rx_chain[i + 1];
940			ld->wb_rx_list[i].wb_next =
941					VTOPHYS(&ld->wb_rx_list[i + 1]);
942		}
943	}
944
945	cd->wb_rx_head = &cd->wb_rx_chain[0];
946
947	return(0);
948}
949
950void
951wb_bfree(buf, size, arg)
952	caddr_t			buf;
953	u_int			size;
954	void *arg;
955{
956}
957
958/*
959 * Initialize an RX descriptor and attach an MBUF cluster.
960 */
961int
962wb_newbuf(sc, c, m)
963	struct wb_softc *sc;
964	struct wb_chain_onefrag *c;
965	struct mbuf *m;
966{
967	struct mbuf		*m_new = NULL;
968
969	if (m == NULL) {
970		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
971		if (m_new == NULL)
972			return(ENOBUFS);
973		m_new->m_data = m_new->m_ext.ext_buf = c->wb_buf;
974		m_new->m_flags |= M_EXT;
975		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
976		    m_new->m_len = WB_BUFBYTES;
977		m_new->m_ext.ext_free = wb_bfree;
978		m_new->m_ext.ext_arg = NULL;
979		MCLINITREFERENCE(m_new);
980	} else {
981		m_new = m;
982		m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;
983		m_new->m_data = m_new->m_ext.ext_buf;
984	}
985
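	/*
	 * Leave a few bytes of headroom at the front of the buffer.
	 * wb_rxeof() copies received frames out starting ETHER_ALIGN
	 * bytes before the mbuf data pointer, so the spare room here
	 * keeps that access inside the buffer.
	 */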
986	m_adj(m_new, sizeof(u_int64_t));
987
988	c->wb_mbuf = m_new;
989	c->wb_ptr->wb_data = VTOPHYS(mtod(m_new, caddr_t));
990	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
991	c->wb_ptr->wb_status = WB_RXSTAT;
992
993	return(0);
994}
995
996/*
997 * A frame has been uploaded: pass the resulting mbuf chain up to
998 * the higher level protocols.
999 */
1000void wb_rxeof(sc)
1001	struct wb_softc		*sc;
1002{
1003	struct mbuf		*m = NULL;
1004	struct ifnet		*ifp;
1005	struct wb_chain_onefrag	*cur_rx;
1006	int			total_len = 0;
1007	u_int32_t		rxstat;
1008
1009	ifp = &sc->arpcom.ac_if;
1010
1011	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
1012							WB_RXSTAT_OWN)) {
1013		struct mbuf *m0 = NULL;
1014
1015		cur_rx = sc->wb_cdata.wb_rx_head;
1016		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;
1017
1018		m = cur_rx->wb_mbuf;
1019
1020		if ((rxstat & WB_RXSTAT_MIIERR) ||
1021		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
1022		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
1023		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
1024		    !(rxstat & WB_RXSTAT_RXCMP)) {
1025			ifp->if_ierrors++;
1026			wb_newbuf(sc, cur_rx, m);
1027			printf("%s: receiver babbling: possible chip "
1028				"bug, forcing reset\n", sc->sc_dev.dv_xname);
1029			wb_fixmedia(sc);
1030			wb_reset(sc);
1031			wb_init(sc);
1032			return;
1033		}
1034
1035		if (rxstat & WB_RXSTAT_RXERR) {
1036			ifp->if_ierrors++;
1037			wb_newbuf(sc, cur_rx, m);
1038			break;
1039		}
1040
1041		/* No errors; receive the packet. */
1042		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);
1043
1044		/*
1045		 * XXX The Winbond chip includes the CRC with every
1046		 * received frame, and there's no way to turn this
1047		 * behavior off (at least, I can't find anything in
1048	 	 * the manual that explains how to do it) so we have
1049		 * to trim off the CRC manually.
1050		 */
1051		total_len -= ETHER_CRC_LEN;
1052
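		/*
		 * Copy the frame into a fresh mbuf chain with m_devget(),
		 * starting ETHER_ALIGN bytes early so that the m_adj()
		 * below leaves the IP header longword aligned; the old
		 * mbuf is recycled back onto the descriptor via
		 * wb_newbuf().
		 */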
1053		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1054		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1055		wb_newbuf(sc, cur_rx, m);
1056		if (m0 == NULL) {
1057			ifp->if_ierrors++;
1058			break;
1059		}
1060		m_adj(m0, ETHER_ALIGN);
1061		m = m0;
1062
1063		ifp->if_ipackets++;
1064
1065#if NBPFILTER > 0
1066		/*
1067		 * Handle BPF listeners. Let the BPF user see the packet.
1068		 */
1069		if (ifp->if_bpf)
1070			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1071#endif
1072		/* pass it on. */
1073		ether_input_mbuf(ifp, m);
1074	}
1075
1076	return;
1077}
1078
1079void wb_rxeoc(sc)
1080	struct wb_softc		*sc;
1081{
1082	wb_rxeof(sc);
1083
1084	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1085	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
1086	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1087	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
1088		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
1089
1090	return;
1091}
1092
1093/*
1094 * A frame was downloaded to the chip. It's safe for us to clean up
1095 * the list buffers.
1096 */
1097void wb_txeof(sc)
1098	struct wb_softc		*sc;
1099{
1100	struct wb_chain		*cur_tx;
1101	struct ifnet		*ifp;
1102
1103	ifp = &sc->arpcom.ac_if;
1104
1105	/* Clear the timeout timer. */
1106	ifp->if_timer = 0;
1107
1108	if (sc->wb_cdata.wb_tx_head == NULL)
1109		return;
1110
1111	/*
1112	 * Go through our tx list and free mbufs for those
1113	 * frames that have been transmitted.
1114	 */
1115	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
1116		u_int32_t		txstat;
1117
1118		cur_tx = sc->wb_cdata.wb_tx_head;
1119		txstat = WB_TXSTATUS(cur_tx);
1120
1121		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
1122			break;
1123
1124		if (txstat & WB_TXSTAT_TXERR) {
1125			ifp->if_oerrors++;
1126			if (txstat & WB_TXSTAT_ABORT)
1127				ifp->if_collisions++;
1128			if (txstat & WB_TXSTAT_LATECOLL)
1129				ifp->if_collisions++;
1130		}
1131
1132		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;
1133
1134		ifp->if_opackets++;
1135		m_freem(cur_tx->wb_mbuf);
1136		cur_tx->wb_mbuf = NULL;
1137
1138		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
1139			sc->wb_cdata.wb_tx_head = NULL;
1140			sc->wb_cdata.wb_tx_tail = NULL;
1141			break;
1142		}
1143
1144		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
1145	}
1146
1147	return;
1148}
1149
1150/*
1151 * TX 'end of channel' interrupt handler.
1152 */
1153void wb_txeoc(sc)
1154	struct wb_softc		*sc;
1155{
1156	struct ifnet		*ifp;
1157
1158	ifp = &sc->arpcom.ac_if;
1159
1160	ifp->if_timer = 0;
1161
1162	if (sc->wb_cdata.wb_tx_head == NULL) {
1163		ifp->if_flags &= ~IFF_OACTIVE;
1164		sc->wb_cdata.wb_tx_tail = NULL;
1165	} else {
1166		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
1167			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
1168			ifp->if_timer = 5;
1169			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1170		}
1171	}
1172
1173	return;
1174}
1175
1176int wb_intr(arg)
1177	void			*arg;
1178{
1179	struct wb_softc		*sc;
1180	struct ifnet		*ifp;
1181	u_int32_t		status;
1182	int			r = 0;
1183
1184	sc = arg;
1185	ifp = &sc->arpcom.ac_if;
1186
1187	if (!(ifp->if_flags & IFF_UP))
1188		return (r);
1189
1190	/* Disable interrupts. */
1191	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1192
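	/*
	 * Service loop: read the interrupt status, write the same bits
	 * back to acknowledge them, and keep looping until none of the
	 * sources we care about (WB_INTRS) remain asserted.
	 */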
1193	for (;;) {
1194
1195		status = CSR_READ_4(sc, WB_ISR);
1196		if (status)
1197			CSR_WRITE_4(sc, WB_ISR, status);
1198
1199		if ((status & WB_INTRS) == 0)
1200			break;
1201
1202		r = 1;
1203
1204		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
1205			ifp->if_ierrors++;
1206			wb_reset(sc);
1207			if (status & WB_ISR_RX_ERR)
1208				wb_fixmedia(sc);
1209			wb_init(sc);
1210			continue;
1211		}
1212
1213		if (status & WB_ISR_RX_OK)
1214			wb_rxeof(sc);
1215
1216		if (status & WB_ISR_RX_IDLE)
1217			wb_rxeoc(sc);
1218
1219		if (status & WB_ISR_TX_OK)
1220			wb_txeof(sc);
1221
1222		if (status & WB_ISR_TX_NOBUF)
1223			wb_txeoc(sc);
1224
1225		if (status & WB_ISR_TX_IDLE) {
1226			wb_txeof(sc);
1227			if (sc->wb_cdata.wb_tx_head != NULL) {
1228				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1229				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1230			}
1231		}
1232
1233		if (status & WB_ISR_TX_UNDERRUN) {
1234			ifp->if_oerrors++;
1235			wb_txeof(sc);
1236			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1237			/* Jack up TX threshold */
1238			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
1239			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
1240			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
1241			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1242		}
1243
1244		if (status & WB_ISR_BUS_ERR) {
1245			wb_reset(sc);
1246			wb_init(sc);
1247		}
1248
1249	}
1250
1251	/* Re-enable interrupts. */
1252	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
1253
1254	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1255		wb_start(ifp);
1256	}
1257
1258	return (r);
1259}
1260
1261void
1262wb_tick(xsc)
1263	void *xsc;
1264{
1265	struct wb_softc *sc = xsc;
1266	int s;
1267
1268	s = splnet();
1269	mii_tick(&sc->sc_mii);
1270	splx(s);
1271	timeout_add(&sc->wb_tick_tmo, hz);
1272}
1273
1274/*
1275 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1276 * pointers to the fragment pointers.
1277 */
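/*
 * Sketching the result: wb_frag[0] gets WB_TXCTL_FIRSTFRAG, each fragment
 * links to the next through wb_next, a pad fragment pointing at
 * wb_cdata.wb_pad is appended for frames shorter than WB_MIN_FRAMELEN,
 * and the last fragment used is flagged with WB_TXCTL_LASTFRAG.  The
 * caller (wb_start()) then sets WB_TXCTL_FINT on the final packet of a
 * burst so the chip raises only one TX interrupt per chain.
 */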
1278int wb_encap(sc, c, m_head)
1279	struct wb_softc		*sc;
1280	struct wb_chain		*c;
1281	struct mbuf		*m_head;
1282{
1283	int			frag = 0;
1284	struct wb_desc		*f = NULL;
1285	int			total_len;
1286	struct mbuf		*m;
1287
1288	/*
1289 	 * Start packing the mbufs in this chain into
1290	 * the fragment pointers. Stop when we run out
1291 	 * of fragments or hit the end of the mbuf chain.
1292	 */
1293	m = m_head;
1294	total_len = 0;
1295
1296	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1297		if (m->m_len != 0) {
1298			if (frag == WB_MAXFRAGS)
1299				break;
1300			total_len += m->m_len;
1301			f = &c->wb_ptr->wb_frag[frag];
1302			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
1303			if (frag == 0) {
1304				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
1305				f->wb_status = 0;
1306			} else
1307				f->wb_status = WB_TXSTAT_OWN;
1308			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
1309			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
1310			frag++;
1311		}
1312	}
1313
1314	/*
1315	 * Handle special case: we used up all 16 fragments,
1316	 * but we have more mbufs left in the chain. Copy the
1317	 * data into an mbuf cluster. Note that we don't
1318	 * bother clearing the values in the other fragment
1319	 * pointers/counters; it wouldn't gain us anything,
1320	 * and would waste cycles.
1321	 */
1322	if (m != NULL) {
1323		struct mbuf		*m_new = NULL;
1324
1325		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1326		if (m_new == NULL)
1327			return(1);
1328		if (m_head->m_pkthdr.len > MHLEN) {
1329			MCLGET(m_new, M_DONTWAIT);
1330			if (!(m_new->m_flags & M_EXT)) {
1331				m_freem(m_new);
1332				return(1);
1333			}
1334		}
1335		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1336					mtod(m_new, caddr_t));
1337		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1338		m_freem(m_head);
1339		m_head = m_new;
1340		f = &c->wb_ptr->wb_frag[0];
1341		f->wb_status = 0;
1342		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
1343		f->wb_ctl = total_len = m_new->m_len;
1344		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
1345		frag = 1;
1346	}
1347
1348	if (total_len < WB_MIN_FRAMELEN) {
1349		f = &c->wb_ptr->wb_frag[frag];
1350		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
1351		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
1352		f->wb_ctl |= WB_TXCTL_TLINK;
1353		f->wb_status = WB_TXSTAT_OWN;
1354		frag++;
1355	}
1356
1357	c->wb_mbuf = m_head;
1358	c->wb_lastdesc = frag - 1;
1359	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
1360	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);
1361
1362	return(0);
1363}
1364
1365/*
1366 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1367 * to the mbuf data regions directly in the transmit lists. We also save a
1368 * copy of the pointers since the transmit list fragment pointers are
1369 * physical addresses.
1370 */
1371
1372void wb_start(ifp)
1373	struct ifnet		*ifp;
1374{
1375	struct wb_softc		*sc;
1376	struct mbuf		*m_head = NULL;
1377	struct wb_chain		*cur_tx = NULL, *start_tx;
1378
1379	sc = ifp->if_softc;
1380
1381	/*
1382	 * Check for an available queue slot. If there are none,
1383	 * punt.
1384	 */
1385	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1386		ifp->if_flags |= IFF_OACTIVE;
1387		return;
1388	}
1389
1390	start_tx = sc->wb_cdata.wb_tx_free;
1391
1392	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1393		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1394		if (m_head == NULL)
1395			break;
1396
1397		/* Pick a descriptor off the free list. */
1398		cur_tx = sc->wb_cdata.wb_tx_free;
1399		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1400
1401		/* Pack the data into the descriptor. */
1402		wb_encap(sc, cur_tx, m_head);
1403
1404		if (cur_tx != start_tx)
1405			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1406
1407#if NBPFILTER > 0
1408		/*
1409		 * If there's a BPF listener, bounce a copy of this frame
1410		 * to him.
1411		 */
1412		if (ifp->if_bpf)
1413			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
1414			    BPF_DIRECTION_OUT);
1415#endif
1416	}
1417
1418	/*
1419	 * If there are no packets queued, bail.
1420	 */
1421	if (cur_tx == NULL)
1422		return;
1423
1424	/*
1425	 * Place the request for the upload interrupt
1426	 * in the last descriptor in the chain. This way, if
1427	 * we're chaining several packets at once, we'll only
1428	 * get an interrupt once for the whole chain rather than
1429	 * once for each packet.
1430	 */
1431	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1432	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1433	sc->wb_cdata.wb_tx_tail = cur_tx;
1434
1435	if (sc->wb_cdata.wb_tx_head == NULL) {
1436		sc->wb_cdata.wb_tx_head = start_tx;
1437		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1438		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1439	} else {
1440		/*
1441		 * We need to distinguish between the case where
1442		 * the own bit is clear because the chip cleared it
1443		 * and where the own bit is clear because we haven't
1444		 * set it yet. The magic value WB_UNSENT is just some
1445		 * randomly chosen number which doesn't have the own
1446	 	 * bit set. When we actually transmit the frame, the
1447		 * status word will have _only_ the own bit set, so
1448		 * the txeoc handler will be able to tell if it needs
1449		 * to initiate another transmission to flush out pending
1450		 * frames.
1451		 */
1452		WB_TXOWN(start_tx) = WB_UNSENT;
1453	}
1454
1455	/*
1456	 * Set a timeout in case the chip goes out to lunch.
1457	 */
1458	ifp->if_timer = 5;
1459
1460	return;
1461}
1462
1463void wb_init(xsc)
1464	void			*xsc;
1465{
1466	struct wb_softc *sc = xsc;
1467	struct ifnet *ifp = &sc->arpcom.ac_if;
1468	int s, i;
1469
1470	s = splnet();
1471
1472	/*
1473	 * Cancel pending I/O and free all RX/TX buffers.
1474	 */
1475	wb_stop(sc);
1476	wb_reset(sc);
1477
1478	sc->wb_txthresh = WB_TXTHRESH_INIT;
1479
1480	/*
1481	 * Set cache alignment and burst length.
1482	 */
1483#ifdef foo
1484	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
1485	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
1486	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
1487#endif
1488
1489	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
1490	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
1491	switch(sc->wb_cachesize) {
1492	case 32:
1493		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
1494		break;
1495	case 16:
1496		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
1497		break;
1498	case 8:
1499		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
1500		break;
1501	case 0:
1502	default:
1503		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
1504		break;
1505	}
1506
1507	/* This doesn't tend to work too well at 100Mbps. */
1508	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);
1509
1510	/* Init our MAC address */
1511	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1512		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
1513	}
1514
1515	/* Init circular RX list. */
1516	if (wb_list_rx_init(sc) == ENOBUFS) {
1517		printf("%s: initialization failed: no "
1518			"memory for rx buffers\n", sc->sc_dev.dv_xname);
1519		wb_stop(sc);
1520		splx(s);
1521		return;
1522	}
1523
1524	/* Init TX descriptors. */
1525	wb_list_tx_init(sc);
1526
1527	/* If we want promiscuous mode, set the allframes bit. */
1528	if (ifp->if_flags & IFF_PROMISC) {
1529		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
1530	} else {
1531		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
1532	}
1533
1534	/*
1535	 * Set capture broadcast bit to capture broadcast frames.
1536	 */
1537	if (ifp->if_flags & IFF_BROADCAST) {
1538		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
1539	} else {
1540		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
1541	}
1542
1543	/*
1544	 * Program the multicast filter, if necessary.
1545	 */
1546	wb_setmulti(sc);
1547
1548	/*
1549	 * Load the address of the RX list.
1550	 */
1551	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1552	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
1553
1554	/*
1555	 * Enable interrupts.
1556	 */
1557	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
1558	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);
1559
1560	/* Enable receiver and transmitter. */
1561	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1562	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
1563
1564	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1565	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
1566	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1567
1568	ifp->if_flags |= IFF_RUNNING;
1569	ifp->if_flags &= ~IFF_OACTIVE;
1570
1571	splx(s);
1572
1573	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
1574	timeout_add(&sc->wb_tick_tmo, hz);
1575
1576	return;
1577}
1578
1579/*
1580 * Set media options.
1581 */
1582int
1583wb_ifmedia_upd(ifp)
1584	struct ifnet *ifp;
1585{
1586	struct wb_softc *sc = ifp->if_softc;
1587
1588	if (ifp->if_flags & IFF_UP)
1589		wb_init(sc);
1590
1591	return(0);
1592}
1593
1594/*
1595 * Report current media status.
1596 */
1597void
1598wb_ifmedia_sts(ifp, ifmr)
1599	struct ifnet		*ifp;
1600	struct ifmediareq	*ifmr;
1601{
1602	struct wb_softc *sc = ifp->if_softc;
1603	struct mii_data *mii = &sc->sc_mii;
1604
1605	mii_pollstat(mii);
1606	ifmr->ifm_active = mii->mii_media_active;
1607	ifmr->ifm_status = mii->mii_media_status;
1608}
1609
1610int wb_ioctl(ifp, command, data)
1611	struct ifnet		*ifp;
1612	u_long			command;
1613	caddr_t			data;
1614{
1615	struct wb_softc		*sc = ifp->if_softc;
1616	struct ifreq		*ifr = (struct ifreq *) data;
1617	struct ifaddr		*ifa = (struct ifaddr *)data;
1618	int			s, error = 0;
1619
1620	s = splnet();
1621
1622	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
1623		splx(s);
1624		return (error);
1625	}
1626
1627	switch(command) {
1628	case SIOCSIFADDR:
1629		ifp->if_flags |= IFF_UP;
1630		switch (ifa->ifa_addr->sa_family) {
1631#ifdef INET
1632		case AF_INET:
1633			wb_init(sc);
1634			arp_ifinit(&sc->arpcom, ifa);
1635			break;
1636#endif /* INET */
1637		default:
1638			wb_init(sc);
1639		}
1640		break;
1641	case SIOCSIFFLAGS:
1642		if (ifp->if_flags & IFF_UP) {
1643			wb_init(sc);
1644		} else {
1645			if (ifp->if_flags & IFF_RUNNING)
1646				wb_stop(sc);
1647		}
1648		error = 0;
1649		break;
1650	case SIOCADDMULTI:
1651	case SIOCDELMULTI:
1652		error = (command == SIOCADDMULTI) ?
1653		    ether_addmulti(ifr, &sc->arpcom) :
1654		    ether_delmulti(ifr, &sc->arpcom);
1655
1656		if (error == ENETRESET) {
1657			/*
1658			 * Multicast list has changed; set the hardware
1659			 * filter accordingly.
1660			 */
1661			if (ifp->if_flags & IFF_RUNNING)
1662				wb_setmulti(sc);
1663			error = 0;
1664		}
1665		break;
1666	case SIOCGIFMEDIA:
1667	case SIOCSIFMEDIA:
1668		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1669		break;
1670	default:
1671		error = ENOTTY;
1672		break;
1673	}
1674
1675	splx(s);
1676
1677	return(error);
1678}
1679
1680void wb_watchdog(ifp)
1681	struct ifnet		*ifp;
1682{
1683	struct wb_softc		*sc;
1684
1685	sc = ifp->if_softc;
1686
1687	ifp->if_oerrors++;
1688	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1689
1690#ifdef foo
1691	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1692		printf("%s: no carrier - transceiver cable problem?\n",
1693		    sc->sc_dev.dv_xname);
1694#endif
1695	wb_stop(sc);
1696	wb_reset(sc);
1697	wb_init(sc);
1698
1699	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1700		wb_start(ifp);
1701
1702	return;
1703}
1704
1705/*
1706 * Stop the adapter and free any mbufs allocated to the
1707 * RX and TX lists.
1708 */
1709void wb_stop(sc)
1710	struct wb_softc		*sc;
1711{
1712	int			i;
1713	struct ifnet		*ifp;
1714
1715	ifp = &sc->arpcom.ac_if;
1716	ifp->if_timer = 0;
1717
1718	timeout_del(&sc->wb_tick_tmo);
1719
1720	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1721
1722	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1723	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1724	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1725	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1726
1727	/*
1728	 * Free data in the RX lists.
1729	 */
1730	for (i = 0; i < WB_RX_LIST_CNT; i++) {
1731		if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) {
1732			m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf);
1733			sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL;
1734		}
1735	}
1736	bzero((char *)&sc->wb_ldata->wb_rx_list,
1737		sizeof(sc->wb_ldata->wb_rx_list));
1738
1739	/*
1740	 * Free the TX list buffers.
1741	 */
1742	for (i = 0; i < WB_TX_LIST_CNT; i++) {
1743		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1744			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1745			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1746		}
1747	}
1748
1749	bzero((char *)&sc->wb_ldata->wb_tx_list,
1750		sizeof(sc->wb_ldata->wb_tx_list));
1751}
1752
1753/*
1754 * Stop all chip I/O so that the kernel's probe routines don't
1755 * get confused by errant DMAs when rebooting.
1756 */
1757void wb_shutdown(arg)
1758	void			*arg;
1759{
1760	struct wb_softc		*sc = (struct wb_softc *)arg;
1761
1762	wb_stop(sc);
1763
1764	return;
1765}
1766
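/*
 * Autoconfiguration glue: wb_ca ties the softc size and the
 * wb_probe()/wb_attach() routines into autoconf(9), and wb_cd names
 * the driver ("wb") as a network interface (DV_IFNET) for config(8).
 */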
1767struct cfattach wb_ca = {
1768	sizeof(struct wb_softc), wb_probe, wb_attach
1769};
1770
1771struct cfdriver wb_cd = {
1772	0, "wb", DV_IFNET
1773};
1774