/*	$OpenBSD: if_wb.c,v 1.47 2011/04/03 15:36:03 jasper Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
 */

/*
 * Winbond fast ethernet PCI NIC driver
 *
 * Supports various cheap network adapters based on the Winbond W89C840F
 * fast ethernet controller chip. This includes adapters manufactured by
 * Winbond itself and some made by Linksys.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Winbond W89C840F chip is a bus master; in some ways it resembles
 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
 * one major difference which is that while the registers do many of
 * the same things as a tulip adapter, the offsets are different: where
 * tulip registers are typically spaced 8 bytes apart, the Winbond
 * registers are spaced 4 bytes apart. The receiver filter is also
 * programmed differently.
 *
 * Like the tulip, the Winbond chip uses small descriptors containing
 * a status word, a control word and 32-bit areas that can either be used
 * to point to two external data blocks, or to point to a single block
 * and another descriptor in a linked list. Descriptors can be grouped
 * together in blocks to form fixed length rings or can be chained
 * together in linked lists. A single packet may be spread out over
 * several descriptors if necessary.
 *
 * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single mbuf cluster buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
 *
 * For transmission, the driver creates a linked list of 'super descriptors'
 * which each contain several individual descriptors linked together.
 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
 * abuse as fragment pointers. This allows us to use a buffer management
 * scheme very similar to that used in the ThunderLAN and Etherlink XL
 * drivers.
 *
 * Autonegotiation is performed using the external PHY via the MII bus.
 * The sample boards I have all use a Davicom PHY.
 *
 * Note: the author of the Linux driver for the Winbond chip alludes
 * to some sort of flaw in the chip's design that seems to mandate some
 * drastic workaround which significantly impairs transmit performance.
 * I have no idea what he's on about: transmit performance with all
 * three of my test boards seems fine.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>		/* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define WB_USEIOSPACE

/* #define WB_BACKGROUND_AUTONEG */

#include <dev/pci/if_wbreg.h>

int wb_probe(struct device *, void *, void *);
void wb_attach(struct device *, struct device *, void *);

void wb_bfree(caddr_t, u_int, void *);
void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *);
int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);

void wb_rxeof(struct wb_softc *);
void wb_rxeoc(struct wb_softc *);
void wb_txeof(struct wb_softc *);
void wb_txeoc(struct wb_softc *);
int wb_intr(void *);
void wb_tick(void *);
void wb_start(struct ifnet *);
int wb_ioctl(struct ifnet *, u_long, caddr_t);
void wb_init(void *);
void wb_stop(struct wb_softc *);
void wb_watchdog(struct ifnet *);
int wb_ifmedia_upd(struct ifnet *);
void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void wb_eeprom_putbyte(struct wb_softc *, int);
void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
void wb_mii_sync(struct wb_softc *);
void wb_mii_send(struct wb_softc *, u_int32_t, int);
int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);

void wb_setcfg(struct wb_softc *, u_int32_t);
u_int8_t wb_calchash(caddr_t);
void wb_setmulti(struct wb_softc *);
void wb_reset(struct wb_softc *);
void wb_fixmedia(struct wb_softc *);
int wb_list_rx_init(struct wb_softc *);
int wb_list_tx_init(struct wb_softc *);

int wb_miibus_readreg(struct device *, int, int);
void wb_miibus_writereg(struct device *, int, int, int);
void wb_miibus_statchg(struct device *);

#define WB_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define WB_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void wb_eeprom_putbyte(sc, addr)
	struct wb_softc		*sc;
	int			addr;
{
	int			d, i;

	d = addr | WB_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void wb_eeprom_getword(sc, addr, dest)
	struct wb_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	int			i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void wb_read_eeprom(sc, dest, off, cnt, swap)
	struct wb_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		wb_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void wb_mii_sync(sc)
	struct wb_softc		*sc;
{
	int			i;

	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void wb_mii_send(sc, bits, cnt)
	struct wb_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	SIO_CLR(WB_SIO_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(WB_SIO_MII_DATAIN);
		} else {
			SIO_CLR(WB_SIO_MII_DATAIN);
		}
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_SET(WB_SIO_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int wb_mii_readreg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(WB_SIO_MII_DIR);
	/* Check for ack */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int wb_mii_writereg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;

{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	splx(s);

	return(0);
}

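/*
 * MII bus glue: these are the callbacks handed to the generic mii(4)
 * layer via mii_attach(). They wrap the bit-banged MII access routines
 * above and push media changes down to the chip.
 */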
int
wb_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	wb_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
wb_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	wb_mii_writereg(sc, &frame);

	return;
}

void
wb_miibus_statchg(dev)
	struct device *dev;
{
	struct wb_softc *sc = (struct wb_softc *)dev;

	wb_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
void wb_setmulti(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct arpcom		*ac = &sc->arpcom;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, WB_NETCFG);

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= WB_NETCFG_RX_MULTI;
		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, WB_MAR0, 0);
	CSR_WRITE_4(sc, WB_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
		    0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		rxfilt |= WB_NETCFG_RX_MULTI;
	else
		rxfilt &= ~WB_NETCFG_RX_MULTI;

	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);

	return;
}

/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
wb_setcfg(sc, media)
	struct wb_softc *sc;
	u_int32_t media;
{
	int			i, restart = 0;

	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("%s: failed to force tx and "
				"rx to idle state\n", sc->sc_dev.dv_xname);
	}

	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}

void
wb_reset(sc)
	struct wb_softc *sc;
{
	int i;
	struct mii_data *mii = &sc->sc_mii;

	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
}

void
wb_fixmedia(sc)
	struct wb_softc *sc;
{
	struct mii_data *mii = &sc->sc_mii;
	u_int32_t media;

	if (LIST_FIRST(&mii->mii_phys) == NULL)
		return;

	mii_pollstat(mii);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		media = mii->mii_media_active & ~IFM_10_T;
		media |= IFM_100_TX;
	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		media = mii->mii_media_active & ~IFM_100_TX;
		media |= IFM_10_T;
	} else
		return;

	ifmedia_set(&mii->mii_media, media);
}

const struct pci_matchid wb_devices[] = {
	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
};

/*
 * Probe for a Winbond chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
wb_probe(parent, match, aux)
	struct device *parent;
	void *match, *aux;
{
	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
	    nitems(wb_devices)));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
wb_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wb_softc *sc = (struct wb_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t size;
	int rseg;
	pcireg_t command;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	caddr_t kva;

	/*
	 * Handle power management nonsense.
	 */

	command = pci_conf_read(pc, pa->pa_tag, WB_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL);
		if (command & WB_PSTATE_MASK) {
			u_int32_t		io, mem, irq;

			/* Save important PCI config data. */
			io = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOIO);
			mem = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, WB_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n", sc->sc_dev.dv_xname,
			    command & WB_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL,
			    command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, WB_PCI_LOIO, io);
			pci_conf_write(pc, pa->pa_tag, WB_PCI_LOMEM, mem);
			pci_conf_write(pc, pa->pa_tag, WB_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

#ifdef WB_USEIOSPACE
	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc list data\n");
		goto fail_2;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map list data, size %d\n",
		    sizeof(struct wb_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->wb_ldata = (struct wb_list_data *)kva;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_watchdog = wb_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do ifmedia setup.
	 */
	wb_stop(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = wb_miibus_readreg;
	sc->sc_mii.mii_writereg = wb_miibus_writereg;
	sc->sc_mii.mii_statchg = wb_miibus_statchg;
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

fail_5:
	bus_dmamap_destroy(pa->pa_dmat, dmamap);

fail_4:
	bus_dmamem_unmap(pa->pa_dmat, kva,
	    sizeof(struct wb_list_data));

fail_3:
	bus_dmamem_free(pa->pa_dmat, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int wb_list_tx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
		if (i == (WB_TX_LIST_CNT - 1)) {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[0];
		} else {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[i + 1];
		}
	}

	cd->wb_tx_free = &cd->wb_tx_chain[0];
	cd->wb_tx_tail = cd->wb_tx_head = NULL;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int wb_list_rx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		cd->wb_rx_chain[i].wb_ptr =
			(struct wb_desc *)&ld->wb_rx_list[i];
		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
		wb_newbuf(sc, &cd->wb_rx_chain[i]);
		if (i == (WB_RX_LIST_CNT - 1)) {
			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
			ld->wb_rx_list[i].wb_next =
					VTOPHYS(&ld->wb_rx_list[0]);
		} else {
			cd->wb_rx_chain[i].wb_nextdesc =
					&cd->wb_rx_chain[i + 1];
			ld->wb_rx_list[i].wb_next =
					VTOPHYS(&ld->wb_rx_list[i + 1]);
		}
	}

	cd->wb_rx_head = &cd->wb_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach its receive buffer.
 */
void
wb_newbuf(sc, c)
	struct wb_softc *sc;
	struct wb_chain_onefrag *c;
{
	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
	c->wb_ptr->wb_status = WB_RXSTAT;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void wb_rxeof(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;
	struct wb_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
							WB_RXSTAT_OWN)) {
		struct mbuf *m;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			printf("%s: receiver babbling: possible chip "
				"bug, forcing reset\n", sc->sc_dev.dv_xname);
			wb_fixmedia(sc);
			wb_reset(sc);
			wb_init(sc);
			return;
		}

		if (rxstat & WB_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
		    ETHER_ALIGN, ifp, NULL);
		wb_newbuf(sc, cur_rx);
		if (m == NULL) {
			ifp->if_ierrors++;
			break;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return;
}

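/*
 * RX 'end of channel': drain any frames that did complete, then
 * restart the receiver at the head of the list.
 */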
void wb_rxeoc(sc)
	struct wb_softc		*sc;
{
	wb_rxeof(sc);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void wb_txeof(sc)
	struct wb_softc		*sc;
{
	struct wb_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		if (txstat & WB_TXSTAT_TXERR) {
			ifp->if_oerrors++;
			if (txstat & WB_TXSTAT_ABORT)
				ifp->if_collisions++;
			if (txstat & WB_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
void wb_txeoc(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}

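/*
 * Interrupt service routine: mask the chip's interrupts, ack and handle
 * each pending event (RX/TX completion, errors, bus faults), then
 * re-enable interrupts and restart transmission if anything is queued.
 */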
int wb_intr(arg)
	void			*arg;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR) {
			wb_reset(sc);
			wb_init(sc);
		}

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}

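/*
 * One-second timer: drive the MII autonegotiation state machine and
 * reschedule ourselves.
 */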
void
wb_tick(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);
	timeout_add_sec(&sc->wb_tick_tmo, 1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int wb_encap(sc, c, m_head)
	struct wb_softc		*sc;
	struct wb_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct wb_desc		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void wb_start(ifp)
	struct ifnet		*ifp;
{
	struct wb_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct wb_chain		*cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->wb_cdata.wb_tx_free;

	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->wb_cdata.wb_tx_free;
		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;

		/* Pack the data into the descriptor. */
		wb_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
	sc->wb_cdata.wb_tx_tail = cur_tx;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		sc->wb_cdata.wb_tx_head = start_tx;
		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
	} else {
		/*
		 * We need to distinguish between the case where
		 * the own bit is clear because the chip cleared it
		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSENT is just some
		 * randomly chosen number which doesn't have the own
		 * bit set. When we actually transmit the frame, the
		 * status word will have _only_ the own bit set, so
		 * the txeoc handler will be able to tell if it needs
		 * to initiate another transmission to flush out pending
		 * frames.
		 */
		WB_TXOWN(start_tx) = WB_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

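/*
 * Initialize the hardware: reset the chip, set the bus/cache parameters,
 * program the station address and receive filter, load the descriptor
 * lists and turn on the receiver and transmitter.
 */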
void wb_init(xsc)
	void			*xsc;
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add_sec(&sc->wb_tick_tmo, 1);

	return;
}

/*
 * Set media options.
 */
int
wb_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		wb_init(sc);

	return(0);
}

/*
 * Report current media status.
 */
void
wb_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct wb_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

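/*
 * Handle socket ioctls: bring the interface up or down and pass
 * address, flags and media requests to the appropriate handlers.
 */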
int wb_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct wb_softc		*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			wb_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			wb_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			wb_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				wb_stop(sc);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			wb_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

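/*
 * Transmit watchdog: the chip has stopped responding, so reset it,
 * reinitialize and kick the transmit queue again.
 */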
void wb_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct wb_softc		*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

#ifdef foo
	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
#endif
	wb_stop(sc);
	wb_reset(sc);
	wb_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		wb_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void wb_stop(sc)
	struct wb_softc		*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->wb_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	bzero((char *)&sc->wb_ldata->wb_rx_list,
		sizeof(sc->wb_ldata->wb_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
		}
	}

	bzero((char *)&sc->wb_ldata->wb_tx_list,
		sizeof(sc->wb_ldata->wb_tx_list));
}

struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

struct cfdriver wb_cd = {
	NULL, "wb", DV_IFNET
};