1/*	$OpenBSD: if_wb.c,v 1.77 2024/05/24 06:02:57 jsg Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $
35 */
36
37/*
38 * Winbond fast ethernet PCI NIC driver
39 *
40 * Supports various cheap network adapters based on the Winbond W89C840F
41 * fast ethernet controller chip. This includes adapters manufactured by
42 * Winbond itself and some made by Linksys.
43 *
44 * Written by Bill Paul <wpaul@ctr.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48
49/*
50 * The Winbond W89C840F chip is a bus master; in some ways it resembles
 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
 * one major difference: while the registers do many of the same things
 * as a tulip adapter, the offsets are different. Where tulip registers
 * are typically spaced 8 bytes apart, the Winbond
55 * registers are spaced 4 bytes apart. The receiver filter is also
56 * programmed differently.
57 *
58 * Like the tulip, the Winbond chip uses small descriptors containing
59 * a status word, a control word and 32-bit areas that can either be used
60 * to point to two external data blocks, or to point to a single block
61 * and another descriptor in a linked list. Descriptors can be grouped
62 * together in blocks to form fixed length rings or can be chained
63 * together in linked lists. A single packet may be spread out over
64 * several descriptors if necessary.
65 *
 * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single mbuf cluster buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
70 *
71 * For transmission, the driver creates a linked list of 'super descriptors'
72 * which each contain several individual descriptors linked together.
73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
74 * abuse as fragment pointers. This allows us to use a buffer management
75 * scheme very similar to that used in the ThunderLAN and Etherlink XL
76 * drivers.
77 *
78 * Autonegotiation is performed using the external PHY via the MII bus.
79 * The sample boards I have all use a Davicom PHY.
80 *
81 * Note: the author of the Linux driver for the Winbond chip alludes
82 * to some sort of flaw in the chip's design that seems to mandate some
83 * drastic workaround which significantly impairs transmit performance.
84 * I have no idea what he's on about: transmit performance with all
85 * three of my test boards seems fine.
86 */
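
/*
 * A rough sketch of the descriptor layout as it is used in this file
 * (see if_wbreg.h for the authoritative definitions): each descriptor
 * carries a status word (wb_status), a control word (wb_ctl), a link to
 * the next descriptor (wb_next) and a pointer to the data buffer
 * (wb_data). On the transmit side, each wb_tx_list entry holds an array
 * wb_frag[] of WB_MAXFRAGS such descriptors, which wb_encap() fills in
 * to form one 'super descriptor' per packet.
 */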
87
88#include "bpfilter.h"
89
90#include <sys/param.h>
91#include <sys/systm.h>
92#include <sys/sockio.h>
93#include <sys/mbuf.h>
94#include <sys/device.h>
95#include <sys/queue.h>
96#include <sys/timeout.h>
97
98#include <net/if.h>
99
100#include <netinet/in.h>
101#include <netinet/if_ether.h>
102
103#include <net/if_media.h>
104
105#if NBPFILTER > 0
106#include <net/bpf.h>
107#endif
108
109#include <uvm/uvm_extern.h>		/* for vtophys */
110#define	VTOPHYS(v)	vtophys((vaddr_t)(v))
111
112#include <dev/mii/miivar.h>
113#include <dev/pci/pcireg.h>
114#include <dev/pci/pcivar.h>
115#include <dev/pci/pcidevs.h>
116
117#define WB_USEIOSPACE
118
119/* #define WB_BACKGROUND_AUTONEG */
120
121#include <dev/pci/if_wbreg.h>
122
123int wb_probe(struct device *, void *, void *);
124void wb_attach(struct device *, struct device *, void *);
125
126void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *);
127int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);
128
129void wb_rxeof(struct wb_softc *);
130void wb_rxeoc(struct wb_softc *);
131void wb_txeof(struct wb_softc *);
132void wb_txeoc(struct wb_softc *);
133int wb_intr(void *);
134void wb_tick(void *);
135void wb_start(struct ifnet *);
136int wb_ioctl(struct ifnet *, u_long, caddr_t);
137void wb_init(void *);
138void wb_stop(struct wb_softc *);
139void wb_watchdog(struct ifnet *);
140int wb_ifmedia_upd(struct ifnet *);
141void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);
142
143void wb_eeprom_putbyte(struct wb_softc *, int);
144void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
145void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
146void wb_mii_sync(struct wb_softc *);
147void wb_mii_send(struct wb_softc *, u_int32_t, int);
148int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
149int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);
150
151void wb_setcfg(struct wb_softc *, uint64_t);
152void wb_setmulti(struct wb_softc *);
153void wb_reset(struct wb_softc *);
154void wb_fixmedia(struct wb_softc *);
155int wb_list_rx_init(struct wb_softc *);
156int wb_list_tx_init(struct wb_softc *);
157
158int wb_miibus_readreg(struct device *, int, int);
159void wb_miibus_writereg(struct device *, int, int, int);
160void wb_miibus_statchg(struct device *);
161
162#define WB_SETBIT(sc, reg, x)				\
163	CSR_WRITE_4(sc, reg,				\
164		CSR_READ_4(sc, reg) | x)
165
166#define WB_CLRBIT(sc, reg, x)				\
167	CSR_WRITE_4(sc, reg,				\
168		CSR_READ_4(sc, reg) & ~x)
169
170#define SIO_SET(x)					\
171	CSR_WRITE_4(sc, WB_SIO,				\
172		CSR_READ_4(sc, WB_SIO) | x)
173
174#define SIO_CLR(x)					\
175	CSR_WRITE_4(sc, WB_SIO,				\
176		CSR_READ_4(sc, WB_SIO) & ~x)
177
178/*
 * Send a read command and address to the EEPROM.
180 */
181void
182wb_eeprom_putbyte(struct wb_softc *sc, int addr)
183{
184	int			d, i;
185
186	d = addr | WB_EECMD_READ;
187
188	/*
189	 * Feed in each bit and strobe the clock.
190	 */
191	for (i = 0x400; i; i >>= 1) {
192		if (d & i) {
193			SIO_SET(WB_SIO_EE_DATAIN);
194		} else {
195			SIO_CLR(WB_SIO_EE_DATAIN);
196		}
197		DELAY(100);
198		SIO_SET(WB_SIO_EE_CLK);
199		DELAY(150);
200		SIO_CLR(WB_SIO_EE_CLK);
201		DELAY(100);
202	}
203
204	return;
205}
206
207/*
208 * Read a word of data stored in the EEPROM at address 'addr.'
209 */
210void
211wb_eeprom_getword(struct wb_softc *sc, int addr, u_int16_t *dest)
212{
213	int			i;
214	u_int16_t		word = 0;
215
216	/* Enter EEPROM access mode. */
217	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
218
219	/*
220	 * Send address of word we want to read.
221	 */
222	wb_eeprom_putbyte(sc, addr);
223
224	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);
225
226	/*
227	 * Start reading bits from EEPROM.
228	 */
229	for (i = 0x8000; i; i >>= 1) {
230		SIO_SET(WB_SIO_EE_CLK);
231		DELAY(100);
232		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
233			word |= i;
234		SIO_CLR(WB_SIO_EE_CLK);
235		DELAY(100);
236	}
237
238	/* Turn off EEPROM access mode. */
239	CSR_WRITE_4(sc, WB_SIO, 0);
240
241	*dest = word;
242
243	return;
244}
245
246/*
247 * Read a sequence of words from the EEPROM.
248 */
249void
250wb_read_eeprom(struct wb_softc *sc, caddr_t dest, int off, int cnt, int swap)
251{
252	int			i;
253	u_int16_t		word = 0, *ptr;
254
255	for (i = 0; i < cnt; i++) {
256		wb_eeprom_getword(sc, off + i, &word);
257		ptr = (u_int16_t *)(dest + (i * 2));
258		if (swap)
259			*ptr = ntohs(word);
260		else
261			*ptr = word;
262	}
263
264	return;
265}
266
267/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
269 */
270void
271wb_mii_sync(struct wb_softc *sc)
272{
273	int			i;
274
275	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);
276
277	for (i = 0; i < 32; i++) {
278		SIO_SET(WB_SIO_MII_CLK);
279		DELAY(1);
280		SIO_CLR(WB_SIO_MII_CLK);
281		DELAY(1);
282	}
283
284	return;
285}
286
287/*
288 * Clock a series of bits through the MII.
289 */
290void
291wb_mii_send(struct wb_softc *sc, u_int32_t bits, int cnt)
292{
293	int			i;
294
295	SIO_CLR(WB_SIO_MII_CLK);
296
297	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(WB_SIO_MII_DATAIN);
		} else {
			SIO_CLR(WB_SIO_MII_DATAIN);
		}
303		DELAY(1);
304		SIO_CLR(WB_SIO_MII_CLK);
305		DELAY(1);
306		SIO_SET(WB_SIO_MII_CLK);
307	}
308}
309
310/*
 * Read a PHY register through the MII.
312 */
313int
314wb_mii_readreg(struct wb_softc *sc, struct wb_mii_frame *frame)
315{
316	int			i, ack, s;
317
318	s = splnet();
319
320	/*
321	 * Set up frame for RX.
322	 */
323	frame->mii_stdelim = WB_MII_STARTDELIM;
324	frame->mii_opcode = WB_MII_READOP;
325	frame->mii_turnaround = 0;
326	frame->mii_data = 0;
327
328	CSR_WRITE_4(sc, WB_SIO, 0);
329
330	/*
331 	 * Turn on data xmit.
332	 */
333	SIO_SET(WB_SIO_MII_DIR);
334
335	wb_mii_sync(sc);
336
337	/*
338	 * Send command/address info.
339	 */
340	wb_mii_send(sc, frame->mii_stdelim, 2);
341	wb_mii_send(sc, frame->mii_opcode, 2);
342	wb_mii_send(sc, frame->mii_phyaddr, 5);
343	wb_mii_send(sc, frame->mii_regaddr, 5);
344
345	/* Idle bit */
346	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
347	DELAY(1);
348	SIO_SET(WB_SIO_MII_CLK);
349	DELAY(1);
350
351	/* Turn off xmit. */
352	SIO_CLR(WB_SIO_MII_DIR);
353	/* Check for ack */
354	SIO_CLR(WB_SIO_MII_CLK);
355	DELAY(1);
356	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
357	SIO_SET(WB_SIO_MII_CLK);
358	DELAY(1);
359	SIO_CLR(WB_SIO_MII_CLK);
360	DELAY(1);
361	SIO_SET(WB_SIO_MII_CLK);
362	DELAY(1);
363
364	/*
365	 * Now try reading data bits. If the ack failed, we still
366	 * need to clock through 16 cycles to keep the PHY(s) in sync.
367	 */
368	if (ack) {
369		for(i = 0; i < 16; i++) {
370			SIO_CLR(WB_SIO_MII_CLK);
371			DELAY(1);
372			SIO_SET(WB_SIO_MII_CLK);
373			DELAY(1);
374		}
375		goto fail;
376	}
377
378	for (i = 0x8000; i; i >>= 1) {
379		SIO_CLR(WB_SIO_MII_CLK);
380		DELAY(1);
381		if (!ack) {
382			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
383				frame->mii_data |= i;
384			DELAY(1);
385		}
386		SIO_SET(WB_SIO_MII_CLK);
387		DELAY(1);
388	}
389
390fail:
391
392	SIO_CLR(WB_SIO_MII_CLK);
393	DELAY(1);
394	SIO_SET(WB_SIO_MII_CLK);
395	DELAY(1);
396
397	splx(s);
398
399	if (ack)
400		return(1);
401	return(0);
402}
403
404/*
405 * Write to a PHY register through the MII.
406 */
407int
408wb_mii_writereg(struct wb_softc *sc, struct wb_mii_frame *frame)
409{
410	int			s;
411
412	s = splnet();
413	/*
414	 * Set up frame for TX.
415	 */
416
417	frame->mii_stdelim = WB_MII_STARTDELIM;
418	frame->mii_opcode = WB_MII_WRITEOP;
419	frame->mii_turnaround = WB_MII_TURNAROUND;
420
421	/*
422 	 * Turn on data output.
423	 */
424	SIO_SET(WB_SIO_MII_DIR);
425
426	wb_mii_sync(sc);
427
428	wb_mii_send(sc, frame->mii_stdelim, 2);
429	wb_mii_send(sc, frame->mii_opcode, 2);
430	wb_mii_send(sc, frame->mii_phyaddr, 5);
431	wb_mii_send(sc, frame->mii_regaddr, 5);
432	wb_mii_send(sc, frame->mii_turnaround, 2);
433	wb_mii_send(sc, frame->mii_data, 16);
434
435	/* Idle bit. */
436	SIO_SET(WB_SIO_MII_CLK);
437	DELAY(1);
438	SIO_CLR(WB_SIO_MII_CLK);
439	DELAY(1);
440
441	/*
442	 * Turn off xmit.
443	 */
444	SIO_CLR(WB_SIO_MII_DIR);
445
446	splx(s);
447
448	return(0);
449}
450
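/*
 * MII bus glue: read a PHY register on behalf of the miibus layer by
 * building a wb_mii_frame and bit-banging it out via wb_mii_readreg().
 */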
451int
452wb_miibus_readreg(struct device *dev, int phy, int reg)
453{
454	struct wb_softc *sc = (struct wb_softc *)dev;
455	struct wb_mii_frame frame;
456
457	bzero(&frame, sizeof(frame));
458
459	frame.mii_phyaddr = phy;
460	frame.mii_regaddr = reg;
461	wb_mii_readreg(sc, &frame);
462
463	return(frame.mii_data);
464}
465
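/*
 * MII bus glue: write a PHY register on behalf of the miibus layer.
 */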
466void
467wb_miibus_writereg(struct device *dev, int phy, int reg, int data)
468{
469	struct wb_softc *sc = (struct wb_softc *)dev;
470	struct wb_mii_frame frame;
471
472	bzero(&frame, sizeof(frame));
473
474	frame.mii_phyaddr = phy;
475	frame.mii_regaddr = reg;
476	frame.mii_data = data;
477
478	wb_mii_writereg(sc, &frame);
479
480	return;
481}
482
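/*
 * MII bus glue: the PHY's media status changed; push the new speed and
 * duplex settings into the chip's network config register.
 */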
483void
484wb_miibus_statchg(struct device *dev)
485{
486	struct wb_softc *sc = (struct wb_softc *)dev;
487
488	wb_setcfg(sc, sc->sc_mii.mii_media_active);
489}
490
491/*
492 * Program the 64-bit multicast hash filter.
493 */
494void
495wb_setmulti(struct wb_softc *sc)
496{
497	struct ifnet		*ifp;
498	int			h = 0;
499	u_int32_t		hashes[2] = { 0, 0 };
500	struct arpcom		*ac = &sc->arpcom;
501	struct ether_multi	*enm;
502	struct ether_multistep	step;
503	u_int32_t		rxfilt;
504	int			mcnt = 0;
505
506	ifp = &sc->arpcom.ac_if;
507
508	rxfilt = CSR_READ_4(sc, WB_NETCFG);
509
510	if (ac->ac_multirangecnt > 0)
511		ifp->if_flags |= IFF_ALLMULTI;
512
513	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
514		rxfilt |= WB_NETCFG_RX_MULTI;
515		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
516		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
517		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
518		return;
519	}
520
521	/* first, zot all the existing hash bits */
522	CSR_WRITE_4(sc, WB_MAR0, 0);
523	CSR_WRITE_4(sc, WB_MAR1, 0);
524
525	/* now program new ones */
526	ETHER_FIRST_MULTI(step, ac, enm);
527	while (enm != NULL) {
		h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
		    0x3F;
529		if (h < 32)
530			hashes[0] |= (1 << h);
531		else
532			hashes[1] |= (1 << (h - 32));
533		mcnt++;
534		ETHER_NEXT_MULTI(step, enm);
535	}
536
537	if (mcnt)
538		rxfilt |= WB_NETCFG_RX_MULTI;
539	else
540		rxfilt &= ~WB_NETCFG_RX_MULTI;
541
542	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
543	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
544	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
545
546	return;
547}
548
549/*
550 * The Winbond manual states that in order to fiddle with the
551 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
552 * first have to put the transmit and/or receive logic in the idle state.
553 */
554void
555wb_setcfg(struct wb_softc *sc, uint64_t media)
556{
557	int			i, restart = 0;
558
559	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
560		restart = 1;
561		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));
562
563		for (i = 0; i < WB_TIMEOUT; i++) {
564			DELAY(10);
565			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
566				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
567				break;
568		}
569
570		if (i == WB_TIMEOUT)
571			printf("%s: failed to force tx and "
572				"rx to idle state\n", sc->sc_dev.dv_xname);
573	}
574
575	if (IFM_SUBTYPE(media) == IFM_10_T)
576		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
577	else
578		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
579
580	if ((media & IFM_GMASK) == IFM_FDX)
581		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
582	else
583		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
584
585	if (restart)
586		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);
587
588	return;
589}
590
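/*
 * Soft-reset the chip: clear the config registers, set the reset bit in
 * the bus control register and wait for it to self-clear, then reset
 * any attached PHYs.
 */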
591void
592wb_reset(struct wb_softc *sc)
593{
594	int i;
595	struct mii_data *mii = &sc->sc_mii;
596
597	CSR_WRITE_4(sc, WB_NETCFG, 0);
598	CSR_WRITE_4(sc, WB_BUSCTL, 0);
599	CSR_WRITE_4(sc, WB_TXADDR, 0);
600	CSR_WRITE_4(sc, WB_RXADDR, 0);
601
602	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
603	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
604
605	for (i = 0; i < WB_TIMEOUT; i++) {
606		DELAY(10);
607		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
608			break;
609	}
610	if (i == WB_TIMEOUT)
611		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
612
613	/* Wait a little while for the chip to get its brains in order. */
614	DELAY(1000);
615
616	if (mii->mii_instance) {
617		struct mii_softc *miisc;
618		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
619			mii_phy_reset(miisc);
620	}
621}
622
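/*
 * Flip the current media setting between 10baseT and 100baseTX. This is
 * called from the receive error paths before the chip is reinitialized,
 * apparently as a workaround for the receiver wedging at the wrong speed.
 */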
623void
624wb_fixmedia(struct wb_softc *sc)
625{
626	struct mii_data *mii = &sc->sc_mii;
627	uint64_t media;
628
629	if (LIST_FIRST(&mii->mii_phys) == NULL)
630		return;
631
632	mii_pollstat(mii);
633	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
634		media = mii->mii_media_active & ~IFM_10_T;
635		media |= IFM_100_TX;
636	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
637		media = mii->mii_media_active & ~IFM_100_TX;
638		media |= IFM_10_T;
639	} else
640		return;
641
642	ifmedia_set(&mii->mii_media, media);
643}
644
645const struct pci_matchid wb_devices[] = {
646	{ PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F },
647	{ PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX },
648};
649
650/*
651 * Probe for a Winbond chip. Check the PCI vendor and device
 * IDs against our list and return a match if we find one.
653 */
654int
655wb_probe(struct device *parent, void *match, void *aux)
656{
657	return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices,
658	    nitems(wb_devices)));
659}
660
661/*
662 * Attach the interface. Allocate softc structures, do ifmedia
663 * setup and ethernet/BPF attach.
664 */
665void
666wb_attach(struct device *parent, struct device *self, void *aux)
667{
668	struct wb_softc *sc = (struct wb_softc *)self;
669	struct pci_attach_args *pa = aux;
670	pci_chipset_tag_t pc = pa->pa_pc;
671	pci_intr_handle_t ih;
672	const char *intrstr = NULL;
673	struct ifnet *ifp = &sc->arpcom.ac_if;
674	bus_size_t size;
675	int rseg;
676	bus_dma_segment_t seg;
677	bus_dmamap_t dmamap;
678	caddr_t kva;
679
680	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
681
682	/*
683	 * Map control/status registers.
684	 */
685
686#ifdef WB_USEIOSPACE
687	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
688	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
689		printf(": can't map i/o space\n");
690		return;
691	}
692#else
693	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
694	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
695		printf(": can't map mem space\n");
696		return;
697	}
698#endif
699
700	/* Allocate interrupt */
701	if (pci_intr_map(pa, &ih)) {
702		printf(": couldn't map interrupt\n");
703		goto fail_1;
704	}
705	intrstr = pci_intr_string(pc, ih);
706	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
707	    self->dv_xname);
708	if (sc->sc_ih == NULL) {
709		printf(": couldn't establish interrupt");
710		if (intrstr != NULL)
711			printf(" at %s", intrstr);
712		printf("\n");
713		goto fail_1;
714	}
715	printf(": %s", intrstr);
716
717	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;
718
719	/* Reset the adapter. */
720	wb_reset(sc);
721
722	/*
723	 * Get station address from the EEPROM.
724	 */
725	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
726	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
727
728	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
729	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
730		printf(": can't alloc list data\n");
731		goto fail_2;
732	}
733	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
734	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map list data, size %zu\n",
736		    sizeof(struct wb_list_data));
737		goto fail_3;
738	}
739	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
740	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
741		printf(": can't create dma map\n");
742		goto fail_4;
743	}
744	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
745	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
746		printf(": can't load dma map\n");
747		goto fail_5;
748	}
749	sc->wb_ldata = (struct wb_list_data *)kva;
750
751	ifp->if_softc = sc;
752	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
753	ifp->if_ioctl = wb_ioctl;
754	ifp->if_start = wb_start;
755	ifp->if_watchdog = wb_watchdog;
756	ifq_init_maxlen(&ifp->if_snd, WB_TX_LIST_CNT - 1);
757
758	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
759
760	/*
761	 * Do ifmedia setup.
762	 */
763	wb_stop(sc);
764
765	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
766	sc->sc_mii.mii_ifp = ifp;
767	sc->sc_mii.mii_readreg = wb_miibus_readreg;
768	sc->sc_mii.mii_writereg = wb_miibus_writereg;
769	sc->sc_mii.mii_statchg = wb_miibus_statchg;
770	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
771	    0);
772	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
773		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
774		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
775	} else
776		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
777
778	/*
779	 * Call MI attach routines.
780	 */
781	if_attach(ifp);
782	ether_ifattach(ifp);
783	return;
784
785fail_5:
786	bus_dmamap_destroy(pa->pa_dmat, dmamap);
787
788fail_4:
789	bus_dmamem_unmap(pa->pa_dmat, kva,
790	    sizeof(struct wb_list_data));
791
792fail_3:
793	bus_dmamem_free(pa->pa_dmat, &seg, rseg);
794
795fail_2:
796	pci_intr_disestablish(pc, sc->sc_ih);
797
798fail_1:
799	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
800}
801
802/*
803 * Initialize the transmit descriptors.
804 */
805int
806wb_list_tx_init(struct wb_softc *sc)
807{
808	struct wb_chain_data	*cd;
809	struct wb_list_data	*ld;
810	int			i;
811
812	cd = &sc->wb_cdata;
813	ld = sc->wb_ldata;
814
815	for (i = 0; i < WB_TX_LIST_CNT; i++) {
816		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
817		if (i == (WB_TX_LIST_CNT - 1)) {
818			cd->wb_tx_chain[i].wb_nextdesc =
819				&cd->wb_tx_chain[0];
820		} else {
821			cd->wb_tx_chain[i].wb_nextdesc =
822				&cd->wb_tx_chain[i + 1];
823		}
824	}
825
826	cd->wb_tx_free = &cd->wb_tx_chain[0];
827	cd->wb_tx_tail = cd->wb_tx_head = NULL;
828
829	return(0);
830}
831
832
833/*
834 * Initialize the RX descriptors and allocate mbufs for them. Note that
835 * we arrange the descriptors in a closed ring, so that the last descriptor
836 * points back to the first.
837 */
838int
839wb_list_rx_init(struct wb_softc *sc)
840{
841	struct wb_chain_data	*cd;
842	struct wb_list_data	*ld;
843	int			i;
844
845	cd = &sc->wb_cdata;
846	ld = sc->wb_ldata;
847
848	for (i = 0; i < WB_RX_LIST_CNT; i++) {
849		cd->wb_rx_chain[i].wb_ptr =
850			(struct wb_desc *)&ld->wb_rx_list[i];
851		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
852		wb_newbuf(sc, &cd->wb_rx_chain[i]);
853		if (i == (WB_RX_LIST_CNT - 1)) {
854			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
855			ld->wb_rx_list[i].wb_next =
856					VTOPHYS(&ld->wb_rx_list[0]);
857		} else {
858			cd->wb_rx_chain[i].wb_nextdesc =
859					&cd->wb_rx_chain[i + 1];
860			ld->wb_rx_list[i].wb_next =
861					VTOPHYS(&ld->wb_rx_list[i + 1]);
862		}
863	}
864
865	cd->wb_rx_head = &cd->wb_rx_chain[0];
866
867	return(0);
868}
869
870/*
871 * Initialize an RX descriptor and attach an MBUF cluster.
872 */
873void
874wb_newbuf(struct wb_softc *sc, struct wb_chain_onefrag *c)
875{
876	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
877	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
878	c->wb_ptr->wb_status = WB_RXSTAT;
879}
880
881/*
882 * A frame has been uploaded: pass the resulting mbuf chain up to
883 * the higher level protocols.
884 */
885void
886wb_rxeof(struct wb_softc *sc)
887{
888	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
890	struct wb_chain_onefrag	*cur_rx;
891	int			total_len = 0;
892	u_int32_t		rxstat;
893
894	ifp = &sc->arpcom.ac_if;
895
896	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
897							WB_RXSTAT_OWN)) {
898		struct mbuf *m;
899
900		cur_rx = sc->wb_cdata.wb_rx_head;
901		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;
902
903		if ((rxstat & WB_RXSTAT_MIIERR) ||
904		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
905		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
906		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
907		    !(rxstat & WB_RXSTAT_RXCMP)) {
908			ifp->if_ierrors++;
909			wb_newbuf(sc, cur_rx);
910			printf("%s: receiver babbling: possible chip "
911				"bug, forcing reset\n", sc->sc_dev.dv_xname);
912			wb_fixmedia(sc);
913			wb_init(sc);
914			break;
915		}
916
917		if (rxstat & WB_RXSTAT_RXERR) {
918			ifp->if_ierrors++;
919			wb_newbuf(sc, cur_rx);
920			break;
921		}
922
923		/* No errors; receive the packet. */
924		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);
925
926		/*
927		 * XXX The Winbond chip includes the CRC with every
928		 * received frame, and there's no way to turn this
929		 * behavior off (at least, I can't find anything in
930	 	 * the manual that explains how to do it) so we have
931		 * to trim off the CRC manually.
932		 */
933		total_len -= ETHER_CRC_LEN;
934
935		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
936		    ETHER_ALIGN);
937		wb_newbuf(sc, cur_rx);
938		if (m == NULL) {
939			ifp->if_ierrors++;
940			break;
941		}
942
943		ml_enqueue(&ml, m);
944	}
945
946	if_input(ifp, &ml);
947}
948
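/*
 * RX 'end of channel' handler, called when the receiver goes idle:
 * drain any completed frames, then restart the receiver at the head of
 * the RX ring, kicking it if it was left in the suspended state.
 */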
949void
950wb_rxeoc(struct wb_softc *sc)
951{
952	wb_rxeof(sc);
953
954	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
955	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
956	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
957	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
958		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
959
960	return;
961}
962
963/*
964 * A frame was downloaded to the chip. It's safe for us to clean up
965 * the list buffers.
966 */
967void
968wb_txeof(struct wb_softc *sc)
969{
970	struct wb_chain		*cur_tx;
971	struct ifnet		*ifp;
972
973	ifp = &sc->arpcom.ac_if;
974
975	/* Clear the timeout timer. */
976	ifp->if_timer = 0;
977
978	if (sc->wb_cdata.wb_tx_head == NULL)
979		return;
980
981	/*
982	 * Go through our tx list and free mbufs for those
983	 * frames that have been transmitted.
984	 */
985	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
986		u_int32_t		txstat;
987
988		cur_tx = sc->wb_cdata.wb_tx_head;
989		txstat = WB_TXSTATUS(cur_tx);
990
991		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
992			break;
993
994		if (txstat & WB_TXSTAT_TXERR) {
995			ifp->if_oerrors++;
996			if (txstat & WB_TXSTAT_ABORT)
997				ifp->if_collisions++;
998			if (txstat & WB_TXSTAT_LATECOLL)
999				ifp->if_collisions++;
1000		}
1001
1002		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;
1003
1004		m_freem(cur_tx->wb_mbuf);
1005		cur_tx->wb_mbuf = NULL;
1006
1007		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
1008			sc->wb_cdata.wb_tx_head = NULL;
1009			sc->wb_cdata.wb_tx_tail = NULL;
1010			break;
1011		}
1012
1013		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
1014	}
1015
1016	return;
1017}
1018
1019/*
1020 * TX 'end of channel' interrupt handler.
1021 */
1022void
1023wb_txeoc(struct wb_softc *sc)
1024{
1025	struct ifnet		*ifp;
1026
1027	ifp = &sc->arpcom.ac_if;
1028
1029	ifp->if_timer = 0;
1030
1031	if (sc->wb_cdata.wb_tx_head == NULL) {
1032		ifq_clr_oactive(&ifp->if_snd);
1033		sc->wb_cdata.wb_tx_tail = NULL;
1034	} else {
1035		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
1036			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
1037			ifp->if_timer = 5;
1038			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1039		}
1040	}
1041
1042	return;
1043}
1044
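/*
 * Interrupt handler: mask interrupts, then acknowledge and dispatch each
 * pending interrupt source (RX/TX completion, errors, underruns). When
 * done, re-enable interrupts and restart transmission if the send queue
 * is non-empty.
 */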
1045int
1046wb_intr(void *arg)
1047{
1048	struct wb_softc		*sc;
1049	struct ifnet		*ifp;
1050	u_int32_t		status;
1051	int			r = 0;
1052
1053	sc = arg;
1054	ifp = &sc->arpcom.ac_if;
1055
1056	if (!(ifp->if_flags & IFF_UP))
1057		return (r);
1058
1059	/* Disable interrupts. */
1060	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1061
1062	for (;;) {
1063
1064		status = CSR_READ_4(sc, WB_ISR);
1065		if (status)
1066			CSR_WRITE_4(sc, WB_ISR, status);
1067
1068		if ((status & WB_INTRS) == 0)
1069			break;
1070
1071		r = 1;
1072
1073		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
1074			ifp->if_ierrors++;
1075			wb_reset(sc);
1076			if (status & WB_ISR_RX_ERR)
1077				wb_fixmedia(sc);
1078			wb_init(sc);
1079			continue;
1080		}
1081
1082		if (status & WB_ISR_RX_OK)
1083			wb_rxeof(sc);
1084
1085		if (status & WB_ISR_RX_IDLE)
1086			wb_rxeoc(sc);
1087
1088		if (status & WB_ISR_TX_OK)
1089			wb_txeof(sc);
1090
1091		if (status & WB_ISR_TX_NOBUF)
1092			wb_txeoc(sc);
1093
1094		if (status & WB_ISR_TX_IDLE) {
1095			wb_txeof(sc);
1096			if (sc->wb_cdata.wb_tx_head != NULL) {
1097				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1098				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1099			}
1100		}
1101
1102		if (status & WB_ISR_TX_UNDERRUN) {
1103			ifp->if_oerrors++;
1104			wb_txeof(sc);
1105			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1106			/* Jack up TX threshold */
1107			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
1108			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
1109			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
1110			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1111		}
1112
1113		if (status & WB_ISR_BUS_ERR)
1114			wb_init(sc);
1115	}
1116
1117	/* Re-enable interrupts. */
1118	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
1119
1120	if (!ifq_empty(&ifp->if_snd)) {
1121		wb_start(ifp);
1122	}
1123
1124	return (r);
1125}
1126
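/*
 * One-second timer: poll the PHY via mii_tick() and reschedule ourselves.
 */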
1127void
1128wb_tick(void *xsc)
1129{
1130	struct wb_softc *sc = xsc;
1131	int s;
1132
1133	s = splnet();
1134	mii_tick(&sc->sc_mii);
1135	splx(s);
1136	timeout_add_sec(&sc->wb_tick_tmo, 1);
1137}
1138
1139/*
1140 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1141 * pointers to the fragment pointers.
1142 */
1143int
1144wb_encap(struct wb_softc *sc, struct wb_chain *c, struct mbuf *m_head)
1145{
1146	int			frag = 0;
1147	struct wb_desc		*f = NULL;
1148	int			total_len;
1149	struct mbuf		*m;
1150
1151	/*
1152 	 * Start packing the mbufs in this chain into
1153	 * the fragment pointers. Stop when we run out
1154 	 * of fragments or hit the end of the mbuf chain.
1155	 */
1156	m = m_head;
1157	total_len = 0;
1158
1159	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1160		if (m->m_len != 0) {
1161			if (frag == WB_MAXFRAGS)
1162				break;
1163			total_len += m->m_len;
1164			f = &c->wb_ptr->wb_frag[frag];
1165			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
1166			if (frag == 0) {
1167				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
1168				f->wb_status = 0;
1169			} else
1170				f->wb_status = WB_TXSTAT_OWN;
1171			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
1172			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
1173			frag++;
1174		}
1175	}
1176
1177	/*
	 * Handle special case: we used up all WB_MAXFRAGS fragments,
1179	 * but we have more mbufs left in the chain. Copy the
1180	 * data into an mbuf cluster. Note that we don't
1181	 * bother clearing the values in the other fragment
1182	 * pointers/counters; it wouldn't gain us anything,
1183	 * and would waste cycles.
1184	 */
1185	if (m != NULL) {
1186		struct mbuf		*m_new = NULL;
1187
1188		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1189		if (m_new == NULL)
1190			return(1);
1191		if (m_head->m_pkthdr.len > MHLEN) {
1192			MCLGET(m_new, M_DONTWAIT);
1193			if (!(m_new->m_flags & M_EXT)) {
1194				m_freem(m_new);
1195				return(1);
1196			}
1197		}
1198		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1199					mtod(m_new, caddr_t));
1200		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1201		m_freem(m_head);
1202		m_head = m_new;
1203		f = &c->wb_ptr->wb_frag[0];
1204		f->wb_status = 0;
1205		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
1206		f->wb_ctl = total_len = m_new->m_len;
1207		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
1208		frag = 1;
1209	}
1210
1211	if (total_len < WB_MIN_FRAMELEN) {
1212		f = &c->wb_ptr->wb_frag[frag];
1213		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
1214		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
1215		f->wb_ctl |= WB_TXCTL_TLINK;
1216		f->wb_status = WB_TXSTAT_OWN;
1217		frag++;
1218	}
1219
1220	c->wb_mbuf = m_head;
1221	c->wb_lastdesc = frag - 1;
1222	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
1223	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);
1224
1225	return(0);
1226}
1227
1228/*
1229 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1230 * to the mbuf data regions directly in the transmit lists. We also save a
1231 * copy of the pointers since the transmit list fragment pointers are
1232 * physical addresses.
1233 */
1234void
1235wb_start(struct ifnet *ifp)
1236{
1237	struct wb_softc		*sc;
1238	struct mbuf		*m_head = NULL;
1239	struct wb_chain		*cur_tx = NULL, *start_tx;
1240
1241	sc = ifp->if_softc;
1242
1243	/*
1244	 * Check for an available queue slot. If there are none,
1245	 * punt.
1246	 */
1247	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
1248		ifq_set_oactive(&ifp->if_snd);
1249		return;
1250	}
1251
1252	start_tx = sc->wb_cdata.wb_tx_free;
1253
1254	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
1255		m_head = ifq_dequeue(&ifp->if_snd);
1256		if (m_head == NULL)
1257			break;
1258
1259		/* Pick a descriptor off the free list. */
1260		cur_tx = sc->wb_cdata.wb_tx_free;
1261		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;
1262
1263		/* Pack the data into the descriptor. */
1264		wb_encap(sc, cur_tx, m_head);
1265
1266		if (cur_tx != start_tx)
1267			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;
1268
1269#if NBPFILTER > 0
1270		/*
1271		 * If there's a BPF listener, bounce a copy of this frame
1272		 * to him.
1273		 */
1274		if (ifp->if_bpf)
1275			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
1276			    BPF_DIRECTION_OUT);
1277#endif
1278	}
1279
1280	/*
1281	 * If there are no packets queued, bail.
1282	 */
1283	if (cur_tx == NULL)
1284		return;
1285
1286	/*
1287	 * Place the request for the upload interrupt
1288	 * in the last descriptor in the chain. This way, if
1289	 * we're chaining several packets at once, we'll only
1290	 * get an interrupt once for the whole chain rather than
1291	 * once for each packet.
1292	 */
1293	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
1294	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
1295	sc->wb_cdata.wb_tx_tail = cur_tx;
1296
1297	if (sc->wb_cdata.wb_tx_head == NULL) {
1298		sc->wb_cdata.wb_tx_head = start_tx;
1299		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
1300		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
1301	} else {
1302		/*
1303		 * We need to distinguish between the case where
1304		 * the own bit is clear because the chip cleared it
1305		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSENT is just some
		 * randomly chosen number which doesn't have the own
1308	 	 * bit set. When we actually transmit the frame, the
1309		 * status word will have _only_ the own bit set, so
1310		 * the txeoc handler will be able to tell if it needs
1311		 * to initiate another transmission to flush out pending
1312		 * frames.
1313		 */
1314		WB_TXOWN(start_tx) = WB_UNSENT;
1315	}
1316
1317	/*
1318	 * Set a timeout in case the chip goes out to lunch.
1319	 */
1320	ifp->if_timer = 5;
1321
1322	return;
1323}
1324
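/*
 * Initialize the hardware: reset the chip, program the DMA burst and
 * cache alignment parameters, the station address and the RX filter,
 * set up the RX/TX descriptor lists, enable the receiver, transmitter
 * and interrupts, and start the one-second MII tick timer.
 */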
1325void
1326wb_init(void *xsc)
1327{
1328	struct wb_softc *sc = xsc;
1329	struct ifnet *ifp = &sc->arpcom.ac_if;
1330	int s, i;
1331
1332	s = splnet();
1333
1334	/*
1335	 * Cancel pending I/O and free all RX/TX buffers.
1336	 */
1337	wb_stop(sc);
1338	wb_reset(sc);
1339
1340	sc->wb_txthresh = WB_TXTHRESH_INIT;
1341
1342	/*
1343	 * Set cache alignment and burst length.
1344	 */
1345#ifdef foo
1346	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
1347	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
1348	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
1349#endif
1350
1351	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
1352	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
1353	switch(sc->wb_cachesize) {
1354	case 32:
1355		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
1356		break;
1357	case 16:
1358		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
1359		break;
1360	case 8:
1361		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
1362		break;
1363	case 0:
1364	default:
1365		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
1366		break;
1367	}
1368
1369	/* This doesn't tend to work too well at 100Mbps. */
1370	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);
1371
1372	/* Init our MAC address */
1373	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1374		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
1375	}
1376
1377	/* Init circular RX list. */
1378	if (wb_list_rx_init(sc) == ENOBUFS) {
1379		printf("%s: initialization failed: no "
1380			"memory for rx buffers\n", sc->sc_dev.dv_xname);
1381		wb_stop(sc);
1382		splx(s);
1383		return;
1384	}
1385
1386	/* Init TX descriptors. */
1387	wb_list_tx_init(sc);
1388
1389	/* If we want promiscuous mode, set the allframes bit. */
1390	if (ifp->if_flags & IFF_PROMISC) {
1391		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
1392	} else {
1393		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
1394	}
1395
1396	/*
1397	 * Set capture broadcast bit to capture broadcast frames.
1398	 */
1399	if (ifp->if_flags & IFF_BROADCAST) {
1400		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
1401	} else {
1402		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
1403	}
1404
1405	/*
1406	 * Program the multicast filter, if necessary.
1407	 */
1408	wb_setmulti(sc);
1409
1410	/*
1411	 * Load the address of the RX list.
1412	 */
1413	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1414	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
1415
1416	/*
1417	 * Enable interrupts.
1418	 */
1419	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
1420	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);
1421
1422	/* Enable receiver and transmitter. */
1423	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
1424	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
1425
1426	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1427	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
1428	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
1429
1430	ifp->if_flags |= IFF_RUNNING;
1431	ifq_clr_oactive(&ifp->if_snd);
1432
1433	splx(s);
1434
1435	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
1436	timeout_add_sec(&sc->wb_tick_tmo, 1);
1437
1438	return;
1439}
1440
1441/*
1442 * Set media options.
1443 */
1444int
1445wb_ifmedia_upd(struct ifnet *ifp)
1446{
1447	struct wb_softc *sc = ifp->if_softc;
1448
1449	if (ifp->if_flags & IFF_UP)
1450		wb_init(sc);
1451
1452	return(0);
1453}
1454
1455/*
1456 * Report current media status.
1457 */
1458void
1459wb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1460{
1461	struct wb_softc *sc = ifp->if_softc;
1462	struct mii_data *mii = &sc->sc_mii;
1463
1464	mii_pollstat(mii);
1465	ifmr->ifm_active = mii->mii_media_active;
1466	ifmr->ifm_status = mii->mii_media_status;
1467}
1468
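/*
 * Handle socket ioctls: bring the interface up or down, pass media
 * requests to ifmedia, and fall back to ether_ioctl() for the rest,
 * reprogramming the multicast filter when necessary.
 */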
1469int
1470wb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1471{
1472	struct wb_softc		*sc = ifp->if_softc;
1473	struct ifreq		*ifr = (struct ifreq *) data;
1474	int			s, error = 0;
1475
1476	s = splnet();
1477
1478	switch(command) {
1479	case SIOCSIFADDR:
1480		ifp->if_flags |= IFF_UP;
1481		wb_init(sc);
1482		break;
1483
1484	case SIOCSIFFLAGS:
1485		if (ifp->if_flags & IFF_UP) {
1486			wb_init(sc);
1487		} else {
1488			if (ifp->if_flags & IFF_RUNNING)
1489				wb_stop(sc);
1490		}
1491		error = 0;
1492		break;
1493
1494	case SIOCGIFMEDIA:
1495	case SIOCSIFMEDIA:
1496		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1497		break;
1498
1499	default:
1500		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1501	}
1502
1503	if (error == ENETRESET) {
1504		if (ifp->if_flags & IFF_RUNNING)
1505			wb_setmulti(sc);
1506		error = 0;
1507	}
1508
1509	splx(s);
1510	return(error);
1511}
1512
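/*
 * Transmit watchdog: a transmission has taken too long to complete, so
 * count an output error, reinitialize the chip and restart transmission.
 */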
1513void
1514wb_watchdog(struct ifnet *ifp)
1515{
1516	struct wb_softc		*sc;
1517
1518	sc = ifp->if_softc;
1519
1520	ifp->if_oerrors++;
1521	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1522
1523#ifdef foo
1524	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1525		printf("%s: no carrier - transceiver cable problem?\n",
1526		    sc->sc_dev.dv_xname);
1527#endif
1528	wb_init(sc);
1529
1530	if (!ifq_empty(&ifp->if_snd))
1531		wb_start(ifp);
1532
1533	return;
1534}
1535
1536/*
1537 * Stop the adapter and free any mbufs allocated to the
1538 * RX and TX lists.
1539 */
1540void
1541wb_stop(struct wb_softc *sc)
1542{
1543	int			i;
1544	struct ifnet		*ifp;
1545
1546	ifp = &sc->arpcom.ac_if;
1547	ifp->if_timer = 0;
1548
1549	timeout_del(&sc->wb_tick_tmo);
1550
1551	ifp->if_flags &= ~IFF_RUNNING;
1552	ifq_clr_oactive(&ifp->if_snd);
1553
1554	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
1555	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
1556	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
1557	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);
1558
1559	/*
1560	 * Free data in the RX lists.
1561	 */
1562	bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list));
1563
1564	/*
1565	 * Free the TX list buffers.
1566	 */
1567	for (i = 0; i < WB_TX_LIST_CNT; i++) {
1568		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
1569			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
1570			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
1571		}
1572	}
1573
1574	bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list));
1575}
1576
1577const struct cfattach wb_ca = {
1578	sizeof(struct wb_softc), wb_probe, wb_attach
1579};
1580
1581struct cfdriver wb_cd = {
1582	NULL, "wb", DV_IFNET
1583};
1584