if_xl.c revision 131455
1/*
2 * Copyright (c) 1997, 1998, 1999
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/pci/if_xl.c 131455 2004-07-02 12:16:02Z mlaier $");
35
36/*
37 * 3Com 3c90x Etherlink XL PCI NIC driver
38 *
39 * Supports the 3Com "boomerang", "cyclone", "hurricane" and "tornado" PCI
40 * bus-master chips (3c90x cards and embedded controllers) including
41 * the following:
42 *
43 * 3Com 3c900-TPO	10Mbps/RJ-45
44 * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
45 * 3Com 3c905-TX	10/100Mbps/RJ-45
46 * 3Com 3c905-T4	10/100Mbps/RJ-45
47 * 3Com 3c900B-TPO	10Mbps/RJ-45
48 * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
49 * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
50 * 3Com 3c900B-FL	10Mbps/Fiber-optic
51 * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
52 * 3Com 3c905B-TX	10/100Mbps/RJ-45
53 * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
54 * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
55 * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
56 * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
57 * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
58 * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
59 * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60 * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61 * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69 * Dell on-board 3c920 10/100Mbps/RJ-45
70 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71 * Dell Latitude laptop docking station embedded 3c905-TX
72 *
73 * Written by Bill Paul <wpaul@ctr.columbia.edu>
74 * Electrical Engineering Department
75 * Columbia University, New York City
76 */
77/*
78 * The 3c90x series chips use a bus-master DMA interface for transferring
79 * packets to and from the controller chip. Some of the "vortex" cards
80 * (3c59x) also supported a bus master mode, however for those chips
81 * you could only DMA packets to/from a contiguous memory buffer. For
82 * transmission this would mean copying the contents of the queued mbuf
83 * chain into an mbuf cluster and then DMAing the cluster. This extra
84 * copy would sort of defeat the purpose of the bus master support for
85 * any packet that doesn't fit into a single mbuf.
86 *
87 * By contrast, the 3c90x cards support a fragment-based bus master
88 * mode where mbuf chains can be encapsulated using TX descriptors.
89 * This is similar to other PCI chips such as the Texas Instruments
90 * ThunderLAN and the Intel 82557/82558.
91 *
92 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
93 * bus master chips because they maintain the old PIO interface for
94 * backwards compatibility, but starting with the 3c905B and the
95 * "cyclone" chips, the compatibility interface has been dropped.
96 * Since using bus master DMA is a big win, we use this driver to
97 * support the PCI "boomerang" chips as well, even though they also
98 * work with the "vortex" driver, in order to obtain better performance.
99 *
100 * This driver is in the /sys/pci directory because it only supports
101 * PCI-based NICs.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/sockio.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/kernel.h>
110#include <sys/module.h>
111#include <sys/socket.h>
112
113#include <net/if.h>
114#include <net/if_arp.h>
115#include <net/ethernet.h>
116#include <net/if_dl.h>
117#include <net/if_media.h>
118
119#include <net/bpf.h>
120
121#include <machine/bus_memio.h>
122#include <machine/bus_pio.h>
123#include <machine/bus.h>
124#include <machine/resource.h>
125#include <sys/bus.h>
126#include <sys/rman.h>
127
128#include <dev/mii/mii.h>
129#include <dev/mii/miivar.h>
130
131#include <dev/pci/pcireg.h>
132#include <dev/pci/pcivar.h>
133
134MODULE_DEPEND(xl, pci, 1, 1, 1);
135MODULE_DEPEND(xl, ether, 1, 1, 1);
136MODULE_DEPEND(xl, miibus, 1, 1, 1);
137
138/* "device miibus" required.  See GENERIC if you get errors here. */
139#include "miibus_if.h"
140
141#include <pci/if_xlreg.h>
142
143/*
144 * TX Checksumming is disabled by default for two reasons:
145 * - TX Checksumming will occasionally produce corrupt packets
146 * - TX Checksumming seems to reduce performance
147 *
148 * Only 905B/C cards were reported to have this problem; it is possible
149 * that later chips _may_ be immune.
150 */
151#define	XL905B_TXCSUM_BROKEN	1
152
153#ifdef XL905B_TXCSUM_BROKEN
154#define XL905B_CSUM_FEATURES	0
155#else
156#define XL905B_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
157#endif
158
159/*
160 * Various supported device vendors/types and their names.
161 */
162static struct xl_type xl_devs[] = {
163	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
164		"3Com 3c900-TPO Etherlink XL" },
165	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
166		"3Com 3c900-COMBO Etherlink XL" },
167	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
168		"3Com 3c905-TX Fast Etherlink XL" },
169	{ TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
170		"3Com 3c905-T4 Fast Etherlink XL" },
171	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
172		"3Com 3c900B-TPO Etherlink XL" },
173	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
174		"3Com 3c900B-COMBO Etherlink XL" },
175	{ TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
176		"3Com 3c900B-TPC Etherlink XL" },
177	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
178		"3Com 3c900B-FL Etherlink XL" },
179	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
180		"3Com 3c905B-TX Fast Etherlink XL" },
181	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
182		"3Com 3c905B-T4 Fast Etherlink XL" },
183	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
184		"3Com 3c905B-FX/SC Fast Etherlink XL" },
185	{ TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
186		"3Com 3c905B-COMBO Fast Etherlink XL" },
187	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
188		"3Com 3c905C-TX Fast Etherlink XL" },
189	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
190		"3Com 3c920B-EMB Integrated Fast Etherlink XL" },
191	{ TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
192		"3Com 3c980 Fast Etherlink XL" },
193	{ TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
194		"3Com 3c980C Fast Etherlink XL" },
195	{ TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
196		"3Com 3cSOHO100-TX OfficeConnect" },
197	{ TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
198		"3Com 3c450-TX HomeConnect" },
199	{ TC_VENDORID, TC_DEVICEID_HURRICANE_555,
200		"3Com 3c555 Fast Etherlink XL" },
201	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556,
202		"3Com 3c556 Fast Etherlink XL" },
203	{ TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
204		"3Com 3c556B Fast Etherlink XL" },
205	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
206		"3Com 3c575TX Fast Etherlink XL" },
207	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
208		"3Com 3c575B Fast Etherlink XL" },
209	{ TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
210		"3Com 3c575C Fast Etherlink XL" },
211	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656,
212		"3Com 3c656 Fast Etherlink XL" },
213	{ TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
214		"3Com 3c656B Fast Etherlink XL" },
215	{ TC_VENDORID, TC_DEVICEID_TORNADO_656C,
216		"3Com 3c656C Fast Etherlink XL" },
217	{ 0, 0, NULL }
218};
219
220static int xl_probe		(device_t);
221static int xl_attach		(device_t);
222static int xl_detach		(device_t);
223
224static int xl_newbuf		(struct xl_softc *, struct xl_chain_onefrag *);
225static void xl_stats_update	(void *);
226static int xl_encap		(struct xl_softc *, struct xl_chain *,
227						struct mbuf *);
228static void xl_rxeof		(struct xl_softc *);
229static int xl_rx_resync		(struct xl_softc *);
230static void xl_txeof		(struct xl_softc *);
231static void xl_txeof_90xB	(struct xl_softc *);
232static void xl_txeoc		(struct xl_softc *);
233static void xl_intr		(void *);
234static void xl_start		(struct ifnet *);
235static void xl_start_90xB	(struct ifnet *);
236static int xl_ioctl		(struct ifnet *, u_long, caddr_t);
237static void xl_init		(void *);
238static void xl_stop		(struct xl_softc *);
239static void xl_watchdog		(struct ifnet *);
240static void xl_shutdown		(device_t);
241static int xl_suspend		(device_t);
242static int xl_resume		(device_t);
243
244static int xl_ifmedia_upd	(struct ifnet *);
245static void xl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
246
247static int xl_eeprom_wait	(struct xl_softc *);
248static int xl_read_eeprom	(struct xl_softc *, caddr_t, int, int, int);
249static void xl_mii_sync		(struct xl_softc *);
250static void xl_mii_send		(struct xl_softc *, u_int32_t, int);
251static int xl_mii_readreg	(struct xl_softc *, struct xl_mii_frame *);
252static int xl_mii_writereg	(struct xl_softc *, struct xl_mii_frame *);
253
254static void xl_setcfg		(struct xl_softc *);
255static void xl_setmode		(struct xl_softc *, int);
256static void xl_setmulti		(struct xl_softc *);
257static void xl_setmulti_hash	(struct xl_softc *);
258static void xl_reset		(struct xl_softc *);
259static int xl_list_rx_init	(struct xl_softc *);
260static int xl_list_tx_init	(struct xl_softc *);
261static int xl_list_tx_init_90xB	(struct xl_softc *);
262static void xl_wait		(struct xl_softc *);
263static void xl_mediacheck	(struct xl_softc *);
264static void xl_choose_xcvr	(struct xl_softc *, int);
265static void xl_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
266static void xl_dma_map_rxbuf	(void *, bus_dma_segment_t *, int, bus_size_t,
267						int);
268static void xl_dma_map_txbuf	(void *, bus_dma_segment_t *, int, bus_size_t,
269						int);
270#ifdef notdef
271static void xl_testpacket	(struct xl_softc *);
272#endif
273
274static int xl_miibus_readreg	(device_t, int, int);
275static int xl_miibus_writereg	(device_t, int, int, int);
276static void xl_miibus_statchg	(device_t);
277static void xl_miibus_mediainit	(device_t);
278
279static device_method_t xl_methods[] = {
280	/* Device interface */
281	DEVMETHOD(device_probe,		xl_probe),
282	DEVMETHOD(device_attach,	xl_attach),
283	DEVMETHOD(device_detach,	xl_detach),
284	DEVMETHOD(device_shutdown,	xl_shutdown),
285	DEVMETHOD(device_suspend,	xl_suspend),
286	DEVMETHOD(device_resume,	xl_resume),
287
288	/* bus interface */
289	DEVMETHOD(bus_print_child,	bus_generic_print_child),
290	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
291
292	/* MII interface */
293	DEVMETHOD(miibus_readreg,	xl_miibus_readreg),
294	DEVMETHOD(miibus_writereg,	xl_miibus_writereg),
295	DEVMETHOD(miibus_statchg,	xl_miibus_statchg),
296	DEVMETHOD(miibus_mediainit,	xl_miibus_mediainit),
297
298	{ 0, 0 }
299};
300
301static driver_t xl_driver = {
302	"xl",
303	xl_methods,
304	sizeof(struct xl_softc)
305};
306
307static devclass_t xl_devclass;
308
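/*
 * Attach the xl driver to both the pci and cardbus buses, and hang the
 * miibus (PHY) driver off of each xl instance.
 */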
309DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
310DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
311DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
312
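/*
 * bus_dma callbacks.  xl_dma_map_addr() just records the bus address of
 * a single-segment mapping (used for the RX and TX descriptor lists);
 * xl_dma_map_rxbuf() and xl_dma_map_txbuf() below fill in the descriptor
 * fragment pointers for mbuf mappings.
 */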
313static void
314xl_dma_map_addr(arg, segs, nseg, error)
315	void *arg;
316	bus_dma_segment_t *segs;
317	int nseg, error;
318{
319	u_int32_t *paddr;
320
321	paddr = arg;
322	*paddr = segs->ds_addr;
323}
324
325static void
326xl_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
327	void *arg;
328	bus_dma_segment_t *segs;
329	int nseg;
330	bus_size_t mapsize;
331	int error;
332{
333	u_int32_t *paddr;
334
335	if (error)
336		return;
337	KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
338	paddr = arg;
339	*paddr = segs->ds_addr;
340}
341
342static void
343xl_dma_map_txbuf(arg, segs, nseg, mapsize, error)
344	void *arg;
345	bus_dma_segment_t *segs;
346	int nseg;
347	bus_size_t mapsize;
348	int error;
349{
350	struct xl_list *l;
351	int i, total_len;
352
353	if (error)
354		return;
355
356	KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));
357
358	total_len = 0;
359	l = arg;
360	for (i = 0; i < nseg; i++) {
361		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
362		l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
363		l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
364		total_len += segs[i].ds_len;
365	}
366	l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
367	    XL_LAST_FRAG);
368	l->xl_status = htole32(total_len);
369	l->xl_next = 0;
370}
371
372/*
373 * Murphy's law says that it's possible the chip can wedge and
374 * the 'command in progress' bit may never clear. Hence, we wait
375 * only a finite amount of time to avoid getting caught in an
376 * infinite loop. Normally this delay routine would be a macro,
377 * but it isn't called during normal operation so we can afford
378 * to make it a function.
379 */
380static void
381xl_wait(sc)
382	struct xl_softc		*sc;
383{
384	register int		i;
385
386	for (i = 0; i < XL_TIMEOUT; i++) {
387		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
388			break;
389	}
390
391	if (i == XL_TIMEOUT)
392		printf("xl%d: command never completed!\n", sc->xl_unit);
393
394	return;
395}
396
397/*
398 * MII access routines are provided for adapters with external
399 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
400 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
401 * Note: if you don't perform the MDIO operations just right,
402 * it's possible to end up with code that works correctly with
403 * some chips/CPUs/processor speeds/bus speeds/etc but not
404 * with others.
405 */
406#define MII_SET(x)					\
407	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
408		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
409
410#define MII_CLR(x)					\
411	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
412		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
413
414/*
415 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
416 */
417static void
418xl_mii_sync(sc)
419	struct xl_softc		*sc;
420{
421	register int		i;
422
423	XL_SEL_WIN(4);
424	MII_SET(XL_MII_DIR|XL_MII_DATA);
425
426	for (i = 0; i < 32; i++) {
427		MII_SET(XL_MII_CLK);
428		MII_SET(XL_MII_DATA);
429		MII_SET(XL_MII_DATA);
430		MII_CLR(XL_MII_CLK);
431		MII_SET(XL_MII_DATA);
432		MII_SET(XL_MII_DATA);
433	}
434
435	return;
436}
437
438/*
439 * Clock a series of bits through the MII.
440 */
441static void
442xl_mii_send(sc, bits, cnt)
443	struct xl_softc		*sc;
444	u_int32_t		bits;
445	int			cnt;
446{
447	int			i;
448
449	XL_SEL_WIN(4);
450	MII_CLR(XL_MII_CLK);
451
452	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
453                if (bits & i) {
454			MII_SET(XL_MII_DATA);
455                } else {
456			MII_CLR(XL_MII_DATA);
457                }
458		MII_CLR(XL_MII_CLK);
459		MII_SET(XL_MII_CLK);
460	}
461}
462
463/*
464 * Read a PHY register through the MII.
465 */
466static int
467xl_mii_readreg(sc, frame)
468	struct xl_softc		*sc;
469	struct xl_mii_frame	*frame;
470
471{
472	int			i, ack;
473
474	XL_LOCK(sc);
475
476	/*
477	 * Set up frame for RX.
478	 */
479	frame->mii_stdelim = XL_MII_STARTDELIM;
480	frame->mii_opcode = XL_MII_READOP;
481	frame->mii_turnaround = 0;
482	frame->mii_data = 0;
483
484	/*
485	 * Select register window 4.
486	 */
487
488	XL_SEL_WIN(4);
489
490	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
491	/*
492 	 * Turn on data xmit.
493	 */
494	MII_SET(XL_MII_DIR);
495
496	xl_mii_sync(sc);
497
498	/*
499	 * Send command/address info.
500	 */
501	xl_mii_send(sc, frame->mii_stdelim, 2);
502	xl_mii_send(sc, frame->mii_opcode, 2);
503	xl_mii_send(sc, frame->mii_phyaddr, 5);
504	xl_mii_send(sc, frame->mii_regaddr, 5);
505
506	/* Idle bit */
507	MII_CLR((XL_MII_CLK|XL_MII_DATA));
508	MII_SET(XL_MII_CLK);
509
510	/* Turn off xmit. */
511	MII_CLR(XL_MII_DIR);
512
513	/* Check for ack */
514	MII_CLR(XL_MII_CLK);
515	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
516	MII_SET(XL_MII_CLK);
517
518	/*
519	 * Now try reading data bits. If the ack failed, we still
520	 * need to clock through 16 cycles to keep the PHY(s) in sync.
521	 */
522	if (ack) {
523		for(i = 0; i < 16; i++) {
524			MII_CLR(XL_MII_CLK);
525			MII_SET(XL_MII_CLK);
526		}
527		goto fail;
528	}
529
530	for (i = 0x8000; i; i >>= 1) {
531		MII_CLR(XL_MII_CLK);
532		if (!ack) {
533			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
534				frame->mii_data |= i;
535		}
536		MII_SET(XL_MII_CLK);
537	}
538
539fail:
540
541	MII_CLR(XL_MII_CLK);
542	MII_SET(XL_MII_CLK);
543
544	XL_UNLOCK(sc);
545
546	if (ack)
547		return(1);
548	return(0);
549}
550
551/*
552 * Write to a PHY register through the MII.
553 */
554static int
555xl_mii_writereg(sc, frame)
556	struct xl_softc		*sc;
557	struct xl_mii_frame	*frame;
558
559{
560	XL_LOCK(sc);
561
562	/*
563	 * Set up frame for TX.
564	 */
565
566	frame->mii_stdelim = XL_MII_STARTDELIM;
567	frame->mii_opcode = XL_MII_WRITEOP;
568	frame->mii_turnaround = XL_MII_TURNAROUND;
569
570	/*
571	 * Select register window 4.
572	 */
573	XL_SEL_WIN(4);
574
575	/*
576 	 * Turn on data output.
577	 */
578	MII_SET(XL_MII_DIR);
579
580	xl_mii_sync(sc);
581
582	xl_mii_send(sc, frame->mii_stdelim, 2);
583	xl_mii_send(sc, frame->mii_opcode, 2);
584	xl_mii_send(sc, frame->mii_phyaddr, 5);
585	xl_mii_send(sc, frame->mii_regaddr, 5);
586	xl_mii_send(sc, frame->mii_turnaround, 2);
587	xl_mii_send(sc, frame->mii_data, 16);
588
589	/* Idle bit. */
590	MII_SET(XL_MII_CLK);
591	MII_CLR(XL_MII_CLK);
592
593	/*
594	 * Turn off xmit.
595	 */
596	MII_CLR(XL_MII_DIR);
597
598	XL_UNLOCK(sc);
599
600	return(0);
601}
602
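/*
 * miibus read/write methods.  These wrap the bit-bang MII routines above
 * so that the generic miibus code can access PHY registers through this
 * driver.
 */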
603static int
604xl_miibus_readreg(dev, phy, reg)
605	device_t		dev;
606	int			phy, reg;
607{
608	struct xl_softc		*sc;
609	struct xl_mii_frame	frame;
610
611	sc = device_get_softc(dev);
612
613	/*
614	 * Pretend that PHYs are only available at MII address 24.
615	 * This is to guard against problems with certain 3Com ASIC
616	 * revisions that incorrectly map the internal transceiver
617	 * control registers at all MII addresses. This can cause
618	 * the miibus code to attach the same PHY several times over.
619	 */
620	if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
621		return(0);
622
623	bzero((char *)&frame, sizeof(frame));
624
625	frame.mii_phyaddr = phy;
626	frame.mii_regaddr = reg;
627	xl_mii_readreg(sc, &frame);
628
629	return(frame.mii_data);
630}
631
632static int
633xl_miibus_writereg(dev, phy, reg, data)
634	device_t		dev;
635	int			phy, reg, data;
636{
637	struct xl_softc		*sc;
638	struct xl_mii_frame	frame;
639
640	sc = device_get_softc(dev);
641
642	if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
643		return(0);
644
645	bzero((char *)&frame, sizeof(frame));
646
647	frame.mii_phyaddr = phy;
648	frame.mii_regaddr = reg;
649	frame.mii_data = data;
650
651	xl_mii_writereg(sc, &frame);
652
653	return(0);
654}
655
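/*
 * Called by miibus when the PHY's negotiated media changes: update the
 * connector selection and sync the MAC's duplex setting with what the
 * PHY reports.
 */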
656static void
657xl_miibus_statchg(dev)
658	device_t		dev;
659{
660        struct xl_softc		*sc;
661        struct mii_data		*mii;
662
663
664	sc = device_get_softc(dev);
665	mii = device_get_softc(sc->xl_miibus);
666
667	XL_LOCK(sc);
668
669	xl_setcfg(sc);
670
671	/* Set ASIC's duplex mode to match the PHY. */
672	XL_SEL_WIN(3);
673	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
674		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
675	else
676		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
677			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
678
679	XL_UNLOCK(sc);
680
681        return;
682}
683
684/*
685 * Special support for the 3c905B-COMBO. This card has 10/100 support
686 * plus BNC and AUI ports. This means we will have both a miibus attached
687 * and some non-MII media settings. In order to allow this, we have to
688 * add the extra media to the miibus's ifmedia struct, but we can't do
689 * that during xl_attach() because the miibus hasn't been attached yet.
690 * So instead, we wait until the miibus probe/attach is done, at which
691 * point we will get a callback telling us that it's safe to add our
692 * extra media.
693 */
694static void
695xl_miibus_mediainit(dev)
696	device_t		dev;
697{
698        struct xl_softc		*sc;
699        struct mii_data		*mii;
700	struct ifmedia		*ifm;
701
702	sc = device_get_softc(dev);
703	mii = device_get_softc(sc->xl_miibus);
704	ifm = &mii->mii_media;
705
706	XL_LOCK(sc);
707
708	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
709		/*
710		 * Check for a 10baseFL board in disguise.
711		 */
712		if (sc->xl_type == XL_TYPE_905B &&
713		    sc->xl_media == XL_MEDIAOPT_10FL) {
714			if (bootverbose)
715				printf("xl%d: found 10baseFL\n", sc->xl_unit);
716			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
717			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL);
718			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
719				ifmedia_add(ifm,
720				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
721		} else {
722			if (bootverbose)
723				printf("xl%d: found AUI\n", sc->xl_unit);
724			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
725		}
726	}
727
728	if (sc->xl_media & XL_MEDIAOPT_BNC) {
729		if (bootverbose)
730			printf("xl%d: found BNC\n", sc->xl_unit);
731		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
732	}
733
734	XL_UNLOCK(sc);
735
736	return;
737}
738
739/*
740 * The EEPROM is slow: give it time to come ready after issuing
741 * it a command.
742 */
743static int
744xl_eeprom_wait(sc)
745	struct xl_softc		*sc;
746{
747	int			i;
748
749	for (i = 0; i < 100; i++) {
750		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
751			DELAY(162);
752		else
753			break;
754	}
755
756	if (i == 100) {
757		printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
758		return(1);
759	}
760
761	return(0);
762}
763
764/*
765 * Read a sequence of words from the EEPROM. Note that ethernet address
766 * data is stored in the EEPROM in network byte order.
767 */
768static int
769xl_read_eeprom(sc, dest, off, cnt, swap)
770	struct xl_softc		*sc;
771	caddr_t			dest;
772	int			off;
773	int			cnt;
774	int			swap;
775{
776	int			err = 0, i;
777	u_int16_t		word = 0, *ptr;
778#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
779#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
780	/* WARNING! DANGER!
781	 * It's easy to accidentally overwrite the rom content!
782	 * Note: the 3c575 uses 8bit EEPROM offsets.
783	 */
784	XL_SEL_WIN(0);
785
786	if (xl_eeprom_wait(sc))
787		return(1);
788
789	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
790		off += 0x30;
791
792	for (i = 0; i < cnt; i++) {
793		if (sc->xl_flags & XL_FLAG_8BITROM)
794			CSR_WRITE_2(sc, XL_W0_EE_CMD,
795			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
796		else
797			CSR_WRITE_2(sc, XL_W0_EE_CMD,
798			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
799		err = xl_eeprom_wait(sc);
800		if (err)
801			break;
802		word = CSR_READ_2(sc, XL_W0_EE_DATA);
803		ptr = (u_int16_t *)(dest + (i * 2));
804		if (swap)
805			*ptr = ntohs(word);
806		else
807			*ptr = word;
808	}
809
810	return(err ? 1 : 0);
811}
812
813/*
814 * NICs older than the 3c905B have only one multicast option, which
815 * is to enable reception of all multicast frames.
816 */
817static void
818xl_setmulti(sc)
819	struct xl_softc		*sc;
820{
821	struct ifnet		*ifp;
822	struct ifmultiaddr	*ifma;
823	u_int8_t		rxfilt;
824	int			mcnt = 0;
825
826	ifp = &sc->arpcom.ac_if;
827
828	XL_SEL_WIN(5);
829	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
830
831	if (ifp->if_flags & IFF_ALLMULTI) {
832		rxfilt |= XL_RXFILTER_ALLMULTI;
833		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
834		return;
835	}
836
837	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
838		mcnt++;
839
840	if (mcnt)
841		rxfilt |= XL_RXFILTER_ALLMULTI;
842	else
843		rxfilt &= ~XL_RXFILTER_ALLMULTI;
844
845	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
846
847	return;
848}
849
850/*
851 * 3c905B adapters have a hash filter that we can program.
852 */
853static void
854xl_setmulti_hash(sc)
855	struct xl_softc		*sc;
856{
857	struct ifnet		*ifp;
858	int			h = 0, i;
859	struct ifmultiaddr	*ifma;
860	u_int8_t		rxfilt;
861	int			mcnt = 0;
862
863	ifp = &sc->arpcom.ac_if;
864
865	XL_SEL_WIN(5);
866	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
867
868	if (ifp->if_flags & IFF_ALLMULTI) {
869		rxfilt |= XL_RXFILTER_ALLMULTI;
870		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
871		return;
872	} else
873		rxfilt &= ~XL_RXFILTER_ALLMULTI;
874
875
876	/* first, zot all the existing hash bits */
877	for (i = 0; i < XL_HASHFILT_SIZE; i++)
878		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
879
880	/* now program new ones */
881	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
882		if (ifma->ifma_addr->sa_family != AF_LINK)
883			continue;
884		/*
885		 * Note: the 3c905B currently only supports a 64-bit hash
886		 * table, which means we really only need 6 bits, but the
887		 * manual indicates that future chip revisions will have a
888		 * 256-bit hash table, hence the routine is set up to
889		 * calculate 8 bits of position info in case we need it some
890		 * day.
891		 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have
892		 * a 256 bit hash table. This means we have to use all 8 bits
893		 * regardless. On older cards, the upper 2 bits will be
894		 * ignored. Grrrr....
895 		 */
896		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
897		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF;
898		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
899		mcnt++;
900	}
901
902	if (mcnt)
903		rxfilt |= XL_RXFILTER_MULTIHASH;
904	else
905		rxfilt &= ~XL_RXFILTER_MULTIHASH;
906
907	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
908
909	return;
910}
911
912#ifdef notdef
913static void
914xl_testpacket(sc)
915	struct xl_softc		*sc;
916{
917	struct mbuf		*m;
918	struct ifnet		*ifp;
919
920	ifp = &sc->arpcom.ac_if;
921
922	MGETHDR(m, M_DONTWAIT, MT_DATA);
923
924	if (m == NULL)
925		return;
926
927	bcopy(&sc->arpcom.ac_enaddr,
928		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
929	bcopy(&sc->arpcom.ac_enaddr,
930		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
931	mtod(m, struct ether_header *)->ether_type = htons(3);
932	mtod(m, unsigned char *)[14] = 0;
933	mtod(m, unsigned char *)[15] = 0;
934	mtod(m, unsigned char *)[16] = 0xE3;
935	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
936	IFQ_ENQUEUE(&ifp->if_snd, m);
937	xl_start(ifp);
938
939	return;
940}
941#endif
942
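/*
 * Program the internal config register's connector selection to match
 * the media options: MII/100baseT4 boards get the MII transceiver,
 * 10/100baseTX boards get the autoselect transceiver.
 */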
943static void
944xl_setcfg(sc)
945	struct xl_softc		*sc;
946{
947	u_int32_t		icfg;
948
949	XL_SEL_WIN(3);
950	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
951	icfg &= ~XL_ICFG_CONNECTOR_MASK;
952	if (sc->xl_media & XL_MEDIAOPT_MII ||
953		sc->xl_media & XL_MEDIAOPT_BT4)
954		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
955	if (sc->xl_media & XL_MEDIAOPT_BTX)
956		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
957
958	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
959	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
960
961	return;
962}
963
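/*
 * Manually select one of the non-MII media types (10baseT, 100baseFX,
 * AUI, 10baseFL or BNC) by programming the internal config, media
 * status and MAC control registers to match.
 */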
964static void
965xl_setmode(sc, media)
966	struct xl_softc		*sc;
967	int			media;
968{
969	u_int32_t		icfg;
970	u_int16_t		mediastat;
971
972	printf("xl%d: selecting ", sc->xl_unit);
973
974	XL_SEL_WIN(4);
975	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
976	XL_SEL_WIN(3);
977	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
978
979	if (sc->xl_media & XL_MEDIAOPT_BT) {
980		if (IFM_SUBTYPE(media) == IFM_10_T) {
981			printf("10baseT transceiver, ");
982			sc->xl_xcvr = XL_XCVR_10BT;
983			icfg &= ~XL_ICFG_CONNECTOR_MASK;
984			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
985			mediastat |= XL_MEDIASTAT_LINKBEAT|
986					XL_MEDIASTAT_JABGUARD;
987			mediastat &= ~XL_MEDIASTAT_SQEENB;
988		}
989	}
990
991	if (sc->xl_media & XL_MEDIAOPT_BFX) {
992		if (IFM_SUBTYPE(media) == IFM_100_FX) {
993			printf("100baseFX port, ");
994			sc->xl_xcvr = XL_XCVR_100BFX;
995			icfg &= ~XL_ICFG_CONNECTOR_MASK;
996			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
997			mediastat |= XL_MEDIASTAT_LINKBEAT;
998			mediastat &= ~XL_MEDIASTAT_SQEENB;
999		}
1000	}
1001
1002	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1003		if (IFM_SUBTYPE(media) == IFM_10_5) {
1004			printf("AUI port, ");
1005			sc->xl_xcvr = XL_XCVR_AUI;
1006			icfg &= ~XL_ICFG_CONNECTOR_MASK;
1007			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1008			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1009					XL_MEDIASTAT_JABGUARD);
1010			mediastat |= ~XL_MEDIASTAT_SQEENB;
1011		}
1012		if (IFM_SUBTYPE(media) == IFM_10_FL) {
1013			printf("10baseFL transceiver, ");
1014			sc->xl_xcvr = XL_XCVR_AUI;
1015			icfg &= ~XL_ICFG_CONNECTOR_MASK;
1016			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1017			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1018					XL_MEDIASTAT_JABGUARD);
1019			mediastat |= ~XL_MEDIASTAT_SQEENB;
1020		}
1021	}
1022
1023	if (sc->xl_media & XL_MEDIAOPT_BNC) {
1024		if (IFM_SUBTYPE(media) == IFM_10_2) {
1025			printf("BNC port, ");
1026			sc->xl_xcvr = XL_XCVR_COAX;
1027			icfg &= ~XL_ICFG_CONNECTOR_MASK;
1028			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1029			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1030					XL_MEDIASTAT_JABGUARD|
1031					XL_MEDIASTAT_SQEENB);
1032		}
1033	}
1034
1035	if ((media & IFM_GMASK) == IFM_FDX ||
1036			IFM_SUBTYPE(media) == IFM_100_FX) {
1037		printf("full duplex\n");
1038		XL_SEL_WIN(3);
1039		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1040	} else {
1041		printf("half duplex\n");
1042		XL_SEL_WIN(3);
1043		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1044			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1045	}
1046
1047	if (IFM_SUBTYPE(media) == IFM_10_2)
1048		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1049	else
1050		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1051	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1052	XL_SEL_WIN(4);
1053	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1054	DELAY(800);
1055	XL_SEL_WIN(7);
1056
1057	return;
1058}
1059
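/*
 * Issue a global reset, then reset the TX and RX engines and restore
 * any reset option bits (LED/MII power inversion) that the global
 * reset clears on some cardbus/MiniPCI variants.
 */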
1060static void
1061xl_reset(sc)
1062	struct xl_softc		*sc;
1063{
1064	register int		i;
1065
1066	XL_SEL_WIN(0);
1067	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
1068		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
1069		     XL_RESETOPT_DISADVFD:0));
1070
1071	/*
1072	 * If we're using memory mapped register mode, pause briefly
1073	 * after issuing the reset command before trying to access any
1074	 * other registers. With my 3c575C cardbus card, failing to do
1075	 * this results in the system locking up while trying to poll
1076	 * the command busy bit in the status register.
1077	 */
1078	if (sc->xl_flags & XL_FLAG_USE_MMIO)
1079		DELAY(100000);
1080
1081	for (i = 0; i < XL_TIMEOUT; i++) {
1082		DELAY(10);
1083		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
1084			break;
1085	}
1086
1087	if (i == XL_TIMEOUT)
1088		printf("xl%d: reset didn't complete\n", sc->xl_unit);
1089
1090	/* Reset TX and RX. */
1091	/* Note: the RX reset takes an absurd amount of time
1092	 * on newer versions of the Tornado chips such as those
1093	 * on the 3c905CX and newer 3c908C cards. We wait an
1094	 * extra amount of time so that xl_wait() doesn't complain
1095	 * and annoy the users.
1096	 */
1097	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1098	DELAY(100000);
1099	xl_wait(sc);
1100	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1101	xl_wait(sc);
1102
1103	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
1104	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
1105		XL_SEL_WIN(2);
1106		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
1107		    XL_W2_RESET_OPTIONS)
1108		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
1109		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
1110		    );
1111	}
1112
1113	/* Wait a little while for the chip to get its brains in order. */
1114	DELAY(100000);
1115        return;
1116}
1117
1118/*
1119 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1120 * IDs against our list and return a device name if we find a match.
1121 */
1122static int
1123xl_probe(dev)
1124	device_t		dev;
1125{
1126	struct xl_type		*t;
1127
1128	t = xl_devs;
1129
1130	while(t->xl_name != NULL) {
1131		if ((pci_get_vendor(dev) == t->xl_vid) &&
1132		    (pci_get_device(dev) == t->xl_did)) {
1133			device_set_desc(dev, t->xl_name);
1134			return(0);
1135		}
1136		t++;
1137	}
1138
1139	return(ENXIO);
1140}
1141
1142/*
1143 * This routine is a kludge to work around possible hardware faults
1144 * or manufacturing defects that can cause the media options register
1145 * (or reset options register, as it's called for the first generation
1146 * 3c90x adapters) to return an incorrect result. I have encountered
1147 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1148 * which doesn't have any of the 'mediaopt' bits set. This screws up
1149 * the attach routine pretty badly because it doesn't know what media
1150 * to look for. If we find ourselves in this predicament, this routine
1151 * will try to guess the media options values and warn the user of a
1152 * possible manufacturing defect with his adapter/system/whatever.
1153 */
1154static void
1155xl_mediacheck(sc)
1156	struct xl_softc		*sc;
1157{
1158
1159	/*
1160	 * If some of the media options bits are set, assume they are
1161	 * correct. If not, try to figure it out down below.
1162	 * XXX I should check for 10baseFL, but I don't have an adapter
1163	 * to test with.
1164	 */
1165	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1166		/*
1167	 	 * Check the XCVR value. If it's not in the normal range
1168	 	 * of values, we need to fake it up here.
1169	 	 */
1170		if (sc->xl_xcvr <= XL_XCVR_AUTO)
1171			return;
1172		else {
1173			printf("xl%d: bogus xcvr value "
1174			"in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1175			printf("xl%d: choosing new default based "
1176				"on card type\n", sc->xl_unit);
1177		}
1178	} else {
1179		if (sc->xl_type == XL_TYPE_905B &&
1180		    sc->xl_media & XL_MEDIAOPT_10FL)
1181			return;
1182		printf("xl%d: WARNING: no media options bits set in "
1183			"the media options register!!\n", sc->xl_unit);
1184		printf("xl%d: this could be a manufacturing defect in "
1185			"your adapter or system\n", sc->xl_unit);
1186		printf("xl%d: attempting to guess media type; you "
1187			"should probably consult your vendor\n", sc->xl_unit);
1188	}
1189
1190	xl_choose_xcvr(sc, 1);
1191
1192	return;
1193}
1194
1195static void
1196xl_choose_xcvr(sc, verbose)
1197	struct xl_softc		*sc;
1198	int			verbose;
1199{
1200	u_int16_t		devid;
1201
1202	/*
1203	 * Read the device ID from the EEPROM.
1204	 * This is what's loaded into the PCI device ID register, so it has
1205	 * to be correct; otherwise we wouldn't have gotten this far.
1206	 */
1207	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1208
1209	switch(devid) {
1210	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
1211	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
1212		sc->xl_media = XL_MEDIAOPT_BT;
1213		sc->xl_xcvr = XL_XCVR_10BT;
1214		if (verbose)
1215			printf("xl%d: guessing 10BaseT "
1216			    "transceiver\n", sc->xl_unit);
1217		break;
1218	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
1219	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
1220		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1221		sc->xl_xcvr = XL_XCVR_10BT;
1222		if (verbose)
1223			printf("xl%d: guessing COMBO "
1224			    "(AUI/BNC/TP)\n", sc->xl_unit);
1225		break;
1226	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
1227		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
1228		sc->xl_xcvr = XL_XCVR_10BT;
1229		if (verbose)
1230			printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
1231		break;
1232	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
1233		sc->xl_media = XL_MEDIAOPT_10FL;
1234		sc->xl_xcvr = XL_XCVR_AUI;
1235		if (verbose)
1236			printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
1237		break;
1238	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
1239	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
1240	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
1241	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
1242	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
1243	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
1244	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
1245	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
1246	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
1247	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
1248	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
1249		sc->xl_media = XL_MEDIAOPT_MII;
1250		sc->xl_xcvr = XL_XCVR_MII;
1251		if (verbose)
1252			printf("xl%d: guessing MII\n", sc->xl_unit);
1253		break;
1254	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
1255	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
1256		sc->xl_media = XL_MEDIAOPT_BT4;
1257		sc->xl_xcvr = XL_XCVR_MII;
1258		if (verbose)
1259			printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
1260		break;
1261	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
1262	case TC_DEVICEID_HURRICANE_10_100BT_SERV: /* 3c980-TX */
1263	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
1264	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
1265	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
1266	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
1267		sc->xl_media = XL_MEDIAOPT_BTX;
1268		sc->xl_xcvr = XL_XCVR_AUTO;
1269		if (verbose)
1270			printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
1271		break;
1272	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
1273		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1274		sc->xl_xcvr = XL_XCVR_AUTO;
1275		if (verbose)
1276			printf("xl%d: guessing 10/100 "
1277			    "plus BNC/AUI\n", sc->xl_unit);
1278		break;
1279	default:
1280		printf("xl%d: unknown device ID: %x -- "
1281			"defaulting to 10baseT\n", sc->xl_unit, devid);
1282		sc->xl_media = XL_MEDIAOPT_BT;
1283		break;
1284	}
1285
1286	return;
1287}
1288
1289/*
1290 * Attach the interface. Allocate softc structures, do ifmedia
1291 * setup and ethernet/BPF attach.
1292 */
1293static int
1294xl_attach(dev)
1295	device_t		dev;
1296{
1297	u_char			eaddr[ETHER_ADDR_LEN];
1298	u_int16_t		xcvr[2];
1299	struct xl_softc		*sc;
1300	struct ifnet		*ifp;
1301	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1302	int			unit, error = 0, rid, res;
1303	uint16_t		did;
1304
1305	sc = device_get_softc(dev);
1306	unit = device_get_unit(dev);
1307
1308	mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1309	    MTX_DEF | MTX_RECURSE);
1310	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1311
1312	did = pci_get_device(dev);
1313
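	/*
	 * Set chip-specific quirk flags (EEPROM layout, PHY handling,
	 * reset behaviour, LED/MII power inversion) based on the PCI
	 * device ID.
	 */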
1314	sc->xl_flags = 0;
1315	if (did == TC_DEVICEID_HURRICANE_555)
1316		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
1317	if (did == TC_DEVICEID_HURRICANE_556 ||
1318	    did == TC_DEVICEID_HURRICANE_556B)
1319		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1320		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
1321		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
1322	if (did == TC_DEVICEID_HURRICANE_555 ||
1323	    did == TC_DEVICEID_HURRICANE_556)
1324		sc->xl_flags |= XL_FLAG_8BITROM;
1325	if (did == TC_DEVICEID_HURRICANE_556B)
1326		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
1327
1328	if (did == TC_DEVICEID_HURRICANE_575A ||
1329	    did == TC_DEVICEID_HURRICANE_575B ||
1330	    did == TC_DEVICEID_HURRICANE_575C ||
1331	    did == TC_DEVICEID_HURRICANE_656B ||
1332	    did == TC_DEVICEID_TORNADO_656C)
1333		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1334		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
1335	if (did == TC_DEVICEID_HURRICANE_656)
1336		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
1337	if (did == TC_DEVICEID_HURRICANE_575B)
1338		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
1339	if (did == TC_DEVICEID_HURRICANE_575C)
1340		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1341	if (did == TC_DEVICEID_TORNADO_656C)
1342		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1343	if (did == TC_DEVICEID_HURRICANE_656 ||
1344	    did == TC_DEVICEID_HURRICANE_656B)
1345		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
1346		    XL_FLAG_INVERT_LED_PWR;
1347	if (did == TC_DEVICEID_TORNADO_10_100BT_920B)
1348		sc->xl_flags |= XL_FLAG_PHYOK;
1349
1350	switch (did) {
1351	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
1352	case TC_DEVICEID_HURRICANE_575A:
1353	case TC_DEVICEID_HURRICANE_575B:
1354	case TC_DEVICEID_HURRICANE_575C:
1355		sc->xl_flags |= XL_FLAG_NO_MMIO;
1356		break;
1357	default:
1358		break;
1359	}
1360
1361	/*
1362	 * Map control/status registers.
1363	 */
1364	pci_enable_busmaster(dev);
1365
1366	if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
1367		rid = XL_PCI_LOMEM;
1368		res = SYS_RES_MEMORY;
1369
1370		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1371	}
1372
1373	if (sc->xl_res != NULL) {
1374		sc->xl_flags |= XL_FLAG_USE_MMIO;
1375		if (bootverbose)
1376			printf("xl%d: using memory mapped I/O\n", unit);
1377	} else {
1378		rid = XL_PCI_LOIO;
1379		res = SYS_RES_IOPORT;
1380		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
1381		if (sc->xl_res == NULL) {
1382			printf ("xl%d: couldn't map ports/memory\n", unit);
1383			error = ENXIO;
1384			goto fail;
1385		}
1386		if (bootverbose)
1387			printf("xl%d: using port I/O\n", unit);
1388	}
1389
1390	sc->xl_btag = rman_get_bustag(sc->xl_res);
1391	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
1392
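	/*
	 * Some cardbus/MiniPCI variants have a second register window
	 * (the "function" registers) that must be mapped as well.
	 */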
1393	if (sc->xl_flags & XL_FLAG_FUNCREG) {
1394		rid = XL_PCI_FUNCMEM;
1395		sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1396		    RF_ACTIVE);
1397
1398		if (sc->xl_fres == NULL) {
1399			printf ("xl%d: couldn't map ports/memory\n", unit);
1400			error = ENXIO;
1401			goto fail;
1402		}
1403
1404		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
1405		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
1406	}
1407
1408	/* Allocate interrupt */
1409	rid = 0;
1410	sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1411	    RF_SHAREABLE | RF_ACTIVE);
1412	if (sc->xl_irq == NULL) {
1413		printf("xl%d: couldn't map interrupt\n", unit);
1414		error = ENXIO;
1415		goto fail;
1416	}
1417
1418	/* Reset the adapter. */
1419	xl_reset(sc);
1420
1421	/*
1422	 * Get station address from the EEPROM.
1423	 */
1424	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1425		printf("xl%d: failed to read station address\n", sc->xl_unit);
1426		error = ENXIO;
1427		goto fail;
1428	}
1429
1430	sc->xl_unit = unit;
1431	callout_handle_init(&sc->xl_stat_ch);
1432	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1433
1434	/*
1435	 * Now allocate a tag for the DMA descriptor lists and a chunk
1436	 * of DMA-able memory based on the tag.  Also obtain the DMA
1437	 * addresses of the RX and TX ring, which we'll need later.
1438	 * All of our lists are allocated as a contiguous block
1439	 * of memory.
1440	 */
1441	error = bus_dma_tag_create(NULL, 8, 0,
1442	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1443	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
1444	    &sc->xl_ldata.xl_rx_tag);
1445	if (error) {
1446		printf("xl%d: failed to allocate rx dma tag\n", unit);
1447		goto fail;
1448	}
1449
1450	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
1451	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1452	    &sc->xl_ldata.xl_rx_dmamap);
1453	if (error) {
1454		printf("xl%d: no memory for rx list buffers!\n", unit);
1455		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1456		sc->xl_ldata.xl_rx_tag = NULL;
1457		goto fail;
1458	}
1459
1460	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
1461	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
1462	    XL_RX_LIST_SZ, xl_dma_map_addr,
1463	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
1464	if (error) {
1465		printf("xl%d: cannot get dma address of the rx ring!\n", unit);
1466		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1467		    sc->xl_ldata.xl_rx_dmamap);
1468		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1469		sc->xl_ldata.xl_rx_tag = NULL;
1470		goto fail;
1471	}
1472
1473	error = bus_dma_tag_create(NULL, 8, 0,
1474	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1475	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
1476	    &sc->xl_ldata.xl_tx_tag);
1477	if (error) {
1478		printf("xl%d: failed to allocate tx dma tag\n", unit);
1479		goto fail;
1480	}
1481
1482	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
1483	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1484	    &sc->xl_ldata.xl_tx_dmamap);
1485	if (error) {
1486		printf("xl%d: no memory for list buffers!\n", unit);
1487		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1488		sc->xl_ldata.xl_tx_tag = NULL;
1489		goto fail;
1490	}
1491
1492	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
1493	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
1494	    XL_TX_LIST_SZ, xl_dma_map_addr,
1495	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
1496	if (error) {
1497		printf("xl%d: cannot get dma address of the tx ring!\n", unit);
1498		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1499		    sc->xl_ldata.xl_tx_dmamap);
1500		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1501		sc->xl_ldata.xl_tx_tag = NULL;
1502		goto fail;
1503	}
1504
1505	/*
1506	 * Allocate a DMA tag for the mapping of mbufs.
1507	 */
1508	error = bus_dma_tag_create(NULL, 1, 0,
1509	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1510	    MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
1511	    NULL, &sc->xl_mtag);
1512	if (error) {
1513		printf("xl%d: failed to allocate mbuf dma tag\n", unit);
1514		goto fail;
1515	}
1516
1517	/* We need a spare DMA map for the RX ring. */
1518	error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
1519	if (error)
1520		goto fail;
1521
1522	/*
1523	 * Figure out the card type. 3c905B adapters have the
1524	 * 'supportsNoTxLength' bit set in the capabilities
1525	 * word in the EEPROM.
1526	 * Note: my 3c575C cardbus card lies. It returns a value
1527	 * of 0x1578 for its capabilities word, which is somewhat
1528 	 * nonsensical. Another way to distinguish a 3c90x chip
1529	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
1530	 * bit. This will only be set for 3c90x boomerang chips.
1531	 */
1532	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1533	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
1534	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
1535		sc->xl_type = XL_TYPE_905B;
1536	else
1537		sc->xl_type = XL_TYPE_90X;
1538
1539	ifp = &sc->arpcom.ac_if;
1540	ifp->if_softc = sc;
1541	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1542	ifp->if_mtu = ETHERMTU;
1543	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1544	ifp->if_ioctl = xl_ioctl;
1545	ifp->if_capabilities = IFCAP_VLAN_MTU;
1546	if (sc->xl_type == XL_TYPE_905B) {
1547		ifp->if_start = xl_start_90xB;
1548		ifp->if_hwassist = XL905B_CSUM_FEATURES;
1549#ifdef XL905B_TXCSUM_BROKEN
1550		ifp->if_capabilities |= IFCAP_RXCSUM;
1551#else
1552		ifp->if_capabilities |= IFCAP_HWCSUM;
1553#endif
1554	} else
1555		ifp->if_start = xl_start;
1556	ifp->if_watchdog = xl_watchdog;
1557	ifp->if_init = xl_init;
1558	ifp->if_baudrate = 10000000;
1559	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
1560	ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
1561	IFQ_SET_READY(&ifp->if_snd);
1562	ifp->if_capenable = ifp->if_capabilities;
1563
1564	/*
1565	 * Now we have to see what sort of media we have.
1566	 * This includes probing for an MII interface and a
1567	 * possible PHY.
1568	 */
1569	XL_SEL_WIN(3);
1570	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1571	if (bootverbose)
1572		printf("xl%d: media options word: %x\n", sc->xl_unit,
1573							 sc->xl_media);
1574
1575	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
1576	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
1577	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1578	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1579
1580	xl_mediacheck(sc);
1581
1582	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
1583			|| sc->xl_media & XL_MEDIAOPT_BT4) {
1584		if (bootverbose)
1585			printf("xl%d: found MII/AUTO\n", sc->xl_unit);
1586		xl_setcfg(sc);
1587		if (mii_phy_probe(dev, &sc->xl_miibus,
1588		    xl_ifmedia_upd, xl_ifmedia_sts)) {
1589			printf("xl%d: no PHY found!\n", sc->xl_unit);
1590			error = ENXIO;
1591			goto fail;
1592		}
1593
1594		goto done;
1595	}
1596
1597	/*
1598	 * Sanity check. If the user has selected "auto" and this isn't
1599	 * a 10/100 card of some kind, we need to force the transceiver
1600	 * type to something sane.
1601	 */
1602	if (sc->xl_xcvr == XL_XCVR_AUTO)
1603		xl_choose_xcvr(sc, bootverbose);
1604
1605	/*
1606	 * Do ifmedia setup.
1607	 */
1608	if (sc->xl_media & XL_MEDIAOPT_BT) {
1609		if (bootverbose)
1610			printf("xl%d: found 10baseT\n", sc->xl_unit);
1611		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1612		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1613		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1614			ifmedia_add(&sc->ifmedia,
1615			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1616	}
1617
1618	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1619		/*
1620		 * Check for a 10baseFL board in disguise.
1621		 */
1622		if (sc->xl_type == XL_TYPE_905B &&
1623		    sc->xl_media == XL_MEDIAOPT_10FL) {
1624			if (bootverbose)
1625				printf("xl%d: found 10baseFL\n", sc->xl_unit);
1626			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
1627			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
1628			    0, NULL);
1629			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1630				ifmedia_add(&sc->ifmedia,
1631				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
1632		} else {
1633			if (bootverbose)
1634				printf("xl%d: found AUI\n", sc->xl_unit);
1635			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1636		}
1637	}
1638
1639	if (sc->xl_media & XL_MEDIAOPT_BNC) {
1640		if (bootverbose)
1641			printf("xl%d: found BNC\n", sc->xl_unit);
1642		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1643	}
1644
1645	if (sc->xl_media & XL_MEDIAOPT_BFX) {
1646		if (bootverbose)
1647			printf("xl%d: found 100baseFX\n", sc->xl_unit);
1648		ifp->if_baudrate = 100000000;
1649		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1650	}
1651
1652	/* Choose a default media. */
1653	switch(sc->xl_xcvr) {
1654	case XL_XCVR_10BT:
1655		media = IFM_ETHER|IFM_10_T;
1656		xl_setmode(sc, media);
1657		break;
1658	case XL_XCVR_AUI:
1659		if (sc->xl_type == XL_TYPE_905B &&
1660		    sc->xl_media == XL_MEDIAOPT_10FL) {
1661			media = IFM_ETHER|IFM_10_FL;
1662			xl_setmode(sc, media);
1663		} else {
1664			media = IFM_ETHER|IFM_10_5;
1665			xl_setmode(sc, media);
1666		}
1667		break;
1668	case XL_XCVR_COAX:
1669		media = IFM_ETHER|IFM_10_2;
1670		xl_setmode(sc, media);
1671		break;
1672	case XL_XCVR_AUTO:
1673	case XL_XCVR_100BTX:
1674	case XL_XCVR_MII:
1675		/* Chosen by miibus */
1676		break;
1677	case XL_XCVR_100BFX:
1678		media = IFM_ETHER|IFM_100_FX;
1679		break;
1680	default:
1681		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
1682							sc->xl_xcvr);
1683		/*
1684		 * This will probably be wrong, but it prevents
1685	 	 * the ifmedia code from panicking.
1686		 */
1687		media = IFM_ETHER|IFM_10_T;
1688		break;
1689	}
1690
1691	if (sc->xl_miibus == NULL)
1692		ifmedia_set(&sc->ifmedia, media);
1693
1694done:
1695
1696	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
1697		XL_SEL_WIN(0);
1698		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
1699	}
1700
1701	/*
1702	 * Call MI attach routine.
1703	 */
1704	ether_ifattach(ifp, eaddr);
1705
1706	/* Hook interrupt last to avoid having to lock softc */
1707	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET,
1708	    xl_intr, sc, &sc->xl_intrhand);
1709	if (error) {
1710		printf("xl%d: couldn't set up irq\n", unit);
1711		ether_ifdetach(ifp);
1712		goto fail;
1713	}
1714
1715fail:
1716	if (error)
1717		xl_detach(dev);
1718
1719	return(error);
1720}
1721
1722/*
1723 * Shutdown hardware and free up resources. This can be called any
1724 * time after the mutex has been initialized. It is called in both
1725 * the error case in attach and the normal detach case so it needs
1726 * to be careful about only freeing resources that have actually been
1727 * allocated.
1728 */
1729static int
1730xl_detach(dev)
1731	device_t		dev;
1732{
1733	struct xl_softc		*sc;
1734	struct ifnet		*ifp;
1735	int			rid, res;
1736
1737	sc = device_get_softc(dev);
1738	KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
1739	XL_LOCK(sc);
1740	ifp = &sc->arpcom.ac_if;
1741
1742	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
1743		rid = XL_PCI_LOMEM;
1744		res = SYS_RES_MEMORY;
1745	} else {
1746		rid = XL_PCI_LOIO;
1747		res = SYS_RES_IOPORT;
1748	}
1749
1750	/* These should only be active if attach succeeded */
1751	if (device_is_attached(dev)) {
1752		xl_reset(sc);
1753		xl_stop(sc);
1754		ether_ifdetach(ifp);
1755	}
1756	if (sc->xl_miibus)
1757		device_delete_child(dev, sc->xl_miibus);
1758	bus_generic_detach(dev);
1759	ifmedia_removeall(&sc->ifmedia);
1760
1761	if (sc->xl_intrhand)
1762		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
1763	if (sc->xl_irq)
1764		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
1765	if (sc->xl_fres != NULL)
1766		bus_release_resource(dev, SYS_RES_MEMORY,
1767		    XL_PCI_FUNCMEM, sc->xl_fres);
1768	if (sc->xl_res)
1769		bus_release_resource(dev, res, rid, sc->xl_res);
1770
1771	if (sc->xl_mtag) {
1772		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
1773		bus_dma_tag_destroy(sc->xl_mtag);
1774	}
1775	if (sc->xl_ldata.xl_rx_tag) {
1776		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
1777		    sc->xl_ldata.xl_rx_dmamap);
1778		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1779		    sc->xl_ldata.xl_rx_dmamap);
1780		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1781	}
1782	if (sc->xl_ldata.xl_tx_tag) {
1783		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
1784		    sc->xl_ldata.xl_tx_dmamap);
1785		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1786		    sc->xl_ldata.xl_tx_dmamap);
1787		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1788	}
1789
1790	XL_UNLOCK(sc);
1791	mtx_destroy(&sc->xl_mtx);
1792
1793	return(0);
1794}
1795
1796/*
1797 * Initialize the transmit descriptors.
1798 */
1799static int
1800xl_list_tx_init(sc)
1801	struct xl_softc		*sc;
1802{
1803	struct xl_chain_data	*cd;
1804	struct xl_list_data	*ld;
1805	int			error, i;
1806
1807	cd = &sc->xl_cdata;
1808	ld = &sc->xl_ldata;
1809	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1810		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1811		error = bus_dmamap_create(sc->xl_mtag, 0,
1812		    &cd->xl_tx_chain[i].xl_map);
1813		if (error)
1814			return(error);
1815		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1816		    i * sizeof(struct xl_list);
1817		if (i == (XL_TX_LIST_CNT - 1))
1818			cd->xl_tx_chain[i].xl_next = NULL;
1819		else
1820			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1821	}
1822
1823	cd->xl_tx_free = &cd->xl_tx_chain[0];
1824	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1825
1826	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1827	return(0);
1828}
1829
1830/*
1831 * Initialize the transmit descriptors (3c90xB/C ring variant).
1832 */
1833static int
1834xl_list_tx_init_90xB(sc)
1835	struct xl_softc		*sc;
1836{
1837	struct xl_chain_data	*cd;
1838	struct xl_list_data	*ld;
1839	int			error, i;
1840
1841	cd = &sc->xl_cdata;
1842	ld = &sc->xl_ldata;
1843	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1844		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1845		error = bus_dmamap_create(sc->xl_mtag, 0,
1846		    &cd->xl_tx_chain[i].xl_map);
1847		if (error)
1848			return(error);
1849		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1850		    i * sizeof(struct xl_list);
1851		if (i == (XL_TX_LIST_CNT - 1))
1852			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1853		else
1854			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1855		if (i == 0)
1856			cd->xl_tx_chain[i].xl_prev =
1857			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1858		else
1859			cd->xl_tx_chain[i].xl_prev =
1860			    &cd->xl_tx_chain[i - 1];
1861	}
1862
1863	bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
1864	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1865
1866	cd->xl_tx_prod = 1;
1867	cd->xl_tx_cons = 1;
1868	cd->xl_tx_cnt = 0;
1869
1870	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1871	return(0);
1872}
1873
1874/*
1875 * Initialize the RX descriptors and allocate mbufs for them. Note that
1876 * we arrange the descriptors in a closed ring, so that the last descriptor
1877 * points back to the first.
1878 */
1879static int
1880xl_list_rx_init(sc)
1881	struct xl_softc		*sc;
1882{
1883	struct xl_chain_data	*cd;
1884	struct xl_list_data	*ld;
1885	int			error, i, next;
1886	u_int32_t		nextptr;
1887
1888	cd = &sc->xl_cdata;
1889	ld = &sc->xl_ldata;
1890
1891	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1892		cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
1893		error = bus_dmamap_create(sc->xl_mtag, 0,
1894		    &cd->xl_rx_chain[i].xl_map);
1895		if (error)
1896			return(error);
1897		error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
1898		if (error)
1899			return(error);
1900		if (i == (XL_RX_LIST_CNT - 1))
1901			next = 0;
1902		else
1903			next = i + 1;
1904		nextptr = ld->xl_rx_dmaaddr +
1905		    next * sizeof(struct xl_list_onefrag);
1906		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
1907		ld->xl_rx_list[i].xl_next = htole32(nextptr);
1908	}
1909
1910	bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
1911	cd->xl_rx_head = &cd->xl_rx_chain[0];
1912
1913	return(0);
1914}
1915
1916/*
1917 * Initialize an RX descriptor and attach an MBUF cluster.
1918 * If we fail to do so, we need to leave the old mbuf and
1919 * the old DMA map untouched so that they can be reused.
1920 */
1921static int
1922xl_newbuf(sc, c)
1923	struct xl_softc		*sc;
1924	struct xl_chain_onefrag	*c;
1925{
1926	struct mbuf		*m_new = NULL;
1927	bus_dmamap_t		map;
1928	int			error;
1929	u_int32_t		baddr;
1930
1931	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1932	if (m_new == NULL)
1933		return(ENOBUFS);
1934
1935	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1936
1937	/* Force longword alignment for packet payload. */
1938	m_adj(m_new, ETHER_ALIGN);
1939
1940	error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
1941	    xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
1942	if (error) {
1943		m_freem(m_new);
1944		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
1945		return(error);
1946	}
1947
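	/*
	 * The new mbuf was loaded into the spare map above, so it is now
	 * safe to discard the old mapping and swap maps: this chain entry
	 * takes over the freshly loaded map and the old map becomes the
	 * spare for the next call.
	 */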
1948	bus_dmamap_unload(sc->xl_mtag, c->xl_map);
1949	map = c->xl_map;
1950	c->xl_map = sc->xl_tmpmap;
1951	sc->xl_tmpmap = map;
1952	c->xl_mbuf = m_new;
1953	c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
1954	c->xl_ptr->xl_status = 0;
1955	c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
1956	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
1957	return(0);
1958}
1959
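/*
 * Called when an upload-complete interrupt yielded no packets: scan the
 * RX ring for a descriptor whose status word the chip has already
 * written, resync the ring head to it and return EAGAIN so the caller
 * runs xl_rxeof() again. Return 0 if nothing is pending.
 */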
1960static int
1961xl_rx_resync(sc)
1962	struct xl_softc		*sc;
1963{
1964	struct xl_chain_onefrag	*pos;
1965	int			i;
1966
1967	pos = sc->xl_cdata.xl_rx_head;
1968
1969	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1970		if (pos->xl_ptr->xl_status)
1971			break;
1972		pos = pos->xl_next;
1973	}
1974
1975	if (i == XL_RX_LIST_CNT)
1976		return(0);
1977
1978	sc->xl_cdata.xl_rx_head = pos;
1979
1980	return(EAGAIN);
1981}
1982
1983/*
1984 * A frame has been uploaded: pass the resulting mbuf chain up to
1985 * the higher level protocols.
1986 */
1987static void
1988xl_rxeof(sc)
1989	struct xl_softc		*sc;
1990{
1991	struct mbuf		*m;
1992	struct ifnet		*ifp;
1993	struct xl_chain_onefrag	*cur_rx;
1994	int			total_len = 0;
1995	u_int32_t		rxstat;
1996
1997	XL_LOCK_ASSERT(sc);
1998
1999	ifp = &sc->arpcom.ac_if;
2000
2001again:
2002
2003	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
2004	    BUS_DMASYNC_POSTREAD);
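	/*
	 * The chip writes a non-zero status word into a descriptor once it
	 * has finished uploading a frame into it, so keep handing frames to
	 * the stack until we reach a descriptor whose status is still zero.
	 */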
2005	while((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
2006		cur_rx = sc->xl_cdata.xl_rx_head;
2007		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
2008		total_len = rxstat & XL_RXSTAT_LENMASK;
2009
2010		/*
2011		 * Since we have told the chip to allow large frames,
2012		 * we need to trap giant frame errors in software. We allow
2013		 * a little more than the normal frame size to account for
2014		 * frames with VLAN tags.
2015		 */
2016		if (total_len > XL_MAX_FRAMELEN)
2017			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
2018
2019		/*
2020		 * If an error occurs, update stats, clear the
2021		 * status word and leave the mbuf cluster in place:
2022		 * it should simply get re-used next time this descriptor
2023	 	 * comes up in the ring.
2024		 */
2025		if (rxstat & XL_RXSTAT_UP_ERROR) {
2026			ifp->if_ierrors++;
2027			cur_rx->xl_ptr->xl_status = 0;
2028			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2029			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2030			continue;
2031		}
2032
2033		/*
2034		 * If the error bit was not set, the upload complete
2035		 * bit should be set which means we have a valid packet.
2036		 * If not, something truly strange has happened.
2037		 */
2038		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
2039			printf("xl%d: bad receive status -- "
2040			    "packet dropped\n", sc->xl_unit);
2041			ifp->if_ierrors++;
2042			cur_rx->xl_ptr->xl_status = 0;
2043			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2044			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2045			continue;
2046		}
2047
2048		/* No errors; receive the packet. */
2049		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
2050		    BUS_DMASYNC_POSTREAD);
2051		m = cur_rx->xl_mbuf;
2052
2053		/*
2054		 * Try to conjure up a new mbuf cluster. If that
2055		 * fails, it means we have an out of memory condition and
2056		 * should leave the buffer in place and continue. This will
2057		 * result in a lost packet, but there's little else we
2058		 * can do in this situation.
2059		 */
2060		if (xl_newbuf(sc, cur_rx)) {
2061			ifp->if_ierrors++;
2062			cur_rx->xl_ptr->xl_status = 0;
2063			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2064			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2065			continue;
2066		}
2067		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
2068		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
2069
2070		ifp->if_ipackets++;
2071		m->m_pkthdr.rcvif = ifp;
2072		m->m_pkthdr.len = m->m_len = total_len;
2073
2074		if (ifp->if_capenable & IFCAP_RXCSUM) {
2075			/* Do IP checksum checking. */
2076			if (rxstat & XL_RXSTAT_IPCKOK)
2077				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2078			if (!(rxstat & XL_RXSTAT_IPCKERR))
2079				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2080			if ((rxstat & XL_RXSTAT_TCPCOK &&
2081			     !(rxstat & XL_RXSTAT_TCPCKERR)) ||
2082			    (rxstat & XL_RXSTAT_UDPCKOK &&
2083			     !(rxstat & XL_RXSTAT_UDPCKERR))) {
2084				m->m_pkthdr.csum_flags |=
2085					CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2086				m->m_pkthdr.csum_data = 0xffff;
2087			}
2088		}
2089
2090		XL_UNLOCK(sc);
2091		(*ifp->if_input)(ifp, m);
2092		XL_LOCK(sc);
2093	}
2094
2095	/*
2096	 * Handle the 'end of channel' condition. When the upload
2097	 * engine hits the end of the RX ring, it will stall. This
2098	 * is our cue to flush the RX ring, reload the uplist pointer
2099	 * register and unstall the engine.
2100	 * XXX This is actually a little goofy. With the ThunderLAN
2101	 * chip, you get an interrupt when the receiver hits the end
2102	 * of the receive ring, which tells you exactly when
2103	 * you need to reload the ring pointer. Here we have to
2104	 * fake it. I'm mad at myself for not being clever enough
2105	 * to avoid the use of a goto here.
2106	 */
2107	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
2108		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
2109		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2110		xl_wait(sc);
2111		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2112		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
2113		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2114		goto again;
2115	}
2116
2117	return;
2118}
2119
2120/*
2121 * A frame was downloaded to the chip. It's safe for us to clean up
2122 * the list buffers.
2123 */
2124static void
2125xl_txeof(sc)
2126	struct xl_softc		*sc;
2127{
2128	struct xl_chain		*cur_tx;
2129	struct ifnet		*ifp;
2130
2131	ifp = &sc->arpcom.ac_if;
2132
2133	/* Clear the timeout timer. */
2134	ifp->if_timer = 0;
2135
2136	/*
2137	 * Go through our tx list and free mbufs for those
2138	 * frames that have been transmitted. Note: the 3c905B
2139	 * sets a special bit in the status word to let us
2140	 * know that a frame has been downloaded, but the
2141	 * original 3c900/3c905 adapters don't do that.
2142	 * Consequently, we have to use a different test if
2143	 * xl_type != XL_TYPE_905B.
2144	 */
2145	while(sc->xl_cdata.xl_tx_head != NULL) {
2146		cur_tx = sc->xl_cdata.xl_tx_head;
2147
2148		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
2149			break;
2150
2151		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
2152		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
2153		    BUS_DMASYNC_POSTWRITE);
2154		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
2155		m_freem(cur_tx->xl_mbuf);
2156		cur_tx->xl_mbuf = NULL;
2157		ifp->if_opackets++;
2158
2159		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
2160		sc->xl_cdata.xl_tx_free = cur_tx;
2161	}
2162
2163	if (sc->xl_cdata.xl_tx_head == NULL) {
2164		ifp->if_flags &= ~IFF_OACTIVE;
2165		sc->xl_cdata.xl_tx_tail = NULL;
2166	} else {
2167		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
2168			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
2169			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2170				sc->xl_cdata.xl_tx_head->xl_phys);
2171			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2172		}
2173	}
2174
2175	return;
2176}
2177
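/*
 * TX completion handling for the 3c90xB: walk the ring from the consumer
 * index toward the producer index and free the mbuf for every descriptor
 * the chip has marked with XL_TXSTAT_DL_COMPLETE.
 */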
2178static void
2179xl_txeof_90xB(sc)
2180	struct xl_softc		*sc;
2181{
2182	struct xl_chain		*cur_tx = NULL;
2183	struct ifnet		*ifp;
2184	int			idx;
2185
2186	ifp = &sc->arpcom.ac_if;
2187
2188	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2189	    BUS_DMASYNC_POSTREAD);
2190	idx = sc->xl_cdata.xl_tx_cons;
2191	while(idx != sc->xl_cdata.xl_tx_prod) {
2192
2193		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
2194
2195		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
2196		      XL_TXSTAT_DL_COMPLETE))
2197			break;
2198
2199		if (cur_tx->xl_mbuf != NULL) {
2200			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
2201			    BUS_DMASYNC_POSTWRITE);
2202			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
2203			m_freem(cur_tx->xl_mbuf);
2204			cur_tx->xl_mbuf = NULL;
2205		}
2206
2207		ifp->if_opackets++;
2208
2209		sc->xl_cdata.xl_tx_cnt--;
2210		XL_INC(idx, XL_TX_LIST_CNT);
2211		ifp->if_timer = 0;
2212	}
2213
2214	sc->xl_cdata.xl_tx_cons = idx;
2215
2216	if (cur_tx != NULL)
2217		ifp->if_flags &= ~IFF_OACTIVE;
2218
2219	return;
2220}
2221
2222/*
2223 * TX 'end of channel' interrupt handler. Actually, we should
2224 * only get a 'TX complete' interrupt if there's a transmit error,
2225 * so this is really the TX error handler.
2226 */
2227static void
2228xl_txeoc(sc)
2229	struct xl_softc		*sc;
2230{
2231	u_int8_t		txstat;
2232
2233	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
2234		if (txstat & XL_TXSTATUS_UNDERRUN ||
2235			txstat & XL_TXSTATUS_JABBER ||
2236			txstat & XL_TXSTATUS_RECLAIM) {
2237			printf("xl%d: transmission error: %x\n",
2238						sc->xl_unit, txstat);
2239			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2240			xl_wait(sc);
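			/*
			 * After the TX reset, point the download engine
			 * back at the first unfinished descriptor so that
			 * transmission resumes where it left off.
			 */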
2241			if (sc->xl_type == XL_TYPE_905B) {
2242				if (sc->xl_cdata.xl_tx_cnt) {
2243					int			i;
2244					struct xl_chain		*c;
2245					i = sc->xl_cdata.xl_tx_cons;
2246					c = &sc->xl_cdata.xl_tx_chain[i];
2247					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2248					    c->xl_phys);
2249					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2250				}
2251			} else {
2252				if (sc->xl_cdata.xl_tx_head != NULL)
2253					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2254					    sc->xl_cdata.xl_tx_head->xl_phys);
2255			}
2256			/*
2257			 * Remember to set this for the
2258			 * first generation 3c90X chips.
2259			 */
2260			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2261			if (txstat & XL_TXSTATUS_UNDERRUN &&
2262			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
2263				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
2264				printf("xl%d: tx underrun, increasing tx start"
2265				    " threshold to %d bytes\n", sc->xl_unit,
2266				    sc->xl_tx_thresh);
2267			}
2268			CSR_WRITE_2(sc, XL_COMMAND,
2269			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2270			if (sc->xl_type == XL_TYPE_905B) {
2271				CSR_WRITE_2(sc, XL_COMMAND,
2272				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2273			}
2274			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2275			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2276		} else {
2277			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2278			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2279		}
2280		/*
2281		 * Write an arbitrary byte to the TX_STATUS register
2282	 	 * to clear this interrupt/error and advance to the next.
2283		 */
2284		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
2285	}
2286
2287	return;
2288}
2289
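/*
 * Interrupt handler: acknowledge and service all pending interrupt
 * sources (upload complete, download complete, TX errors, adapter
 * failure and statistics overflow), then kick the transmit routine if
 * there is still work queued.
 */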
2290static void
2291xl_intr(arg)
2292	void			*arg;
2293{
2294	struct xl_softc		*sc;
2295	struct ifnet		*ifp;
2296	u_int16_t		status;
2297
2298	sc = arg;
2299	XL_LOCK(sc);
2300	ifp = &sc->arpcom.ac_if;
2301
2302	while((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {
2303
2304		CSR_WRITE_2(sc, XL_COMMAND,
2305		    XL_CMD_INTR_ACK|(status & XL_INTRS));
2306
2307		if (status & XL_STAT_UP_COMPLETE) {
2308			int			curpkts;
2309
2310			curpkts = ifp->if_ipackets;
2311			xl_rxeof(sc);
2312			if (curpkts == ifp->if_ipackets) {
2313				while (xl_rx_resync(sc))
2314					xl_rxeof(sc);
2315			}
2316		}
2317
2318		if (status & XL_STAT_DOWN_COMPLETE) {
2319			if (sc->xl_type == XL_TYPE_905B)
2320				xl_txeof_90xB(sc);
2321			else
2322				xl_txeof(sc);
2323		}
2324
2325		if (status & XL_STAT_TX_COMPLETE) {
2326			ifp->if_oerrors++;
2327			xl_txeoc(sc);
2328		}
2329
2330		if (status & XL_STAT_ADFAIL) {
2331			xl_reset(sc);
2332			xl_init(sc);
2333		}
2334
2335		if (status & XL_STAT_STATSOFLOW) {
2336			sc->xl_stats_no_timeout = 1;
2337			xl_stats_update(sc);
2338			sc->xl_stats_no_timeout = 0;
2339		}
2340	}
2341
2342	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2343		(*ifp->if_start)(ifp);
2344
2345	XL_UNLOCK(sc);
2346
2347	return;
2348}
2349
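/*
 * Read and clear the statistics registers in window 6 (plus the BadSSD
 * counter in window 4), fold the interesting counters into the interface
 * statistics, give the PHY a tick and reschedule ourselves once a second
 * unless we were invoked from the interrupt or init paths.
 */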
2350static void
2351xl_stats_update(xsc)
2352	void			*xsc;
2353{
2354	struct xl_softc		*sc;
2355	struct ifnet		*ifp;
2356	struct xl_stats		xl_stats;
2357	u_int8_t		*p;
2358	int			i;
2359	struct mii_data		*mii = NULL;
2360
2361	bzero((char *)&xl_stats, sizeof(struct xl_stats));
2362
2363	sc = xsc;
2364	ifp = &sc->arpcom.ac_if;
2365	if (sc->xl_miibus != NULL)
2366		mii = device_get_softc(sc->xl_miibus);
2367
2368	p = (u_int8_t *)&xl_stats;
2369
2370	/* Read all the stats registers. */
2371	XL_SEL_WIN(6);
2372
2373	for (i = 0; i < 16; i++)
2374		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
2375
2376	ifp->if_ierrors += xl_stats.xl_rx_overrun;
2377
2378	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
2379				xl_stats.xl_tx_single_collision +
2380				xl_stats.xl_tx_late_collision;
2381
2382	/*
2383	 * Boomerang and cyclone chips have an extra stats counter
2384	 * in window 4 (BadSSD). We have to read this too in order
2385	 * to clear out all the stats registers and avoid a statsoflow
2386	 * interrupt.
2387	 */
2388	XL_SEL_WIN(4);
2389	CSR_READ_1(sc, XL_W4_BADSSD);
2390
2391	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
2392		mii_tick(mii);
2393
2394	XL_SEL_WIN(7);
2395
2396	if (!sc->xl_stats_no_timeout)
2397		sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2398
2399	return;
2400}
2401
2402/*
2403 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2404 * pointers to the fragment pointers.
2405 */
2406static int
2407xl_encap(sc, c, m_head)
2408	struct xl_softc		*sc;
2409	struct xl_chain		*c;
2410	struct mbuf		*m_head;
2411{
2412	int			error;
2413	u_int32_t		status;
2414	struct ifnet		*ifp;
2415
2416	ifp = &sc->arpcom.ac_if;
2417
2418	/*
2419 	 * Start packing the mbufs in this chain into
2420	 * the fragment pointers. Stop when we run out
2421 	 * of fragments or hit the end of the mbuf chain.
2422	 */
2423	error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
2424	    xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
2425
2426	if (error && error != EFBIG) {
2427		m_freem(m_head);
2428		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
2429		return(1);
2430	}
2431
2432	/*
2433	 * Handle special case: we used up all 63 fragments,
2434	 * but we have more mbufs left in the chain. Copy the
2435	 * data into an mbuf cluster. Note that we don't
2436	 * bother clearing the values in the other fragment
2437	 * pointers/counters; it wouldn't gain us anything,
2438	 * and would waste cycles.
2439	 */
2440	if (error) {
2441		struct mbuf		*m_new;
2442
2443		m_new = m_defrag(m_head, M_DONTWAIT);
2444		if (m_new == NULL) {
2445			m_freem(m_head);
2446			return(1);
2447		} else {
2448			m_head = m_new;
2449		}
2450
2451		error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
2452			m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
2453		if (error) {
2454			m_freem(m_head);
2455			printf("xl%d: can't map mbuf (error %d)\n",
2456			    sc->xl_unit, error);
2457			return(1);
2458		}
2459	}
2460
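	/*
	 * On the 3c90xB we build the download status word ourselves: set
	 * the round-up-defeat bit and, unless XL905B_TXCSUM_BROKEN is
	 * defined, ask the chip to insert IP/TCP/UDP checksums for whatever
	 * the stack requested via the mbuf checksum flags.
	 */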
2461	if (sc->xl_type == XL_TYPE_905B) {
2462		status = XL_TXSTAT_RND_DEFEAT;
2463
2464#ifndef XL905B_TXCSUM_BROKEN
2465		if (m_head->m_pkthdr.csum_flags) {
2466			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2467				status |= XL_TXSTAT_IPCKSUM;
2468			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
2469				status |= XL_TXSTAT_TCPCKSUM;
2470			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
2471				status |= XL_TXSTAT_UDPCKSUM;
2472		}
2473#endif
2474		c->xl_ptr->xl_status = htole32(status);
2475	}
2476
2477	c->xl_mbuf = m_head;
2478	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
2479	return(0);
2480}
2481
2482/*
2483 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2484 * to the mbuf data regions directly in the transmit lists. We also save a
2485 * copy of the pointers since the transmit list fragment pointers are
2486 * physical addresses.
2487 */
2488static void
2489xl_start(ifp)
2490	struct ifnet		*ifp;
2491{
2492	struct xl_softc		*sc;
2493	struct mbuf		*m_head = NULL;
2494	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
2495	struct xl_chain		*prev_tx;
2496	u_int32_t		status;
2497	int			error;
2498
2499	sc = ifp->if_softc;
2500	XL_LOCK(sc);
2501	/*
2502	 * Check for an available queue slot. If there are none,
2503	 * punt.
2504	 */
2505	if (sc->xl_cdata.xl_tx_free == NULL) {
2506		xl_txeoc(sc);
2507		xl_txeof(sc);
2508		if (sc->xl_cdata.xl_tx_free == NULL) {
2509			ifp->if_flags |= IFF_OACTIVE;
2510			XL_UNLOCK(sc);
2511			return;
2512		}
2513	}
2514
2515	start_tx = sc->xl_cdata.xl_tx_free;
2516
2517	while(sc->xl_cdata.xl_tx_free != NULL) {
2518		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2519		if (m_head == NULL)
2520			break;
2521
2522		/* Pick a descriptor off the free list. */
2523		prev_tx = cur_tx;
2524		cur_tx = sc->xl_cdata.xl_tx_free;
2525
2526		/* Pack the data into the descriptor. */
2527		error = xl_encap(sc, cur_tx, m_head);
2528		if (error) {
2529			cur_tx = prev_tx;
2530			continue;
2531		}
2532
2533		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
2534		cur_tx->xl_next = NULL;
2535
2536		/* Chain it together. */
2537		if (prev != NULL) {
2538			prev->xl_next = cur_tx;
2539			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
2540		}
2541		prev = cur_tx;
2542
2543		/*
2544		 * If there's a BPF listener, bounce a copy of this frame
2545		 * to him.
2546		 */
2547		BPF_MTAP(ifp, cur_tx->xl_mbuf);
2548	}
2549
2550	/*
2551	 * If there are no packets queued, bail.
2552	 */
2553	if (cur_tx == NULL) {
2554		XL_UNLOCK(sc);
2555		return;
2556	}
2557
2558	/*
2559	 * Place the request for the upload interrupt
2560	 * in the last descriptor in the chain. This way, if
2561	 * we're chaining several packets at once, we'll only
2562	 * get an interrupt once for the whole chain rather than
2563	 * once for each packet.
2564	 */
2565	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
2566	    XL_TXSTAT_DL_INTR);
2567	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2568	    BUS_DMASYNC_PREWRITE);
2569
2570	/*
2571	 * Queue the packets. If the TX channel is clear, update
2572	 * the downlist pointer register.
2573	 */
2574	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2575	xl_wait(sc);
2576
2577	if (sc->xl_cdata.xl_tx_head != NULL) {
2578		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
2579		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
2580		    htole32(start_tx->xl_phys);
2581		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
2582		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
2583		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
2584		sc->xl_cdata.xl_tx_tail = cur_tx;
2585	} else {
2586		sc->xl_cdata.xl_tx_head = start_tx;
2587		sc->xl_cdata.xl_tx_tail = cur_tx;
2588	}
2589	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
2590		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);
2591
2592	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2593
2594	XL_SEL_WIN(7);
2595
2596	/*
2597	 * Set a timeout in case the chip goes out to lunch.
2598	 */
2599	ifp->if_timer = 5;
2600
2601	/*
2602	 * XXX Under certain conditions, usually on slower machines
2603	 * where interrupts may be dropped, it's possible for the
2604	 * adapter to chew up all the buffers in the receive ring
2605	 * and stall, without us being able to do anything about it.
2606	 * To guard against this, we need to make a pass over the
2607	 * RX queue to make sure there aren't any packets pending.
2608	 * Doing it here means we can flush the receive ring at the
2609	 * same time the chip is DMAing the transmit descriptors we
2610	 * just gave it.
2611 	 *
2612	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
2613	 * nature of their chips in all their marketing literature;
2614	 * we may as well take advantage of it. :)
2615	 */
2616	xl_rxeof(sc);
2617
2618	XL_UNLOCK(sc);
2619
2620	return;
2621}
2622
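/*
 * Transmit routine for the 3c90xB: the descriptors form a fixed ring
 * indexed by producer/consumer counters. New frames are linked into the
 * ring and picked up by the chip's download polling, and IFF_OACTIVE is
 * set when fewer than three free descriptors remain.
 */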
2623static void
2624xl_start_90xB(ifp)
2625	struct ifnet		*ifp;
2626{
2627	struct xl_softc		*sc;
2628	struct mbuf		*m_head = NULL;
2629	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
2630	struct xl_chain		*prev_tx;
2631	int			error, idx;
2632
2633	sc = ifp->if_softc;
2634	XL_LOCK(sc);
2635
2636	if (ifp->if_flags & IFF_OACTIVE) {
2637		XL_UNLOCK(sc);
2638		return;
2639	}
2640
2641	idx = sc->xl_cdata.xl_tx_prod;
2642	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
2643
2644	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
2645
2646		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
2647			ifp->if_flags |= IFF_OACTIVE;
2648			break;
2649		}
2650
2651		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2652		if (m_head == NULL)
2653			break;
2654
2655		prev_tx = cur_tx;
2656		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
2657
2658		/* Pack the data into the descriptor. */
2659		error = xl_encap(sc, cur_tx, m_head);
2660		if (error) {
2661			cur_tx = prev_tx;
2662			continue;
2663		}
2664
2665		/* Chain it together. */
2666		if (prev != NULL)
2667			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
2668		prev = cur_tx;
2669
2670		/*
2671		 * If there's a BPF listener, bounce a copy of this frame
2672		 * to him.
2673		 */
2674		BPF_MTAP(ifp, cur_tx->xl_mbuf);
2675
2676		XL_INC(idx, XL_TX_LIST_CNT);
2677		sc->xl_cdata.xl_tx_cnt++;
2678	}
2679
2680	/*
2681	 * If there are no packets queued, bail.
2682	 */
2683	if (cur_tx == NULL) {
2684		XL_UNLOCK(sc);
2685		return;
2686	}
2687
2688	/*
2689	 * Place the request for the upload interrupt
2690	 * in the last descriptor in the chain. This way, if
2691	 * we're chaining several packets at once, we'll only
2692	 * get an interrupt once for the whole chain rather than
2693	 * once for each packet.
2694	 */
2695	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
2696	    XL_TXSTAT_DL_INTR);
2697	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
2698	    BUS_DMASYNC_PREWRITE);
2699
2700	/* Start transmission */
2701	sc->xl_cdata.xl_tx_prod = idx;
2702	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
2703
2704	/*
2705	 * Set a timeout in case the chip goes out to lunch.
2706	 */
2707	ifp->if_timer = 5;
2708
2709	XL_UNLOCK(sc);
2710
2711	return;
2712}
2713
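/*
 * Initialize the hardware: reset the transmitter (and the receiver when
 * no PHY is attached), program the station address and RX filter, set up
 * the RX/TX descriptor lists and thresholds, enable interrupts and start
 * the receiver and transmitter.
 */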
2714static void
2715xl_init(xsc)
2716	void			*xsc;
2717{
2718	struct xl_softc		*sc = xsc;
2719	struct ifnet		*ifp = &sc->arpcom.ac_if;
2720	int			error, i;
2721	u_int16_t		rxfilt = 0;
2722	struct mii_data		*mii = NULL;
2723
2724	XL_LOCK(sc);
2725
2726	/*
2727	 * Cancel pending I/O and free all RX/TX buffers.
2728	 */
2729	xl_stop(sc);
2730
2731	if (sc->xl_miibus == NULL) {
2732		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2733		xl_wait(sc);
2734	}
2735	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2736	xl_wait(sc);
2737	DELAY(10000);
2738
2739	if (sc->xl_miibus != NULL)
2740		mii = device_get_softc(sc->xl_miibus);
2741
2742	/* Init our MAC address */
2743	XL_SEL_WIN(2);
2744	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2745		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2746				sc->arpcom.ac_enaddr[i]);
2747	}
2748
2749	/* Clear the station mask. */
2750	for (i = 0; i < 3; i++)
2751		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2752#ifdef notdef
2753	/* Reset TX and RX. */
2754	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2755	xl_wait(sc);
2756	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2757	xl_wait(sc);
2758#endif
2759	/* Init circular RX list. */
2760	error = xl_list_rx_init(sc);
2761	if (error) {
2762		printf("xl%d: initialization of the rx ring failed (%d)\n",
2763		    sc->xl_unit, error);
2764		xl_stop(sc);
2765		XL_UNLOCK(sc);
2766		return;
2767	}
2768
2769	/* Init TX descriptors. */
2770	if (sc->xl_type == XL_TYPE_905B)
2771		error = xl_list_tx_init_90xB(sc);
2772	else
2773		error = xl_list_tx_init(sc);
2774	if (error) {
2775		printf("xl%d: initialization of the tx ring failed (%d)\n",
2776		    sc->xl_unit, error);
2777		xl_stop(sc);
2778		XL_UNLOCK(sc);
		return;
2779	}
2780
2781	/*
2782	 * Set the TX freethresh value.
2783	 * Note that this has no effect on 3c905B "cyclone"
2784	 * cards but is required for 3c900/3c905 "boomerang"
2785	 * cards in order to enable the download engine.
2786	 */
2787	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2788
2789	/* Set the TX start threshold for best performance. */
2790	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2791	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2792
2793	/*
2794	 * If this is a 3c905B, also set the tx reclaim threshold.
2795	 * This helps cut down on the number of tx reclaim errors
2796	 * that could happen on a busy network. The chip multiplies
2797	 * the register value by 16 to obtain the actual threshold
2798	 * in bytes, so we divide by 16 when setting the value here.
2799	 * The existing threshold value can be examined by reading
2800	 * the register at offset 9 in window 5.
2801	 */
2802	if (sc->xl_type == XL_TYPE_905B) {
2803		CSR_WRITE_2(sc, XL_COMMAND,
2804		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2805	}
2806
2807	/* Set RX filter bits. */
2808	XL_SEL_WIN(5);
2809	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2810
2811	/* Set the individual bit to receive frames for this host only. */
2812	rxfilt |= XL_RXFILTER_INDIVIDUAL;
2813
2814	/* If we want promiscuous mode, set the allframes bit. */
2815	if (ifp->if_flags & IFF_PROMISC) {
2816		rxfilt |= XL_RXFILTER_ALLFRAMES;
2817		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2818	} else {
2819		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2820		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2821	}
2822
2823	/*
2824	 * Set capture broadcast bit to capture broadcast frames.
2825	 */
2826	if (ifp->if_flags & IFF_BROADCAST) {
2827		rxfilt |= XL_RXFILTER_BROADCAST;
2828		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2829	} else {
2830		rxfilt &= ~XL_RXFILTER_BROADCAST;
2831		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2832	}
2833
2834	/*
2835	 * Program the multicast filter, if necessary.
2836	 */
2837	if (sc->xl_type == XL_TYPE_905B)
2838		xl_setmulti_hash(sc);
2839	else
2840		xl_setmulti(sc);
2841
2842	/*
2843	 * Load the address of the RX list. We have to
2844	 * stall the upload engine before we can manipulate
2845	 * the uplist pointer register, then unstall it when
2846	 * we're finished. We also have to wait for the
2847	 * stall command to complete before proceeding.
2848	 * Note that we have to do this after any RX resets
2849	 * have completed since the uplist register is cleared
2850	 * by a reset.
2851	 */
2852	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2853	xl_wait(sc);
2854	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2855	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2856	xl_wait(sc);
2857
2859	if (sc->xl_type == XL_TYPE_905B) {
2860		/* Set polling interval */
2861		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2862		/* Load the address of the TX list */
2863		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2864		xl_wait(sc);
2865		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2866		    sc->xl_cdata.xl_tx_chain[0].xl_phys);
2867		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2868		xl_wait(sc);
2869	}
2870
2871	/*
2872	 * If the coax transceiver is on, make sure to enable
2873	 * the DC-DC converter.
2874 	 */
2875	XL_SEL_WIN(3);
2876	if (sc->xl_xcvr == XL_XCVR_COAX)
2877		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2878	else
2879		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2880
2881	/*
2882	 * Increase the packet size to allow reception of 802.1q or ISL packets.
2883	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
2884	 * control register. For 3c90xB/C chips, use the RX packet size
2885	 * register.
2886	 */
2887
2888	if (sc->xl_type == XL_TYPE_905B)
2889		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
2890	else {
2891		u_int8_t macctl;
2892		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
2893		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
2894		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
2895	}
2896
2897	/* Clear out the stats counters. */
2898	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2899	sc->xl_stats_no_timeout = 1;
2900	xl_stats_update(sc);
2901	sc->xl_stats_no_timeout = 0;
2902	XL_SEL_WIN(4);
2903	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2904	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2905
2906	/*
2907	 * Enable interrupts.
2908	 */
2909	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2910	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2911	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2912	if (sc->xl_flags & XL_FLAG_FUNCREG)
2913	    bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
2914
2915	/* Set the RX early threshold */
2916	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
2917	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2918
2919	/* Enable receiver and transmitter. */
2920	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2921	xl_wait(sc);
2922	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2923	xl_wait(sc);
2924
2925	if (mii != NULL)
2926		mii_mediachg(mii);
2927
2928	/* Select window 7 for normal operations. */
2929	XL_SEL_WIN(7);
2930
2931	ifp->if_flags |= IFF_RUNNING;
2932	ifp->if_flags &= ~IFF_OACTIVE;
2933
2934	sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2935
2936	XL_UNLOCK(sc);
2937
2938	return;
2939}
2940
2941/*
2942 * Set media options.
2943 */
2944static int
2945xl_ifmedia_upd(ifp)
2946	struct ifnet		*ifp;
2947{
2948	struct xl_softc		*sc;
2949	struct ifmedia		*ifm = NULL;
2950	struct mii_data		*mii = NULL;
2951
2952	sc = ifp->if_softc;
2953	if (sc->xl_miibus != NULL)
2954		mii = device_get_softc(sc->xl_miibus);
2955	if (mii == NULL)
2956		ifm = &sc->ifmedia;
2957	else
2958		ifm = &mii->mii_media;
2959
2960	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2961	case IFM_100_FX:
2962	case IFM_10_FL:
2963	case IFM_10_2:
2964	case IFM_10_5:
2965		xl_setmode(sc, ifm->ifm_media);
2966		return(0);
2967		break;
2968	default:
2969		break;
2970	}
2971
2972	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2973		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2974		xl_init(sc);
2975	} else {
2976		xl_setmode(sc, ifm->ifm_media);
2977	}
2978
2979	return(0);
2980}
2981
2982/*
2983 * Report current media status.
2984 */
2985static void
2986xl_ifmedia_sts(ifp, ifmr)
2987	struct ifnet		*ifp;
2988	struct ifmediareq	*ifmr;
2989{
2990	struct xl_softc		*sc;
2991	u_int32_t		icfg;
2992	u_int16_t		status = 0;
2993	struct mii_data		*mii = NULL;
2994
2995	sc = ifp->if_softc;
2996	if (sc->xl_miibus != NULL)
2997		mii = device_get_softc(sc->xl_miibus);
2998
2999	XL_SEL_WIN(4);
3000	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3001
3002	XL_SEL_WIN(3);
3003	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
3004	icfg >>= XL_ICFG_CONNECTOR_BITS;
3005
3006	ifmr->ifm_active = IFM_ETHER;
3007	ifmr->ifm_status = IFM_AVALID;
3008
3009	if ((status & XL_MEDIASTAT_CARRIER) == 0)
3010		ifmr->ifm_status |= IFM_ACTIVE;
3011
3012	switch(icfg) {
3013	case XL_XCVR_10BT:
3014		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
3015		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3016			ifmr->ifm_active |= IFM_FDX;
3017		else
3018			ifmr->ifm_active |= IFM_HDX;
3019		break;
3020	case XL_XCVR_AUI:
3021		if (sc->xl_type == XL_TYPE_905B &&
3022		    sc->xl_media == XL_MEDIAOPT_10FL) {
3023			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
3024			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3025				ifmr->ifm_active |= IFM_FDX;
3026			else
3027				ifmr->ifm_active |= IFM_HDX;
3028		} else
3029			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
3030		break;
3031	case XL_XCVR_COAX:
3032		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
3033		break;
3034	/*
3035	 * XXX MII and BTX/AUTO should be separate cases.
3036	 */
3037
3038	case XL_XCVR_100BTX:
3039	case XL_XCVR_AUTO:
3040	case XL_XCVR_MII:
3041		if (mii != NULL) {
3042			mii_pollstat(mii);
3043			ifmr->ifm_active = mii->mii_media_active;
3044			ifmr->ifm_status = mii->mii_media_status;
3045		}
3046		break;
3047	case XL_XCVR_100BFX:
3048		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
3049		break;
3050	default:
3051		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
3052		break;
3053	}
3054
3055	return;
3056}
3057
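/*
 * Handle socket ioctls: interface flag changes (including toggling
 * promiscuous mode without a full reinit), multicast list updates, media
 * selection and checksum offload capability changes. Everything else is
 * passed to ether_ioctl().
 */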
3058static int
3059xl_ioctl(ifp, command, data)
3060	struct ifnet		*ifp;
3061	u_long			command;
3062	caddr_t			data;
3063{
3064	struct xl_softc		*sc = ifp->if_softc;
3065	struct ifreq		*ifr = (struct ifreq *) data;
3066	int			error = 0;
3067	struct mii_data		*mii = NULL;
3068	u_int8_t		rxfilt;
3069
3070	XL_LOCK(sc);
3071
3072	switch(command) {
3073	case SIOCSIFFLAGS:
3074		XL_SEL_WIN(5);
3075		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
3076		if (ifp->if_flags & IFF_UP) {
3077			if (ifp->if_flags & IFF_RUNNING &&
3078			    ifp->if_flags & IFF_PROMISC &&
3079			    !(sc->xl_if_flags & IFF_PROMISC)) {
3080				rxfilt |= XL_RXFILTER_ALLFRAMES;
3081				CSR_WRITE_2(sc, XL_COMMAND,
3082				    XL_CMD_RX_SET_FILT|rxfilt);
3083				XL_SEL_WIN(7);
3084			} else if (ifp->if_flags & IFF_RUNNING &&
3085			    !(ifp->if_flags & IFF_PROMISC) &&
3086			    sc->xl_if_flags & IFF_PROMISC) {
3087				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
3088				CSR_WRITE_2(sc, XL_COMMAND,
3089				    XL_CMD_RX_SET_FILT|rxfilt);
3090				XL_SEL_WIN(7);
3091			} else
3092				xl_init(sc);
3093		} else {
3094			if (ifp->if_flags & IFF_RUNNING)
3095				xl_stop(sc);
3096		}
3097		sc->xl_if_flags = ifp->if_flags;
3098		error = 0;
3099		break;
3100	case SIOCADDMULTI:
3101	case SIOCDELMULTI:
3102		if (sc->xl_type == XL_TYPE_905B)
3103			xl_setmulti_hash(sc);
3104		else
3105			xl_setmulti(sc);
3106		error = 0;
3107		break;
3108	case SIOCGIFMEDIA:
3109	case SIOCSIFMEDIA:
3110		if (sc->xl_miibus != NULL)
3111			mii = device_get_softc(sc->xl_miibus);
3112		if (mii == NULL)
3113			error = ifmedia_ioctl(ifp, ifr,
3114			    &sc->ifmedia, command);
3115		else
3116			error = ifmedia_ioctl(ifp, ifr,
3117			    &mii->mii_media, command);
3118		break;
3119	case SIOCSIFCAP:
3120		ifp->if_capenable = ifr->ifr_reqcap;
3121		if (ifp->if_capenable & IFCAP_TXCSUM)
3122			ifp->if_hwassist = XL905B_CSUM_FEATURES;
3123		else
3124			ifp->if_hwassist = 0;
3125		break;
3126	default:
3127		error = ether_ioctl(ifp, command, data);
3128		break;
3129	}
3130
3131	XL_UNLOCK(sc);
3132
3133	return(error);
3134}
3135
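/*
 * Watchdog timeout: the chip failed to complete a transmission within
 * five seconds. Report the problem, drain any completed work and then
 * reset and reinitialize the adapter.
 */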
3136static void
3137xl_watchdog(ifp)
3138	struct ifnet		*ifp;
3139{
3140	struct xl_softc		*sc;
3141	u_int16_t		status = 0;
3142
3143	sc = ifp->if_softc;
3144
3145	XL_LOCK(sc);
3146
3147	ifp->if_oerrors++;
3148	XL_SEL_WIN(4);
3149	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3150	printf("xl%d: watchdog timeout\n", sc->xl_unit);
3151
3152	if (status & XL_MEDIASTAT_CARRIER)
3153		printf("xl%d: no carrier - transceiver cable problem?\n",
3154								sc->xl_unit);
3155	xl_txeoc(sc);
3156	xl_txeof(sc);
3157	xl_rxeof(sc);
3158	xl_reset(sc);
3159	xl_init(sc);
3160
3161	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3162		(*ifp->if_start)(ifp);
3163
3164	XL_UNLOCK(sc);
3165
3166	return;
3167}
3168
3169/*
3170 * Stop the adapter and free any mbufs allocated to the
3171 * RX and TX lists.
3172 */
3173static void
3174xl_stop(sc)
3175	struct xl_softc		*sc;
3176{
3177	register int		i;
3178	struct ifnet		*ifp;
3179
3180	XL_LOCK(sc);
3181
3182	ifp = &sc->arpcom.ac_if;
3183	ifp->if_timer = 0;
3184
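	/*
	 * Disable the receiver, statistics gathering and interrupts,
	 * discard any partially received frame, then shut down the
	 * transmitter and the coax DC-DC converter.
	 */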
3185	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
3186	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
3187	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
3188	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
3189	xl_wait(sc);
3190	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
3191	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
3192	DELAY(800);
3193
3194#ifdef foo
3195	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
3196	xl_wait(sc);
3197	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
3198	xl_wait(sc);
3199#endif
3200
3201	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
3202	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
3203	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
3204	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
3205
3206	/* Stop the stats updater. */
3207	untimeout(xl_stats_update, sc, sc->xl_stat_ch);
3208
3209	/*
3210	 * Free data in the RX lists.
3211	 */
3212	for (i = 0; i < XL_RX_LIST_CNT; i++) {
3213		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
3214			bus_dmamap_unload(sc->xl_mtag,
3215			    sc->xl_cdata.xl_rx_chain[i].xl_map);
3216			bus_dmamap_destroy(sc->xl_mtag,
3217			    sc->xl_cdata.xl_rx_chain[i].xl_map);
3218			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
3219			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
3220		}
3221	}
3222	bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
3223	/*
3224	 * Free the TX list buffers.
3225	 */
3226	for (i = 0; i < XL_TX_LIST_CNT; i++) {
3227		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
3228			bus_dmamap_unload(sc->xl_mtag,
3229			    sc->xl_cdata.xl_tx_chain[i].xl_map);
3230			bus_dmamap_destroy(sc->xl_mtag,
3231			    sc->xl_cdata.xl_tx_chain[i].xl_map);
3232			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
3233			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
3234		}
3235	}
3236	bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
3237
3238	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3239
3240	XL_UNLOCK(sc);
3241
3242	return;
3243}
3244
3245/*
3246 * Stop all chip I/O so that the kernel's probe routines don't
3247 * get confused by errant DMAs when rebooting.
3248 */
3249static void
3250xl_shutdown(dev)
3251	device_t		dev;
3252{
3253	struct xl_softc		*sc;
3254
3255	sc = device_get_softc(dev);
3256
3257	XL_LOCK(sc);
3258	xl_reset(sc);
3259	xl_stop(sc);
3260	XL_UNLOCK(sc);
3261
3262	return;
3263}
3264
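/*
 * Device suspend routine: stop the adapter so that no DMA is in flight
 * while the machine sleeps.
 */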
3265static int
3266xl_suspend(dev)
3267	device_t		dev;
3268{
3269	struct xl_softc		*sc;
3270
3271	sc = device_get_softc(dev);
3272
3273	XL_LOCK(sc);
3274	xl_stop(sc);
3275	XL_UNLOCK(sc);
3276
3277	return(0);
3278}
3279
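/*
 * Device resume routine: reset the chip and, if the interface was up,
 * reinitialize it.
 */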
3280static int
3281xl_resume(dev)
3282	device_t		dev;
3283{
3284	struct xl_softc		*sc;
3285	struct ifnet		*ifp;
3286
3287	sc = device_get_softc(dev);
3288	XL_LOCK(sc);
3289	ifp = &sc->arpcom.ac_if;
3290
3291	xl_reset(sc);
3292	if (ifp->if_flags & IFF_UP)
3293		xl_init(sc);
3294
3295	XL_UNLOCK(sc);
3296	return(0);
3297}
3298