if_tl.c revision 151297
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/pci/if_tl.c 151297 2005-10-13 21:11:20Z ru $");
35
36/*
37 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39 * the National Semiconductor DP83840A physical interface and the
40 * Microchip Technology 24Cxx series serial EEPROM.
41 *
42 * Written using the following four documents:
43 *
44 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45 * National Semiconductor DP83840A data sheet (www.national.com)
46 * Microchip Technology 24C02C data sheet (www.microchip.com)
47 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48 *
49 * Written by Bill Paul <wpaul@ctr.columbia.edu>
50 * Electrical Engineering Department
51 * Columbia University, New York City
52 */
53/*
54 * Some notes about the ThunderLAN:
55 *
56 * The ThunderLAN controller is a single chip containing PCI controller
57 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
58 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
59 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
60 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
61 * to act as a complete ethernet interface.
62 *
63 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
64 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
65 * in full or half duplex. Some of the Compaq Deskpro machines use a
66 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
67 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
68 * concert with the ThunderLAN's internal PHY to provide full 10/100
69 * support. This is cheaper than using a standalone external PHY for both
70 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
71 * A serial EEPROM is also attached to the ThunderLAN chip to provide
72 * power-up default register settings and for storing the adapter's
73 * station address. Although not supported by this driver, the ThunderLAN
74 * chip can also be connected to token ring PHYs.
75 *
76 * The ThunderLAN has a set of registers which can be used to issue
77 * commands, acknowledge interrupts, and to manipulate other internal
78 * registers on its DIO bus. The primary registers can be accessed
79 * using either programmed I/O (inb/outb) or via PCI memory mapping,
80 * depending on how the card is configured during the PCI probing
81 * phase. It is even possible to have both PIO and memory mapped
82 * access turned on at the same time.
83 *
84 * Frame reception and transmission with the ThunderLAN chip is done
85 * using frame 'lists.' A list structure looks more or less like this:
86 *
87 * struct tl_frag {
88 *	u_int32_t		fragment_address;
89 *	u_int32_t		fragment_size;
90 * };
91 * struct tl_list {
92 *	u_int32_t		forward_pointer;
93 *	u_int16_t		cstat;
94 *	u_int16_t		frame_size;
95 *	struct tl_frag		fragments[10];
96 * };
97 *
98 * The forward pointer in the list header can be either a 0 or the address
99 * of another list, which allows several lists to be linked together. Each
100 * list contains up to 10 fragment descriptors. This means the chip allows
101 * ethernet frames to be broken up into up to 10 chunks for transfer to
102 * and from the SRAM. Note that the forward pointer and fragment buffer
103 * addresses are physical memory addresses, not virtual. Note also that
104 * a single ethernet frame cannot span lists: if the host wants to
105 * transmit a frame and the frame data is split up over more than 10
106 * buffers, the frame has to be collapsed before it can be transmitted.
107 *
108 * To receive frames, the driver sets up a number of lists and populates
109 * the fragment descriptors, then it sends an RX GO command to the chip.
110 * When a frame is received, the chip will DMA it into the memory regions
111 * specified by the fragment descriptors and then trigger an RX 'end of
112 * frame interrupt' when done. The driver may choose to use only one
113 * fragment per list; this may result in slightly less efficient use
114 * of memory in exchange for improved performance.
115 *
116 * To transmit frames, the driver again sets up lists and fragment
117 * descriptors, only this time the buffers contain frame data that
118 * is to be DMA'ed into the chip instead of out of it. Once the chip
119 * has transferred the data into its on-board SRAM, it will trigger a
120 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
121 * interrupt when it reaches the end of the list.
122 */
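/*
 * Illustrative sketch only (not driver code): chaining two of the lists
 * described above and pointing a fragment at a receive buffer would look
 * roughly like this, where 'la' and 'lb' are the virtual addresses of two
 * tl_list structures and 'buf' is a data buffer:
 *
 *	la->fragments[0].fragment_address = vtophys(buf);
 *	la->fragments[0].fragment_size = MCLBYTES;
 *	la->frame_size = MCLBYTES;
 *	la->cstat = TL_CSTAT_READY;
 *	la->forward_pointer = vtophys(lb);	(physical address, not virtual)
 *	lb->forward_pointer = 0;		(a 0 terminates the chain)
 *
 * The driver below does the same thing with its tl_list_onefrag and
 * tl_chain_onefrag structures in tl_list_rx_init() and tl_newbuf().
 */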
123/*
124 * Some notes about this driver:
125 *
126 * The ThunderLAN chip provides a couple of different ways to organize
127 * reception, transmission and interrupt handling. The simplest approach
128 * is to use one list each for transmission and reception. In this mode,
129 * the ThunderLAN will generate two interrupts for every received frame
130 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
131 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
132 * performance to have to handle so many interrupts.
133 *
134 * Initially I wanted to create a circular list of receive buffers so
135 * that the ThunderLAN chip would think there was an infinitely long
136 * receive channel and never deliver an RXEOC interrupt. However this
137 * doesn't work correctly under heavy load: while the manual says the
138 * chip will trigger an RXEOF interrupt each time a frame is copied into
139 * memory, you can't count on the chip waiting around for you to acknowledge
140 * the interrupt before it starts trying to DMA the next frame. The result
141 * is that the chip might traverse the entire circular list and then wrap
142 * around before you have a chance to do anything about it. Consequently,
143 * the receive list is terminated (with a 0 in the forward pointer in the
144 * last element). Each time an RXEOF interrupt arrives, the used list
145 * is shifted to the end of the chain. This gives the appearance of an
146 * infinitely large RX chain so long as the driver doesn't fall behind
147 * the chip and allow all of the lists to be filled up.
148 *
149 * If all the lists are filled, the adapter will deliver an RX 'end of
150 * channel' interrupt when it hits the 0 forward pointer at the end of
151 * the chain. The RXEOC handler then cleans out the RX chain and resets
152 * the list head pointer in the ch_parm register and restarts the receiver.
153 *
154 * For frame transmission, it is possible to program the ThunderLAN's
155 * transmit interrupt threshold so that the chip can acknowledge multiple
156 * lists with only a single TX EOF interrupt. This allows the driver to
157 * queue several frames in one shot, and only have to handle a total of
158 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
159 * are transmitted. Frame transmission is done directly out of the
160 * mbufs passed to the tl_start() routine via the interface send queue.
161 * The driver simply sets up the fragment descriptors in the transmit
162 * lists to point to the mbuf data regions and sends a TX GO command.
163 *
164 * Note that since the RX and TX lists themselves are always used
165 * only by the driver, they are malloc()ed once at driver initialization
166 * time and never free()ed.
167 *
168 * Also, in order to remain as platform independent as possible, this
169 * driver uses memory mapped register access to manipulate the card
170 * as opposed to programmed I/O. This avoids the use of the inb/outb
171 * (and related) instructions which are specific to the i386 platform.
172 *
173 * Using these techniques, this driver achieves very high performance
174 * by minimizing the amount of interrupts generated during large
175 * transfers and by completely avoiding buffer copies. Frame transfer
176 * to and from the ThunderLAN chip is performed entirely by the chip
177 * itself thereby reducing the load on the host CPU.
178 */
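/*
 * A minimal sketch of the receive list recycling described above (the
 * real versions live in tl_intvec_rxeof() and tl_intvec_rxeoc() below),
 * where 'head' and 'tail' stand in for the driver's tl_rx_head and
 * tl_rx_tail chain pointers and 'cur' is the list that just completed:
 *
 *	cur = head;
 *	head = cur->tl_next;
 *	(refill cur with a fresh mbuf via tl_newbuf(), which also
 *	 clears tlist_fptr and marks the list TL_CSTAT_READY)
 *	tail->tl_ptr->tlist_fptr = vtophys(cur->tl_ptr);
 *	tail->tl_next = cur;
 *	tail = cur;
 *
 * As long as this recycling keeps pace with the chip, the chain never
 * runs dry and no RX EOC interrupt is generated.
 */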
179
180#include <sys/param.h>
181#include <sys/systm.h>
182#include <sys/sockio.h>
183#include <sys/mbuf.h>
184#include <sys/malloc.h>
185#include <sys/kernel.h>
186#include <sys/module.h>
187#include <sys/socket.h>
188
189#include <net/if.h>
190#include <net/if_arp.h>
191#include <net/ethernet.h>
192#include <net/if_dl.h>
193#include <net/if_media.h>
194#include <net/if_types.h>
195
196#include <net/bpf.h>
197
198#include <vm/vm.h>              /* for vtophys */
199#include <vm/pmap.h>            /* for vtophys */
200#include <machine/bus.h>
201#include <machine/resource.h>
202#include <sys/bus.h>
203#include <sys/rman.h>
204
205#include <dev/mii/mii.h>
206#include <dev/mii/miivar.h>
207
208#include <dev/pci/pcireg.h>
209#include <dev/pci/pcivar.h>
210
211/*
212 * Default to using PIO register access mode to pacify certain
213 * laptop docking stations with built-in ThunderLAN chips that
214 * don't seem to handle memory mapped mode properly.
215 */
216#define TL_USEIOSPACE
217
218#include <pci/if_tlreg.h>
219
220MODULE_DEPEND(tl, pci, 1, 1, 1);
221MODULE_DEPEND(tl, ether, 1, 1, 1);
222MODULE_DEPEND(tl, miibus, 1, 1, 1);
223
224/* "controller miibus0" required.  See GENERIC if you get errors here. */
225#include "miibus_if.h"
226
227/*
228 * Various supported device vendors/types and their names.
229 */
230
231static struct tl_type tl_devs[] = {
232	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
233		"Texas Instruments ThunderLAN" },
234	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
235		"Compaq Netelligent 10" },
236	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
237		"Compaq Netelligent 10/100" },
238	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
239		"Compaq Netelligent 10/100 Proliant" },
240	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
241		"Compaq Netelligent 10/100 Dual Port" },
242	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
243		"Compaq NetFlex-3/P Integrated" },
244	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
245		"Compaq NetFlex-3/P" },
246	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
247		"Compaq NetFlex 3/P w/ BNC" },
248	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
249		"Compaq Netelligent 10/100 TX Embedded UTP" },
250	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
251		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
252	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
253		"Compaq Netelligent 10/100 TX UTP" },
254	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
255		"Olicom OC-2183/2185" },
256	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
257		"Olicom OC-2325" },
258	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
259		"Olicom OC-2326 10/100 TX UTP" },
260	{ 0, 0, NULL }
261};
262
263static int tl_probe(device_t);
264static int tl_attach(device_t);
265static int tl_detach(device_t);
266static int tl_intvec_rxeoc(void *, u_int32_t);
267static int tl_intvec_txeoc(void *, u_int32_t);
268static int tl_intvec_txeof(void *, u_int32_t);
269static int tl_intvec_rxeof(void *, u_int32_t);
270static int tl_intvec_adchk(void *, u_int32_t);
271static int tl_intvec_netsts(void *, u_int32_t);
272
273static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
274static void tl_stats_update(void *);
275static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
276
277static void tl_intr(void *);
278static void tl_start(struct ifnet *);
279static void tl_start_locked(struct ifnet *);
280static int tl_ioctl(struct ifnet *, u_long, caddr_t);
281static void tl_init(void *);
282static void tl_init_locked(struct tl_softc *);
283static void tl_stop(struct tl_softc *);
284static void tl_watchdog(struct ifnet *);
285static void tl_shutdown(device_t);
286static int tl_ifmedia_upd(struct ifnet *);
287static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
288
289static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
290static u_int8_t	tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
291static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
292
293static void tl_mii_sync(struct tl_softc *);
294static void tl_mii_send(struct tl_softc *, u_int32_t, int);
295static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
296static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
297static int tl_miibus_readreg(device_t, int, int);
298static int tl_miibus_writereg(device_t, int, int, int);
299static void tl_miibus_statchg(device_t);
300
301static void tl_setmode(struct tl_softc *, int);
302static uint32_t tl_mchash(const uint8_t *);
303static void tl_setmulti(struct tl_softc *);
304static void tl_setfilt(struct tl_softc *, caddr_t, int);
305static void tl_softreset(struct tl_softc *, int);
306static void tl_hardreset(device_t);
307static int tl_list_rx_init(struct tl_softc *);
308static int tl_list_tx_init(struct tl_softc *);
309
310static u_int8_t tl_dio_read8(struct tl_softc *, int);
311static u_int16_t tl_dio_read16(struct tl_softc *, int);
312static u_int32_t tl_dio_read32(struct tl_softc *, int);
313static void tl_dio_write8(struct tl_softc *, int, int);
314static void tl_dio_write16(struct tl_softc *, int, int);
315static void tl_dio_write32(struct tl_softc *, int, int);
316static void tl_dio_setbit(struct tl_softc *, int, int);
317static void tl_dio_clrbit(struct tl_softc *, int, int);
318static void tl_dio_setbit16(struct tl_softc *, int, int);
319static void tl_dio_clrbit16(struct tl_softc *, int, int);
320
321#ifdef TL_USEIOSPACE
322#define TL_RES		SYS_RES_IOPORT
323#define TL_RID		TL_PCI_LOIO
324#else
325#define TL_RES		SYS_RES_MEMORY
326#define TL_RID		TL_PCI_LOMEM
327#endif
328
329static device_method_t tl_methods[] = {
330	/* Device interface */
331	DEVMETHOD(device_probe,		tl_probe),
332	DEVMETHOD(device_attach,	tl_attach),
333	DEVMETHOD(device_detach,	tl_detach),
334	DEVMETHOD(device_shutdown,	tl_shutdown),
335
336	/* bus interface */
337	DEVMETHOD(bus_print_child,	bus_generic_print_child),
338	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
339
340	/* MII interface */
341	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
342	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
343	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
344
345	{ 0, 0 }
346};
347
348static driver_t tl_driver = {
349	"tl",
350	tl_methods,
351	sizeof(struct tl_softc)
352};
353
354static devclass_t tl_devclass;
355
356DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
357DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
358
359static u_int8_t tl_dio_read8(sc, reg)
360	struct tl_softc		*sc;
361	int			reg;
362{
363	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
364	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
365}
366
367static u_int16_t tl_dio_read16(sc, reg)
368	struct tl_softc		*sc;
369	int			reg;
370{
371	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
372	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
373}
374
375static u_int32_t tl_dio_read32(sc, reg)
376	struct tl_softc		*sc;
377	int			reg;
378{
379	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
380	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
381}
382
383static void tl_dio_write8(sc, reg, val)
384	struct tl_softc		*sc;
385	int			reg;
386	int			val;
387{
388	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
389	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
390	return;
391}
392
393static void tl_dio_write16(sc, reg, val)
394	struct tl_softc		*sc;
395	int			reg;
396	int			val;
397{
398	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
399	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
400	return;
401}
402
403static void tl_dio_write32(sc, reg, val)
404	struct tl_softc		*sc;
405	int			reg;
406	int			val;
407{
408	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
409	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
410	return;
411}
412
413static void
414tl_dio_setbit(sc, reg, bit)
415	struct tl_softc		*sc;
416	int			reg;
417	int			bit;
418{
419	u_int8_t			f;
420
421	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
422	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
423	f |= bit;
424	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
425
426	return;
427}
428
429static void
430tl_dio_clrbit(sc, reg, bit)
431	struct tl_softc		*sc;
432	int			reg;
433	int			bit;
434{
435	u_int8_t			f;
436
437	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
438	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
439	f &= ~bit;
440	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
441
442	return;
443}
444
445static void tl_dio_setbit16(sc, reg, bit)
446	struct tl_softc		*sc;
447	int			reg;
448	int			bit;
449{
450	u_int16_t			f;
451
452	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
453	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
454	f |= bit;
455	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
456
457	return;
458}
459
460static void tl_dio_clrbit16(sc, reg, bit)
461	struct tl_softc		*sc;
462	int			reg;
463	int			bit;
464{
465	u_int16_t			f;
466
467	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
468	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
469	f &= ~bit;
470	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
471
472	return;
473}
474
475/*
476 * Send an instruction or address to the EEPROM, check for ACK.
477 */
478static u_int8_t tl_eeprom_putbyte(sc, byte)
479	struct tl_softc		*sc;
480	int			byte;
481{
482	register int		i, ack = 0;
483
484	/*
485	 * Make sure we're in TX mode.
486	 */
487	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
488
489	/*
490	 * Feed in each bit and strobe the clock.
491	 */
492	for (i = 0x80; i; i >>= 1) {
493		if (byte & i) {
494			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
495		} else {
496			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
497		}
498		DELAY(1);
499		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
500		DELAY(1);
501		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
502	}
503
504	/*
505	 * Turn off TX mode.
506	 */
507	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
508
509	/*
510	 * Check for ack.
511	 */
512	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
513	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
514	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
515
516	return(ack);
517}
518
519/*
520 * Read a byte of data stored in the EEPROM at address 'addr.'
521 */
522static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
523	struct tl_softc		*sc;
524	int			addr;
525	u_int8_t		*dest;
526{
527	register int		i;
528	u_int8_t		byte = 0;
529	struct ifnet		*ifp = sc->tl_ifp;
530
531	tl_dio_write8(sc, TL_NETSIO, 0);
532
533	EEPROM_START;
534
535	/*
536	 * Send write control code to EEPROM.
537	 */
538	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
539		if_printf(ifp, "failed to send write command, status: %x\n",
540		    tl_dio_read8(sc, TL_NETSIO));
541		return(1);
542	}
543
544	/*
545	 * Send address of byte we want to read.
546	 */
547	if (tl_eeprom_putbyte(sc, addr)) {
548		if_printf(ifp, "failed to send address, status: %x\n",
549		    tl_dio_read8(sc, TL_NETSIO));
550		return(1);
551	}
552
553	EEPROM_STOP;
554	EEPROM_START;
555	/*
556	 * Send read control code to EEPROM.
557	 */
558	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
559		if_printf(ifp, "failed to send read command, status: %x\n",
560		    tl_dio_read8(sc, TL_NETSIO));
561		return(1);
562	}
563
564	/*
565	 * Start reading bits from EEPROM.
566	 */
567	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
568	for (i = 0x80; i; i >>= 1) {
569		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
570		DELAY(1);
571		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
572			byte |= i;
573		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
574		DELAY(1);
575	}
576
577	EEPROM_STOP;
578
579	/*
580	 * No ACK generated for read, so just return byte.
581	 */
582
583	*dest = byte;
584
585	return(0);
586}
587
588/*
589 * Read a sequence of bytes from the EEPROM.
590 */
591static int
592tl_read_eeprom(sc, dest, off, cnt)
593	struct tl_softc		*sc;
594	caddr_t			dest;
595	int			off;
596	int			cnt;
597{
598	int			err = 0, i;
599	u_int8_t		byte = 0;
600
601	for (i = 0; i < cnt; i++) {
602		err = tl_eeprom_getbyte(sc, off + i, &byte);
603		if (err)
604			break;
605		*(dest + i) = byte;
606	}
607
608	return(err ? 1 : 0);
609}
610
611static void
612tl_mii_sync(sc)
613	struct tl_softc		*sc;
614{
615	register int		i;
616
617	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
618
619	for (i = 0; i < 32; i++) {
620		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
621		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
622	}
623
624	return;
625}
626
627static void
628tl_mii_send(sc, bits, cnt)
629	struct tl_softc		*sc;
630	u_int32_t		bits;
631	int			cnt;
632{
633	int			i;
634
635	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
636		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
637		if (bits & i) {
638			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
639		} else {
640			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
641		}
642		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
643	}
644}
645
646static int
647tl_mii_readreg(sc, frame)
648	struct tl_softc		*sc;
649	struct tl_mii_frame	*frame;
650
651{
652	int			i, ack;
653	int			minten = 0;
654
655	tl_mii_sync(sc);
656
657	/*
658	 * Set up frame for RX.
659	 */
660	frame->mii_stdelim = TL_MII_STARTDELIM;
661	frame->mii_opcode = TL_MII_READOP;
662	frame->mii_turnaround = 0;
663	frame->mii_data = 0;
664
665	/*
666	 * Turn off MII interrupt by forcing MINTEN low.
667	 */
668	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
669	if (minten) {
670		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
671	}
672
673	/*
674 	 * Turn on data xmit.
675	 */
676	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
677
678	/*
679	 * Send command/address info.
680	 */
681	tl_mii_send(sc, frame->mii_stdelim, 2);
682	tl_mii_send(sc, frame->mii_opcode, 2);
683	tl_mii_send(sc, frame->mii_phyaddr, 5);
684	tl_mii_send(sc, frame->mii_regaddr, 5);
685
686	/*
687	 * Turn off xmit.
688	 */
689	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
690
691	/* Idle bit */
692	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
693	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
694
695	/* Check for ack */
696	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
697	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
698
699	/* Complete the cycle */
700	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
701
702	/*
703	 * Now try reading data bits. If the ack failed, we still
704	 * need to clock through 16 cycles to keep the PHYs in sync.
705	 */
706	if (ack) {
707		for(i = 0; i < 16; i++) {
708			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
709			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
710		}
711		goto fail;
712	}
713
714	for (i = 0x8000; i; i >>= 1) {
715		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
716		if (!ack) {
717			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
718				frame->mii_data |= i;
719		}
720		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
721	}
722
723fail:
724
725	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
726	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
727
728	/* Reenable interrupts */
729	if (minten) {
730		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
731	}
732
733	if (ack)
734		return(1);
735	return(0);
736}
737
738static int
739tl_mii_writereg(sc, frame)
740	struct tl_softc		*sc;
741	struct tl_mii_frame	*frame;
742
743{
744	int			minten;
745
746	tl_mii_sync(sc);
747
748	/*
749	 * Set up frame for TX.
750	 */
751
752	frame->mii_stdelim = TL_MII_STARTDELIM;
753	frame->mii_opcode = TL_MII_WRITEOP;
754	frame->mii_turnaround = TL_MII_TURNAROUND;
755
756	/*
757	 * Turn off MII interrupt by forcing MINTEN low.
758	 */
759	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
760	if (minten) {
761		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
762	}
763
764	/*
765 	 * Turn on data output.
766	 */
767	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
768
769	tl_mii_send(sc, frame->mii_stdelim, 2);
770	tl_mii_send(sc, frame->mii_opcode, 2);
771	tl_mii_send(sc, frame->mii_phyaddr, 5);
772	tl_mii_send(sc, frame->mii_regaddr, 5);
773	tl_mii_send(sc, frame->mii_turnaround, 2);
774	tl_mii_send(sc, frame->mii_data, 16);
775
776	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
777	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
778
779	/*
780	 * Turn off xmit.
781	 */
782	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
783
784	/* Reenable interrupts */
785	if (minten)
786		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
787
788	return(0);
789}
790
791static int
792tl_miibus_readreg(dev, phy, reg)
793	device_t		dev;
794	int			phy, reg;
795{
796	struct tl_softc		*sc;
797	struct tl_mii_frame	frame;
798
799	sc = device_get_softc(dev);
800	bzero((char *)&frame, sizeof(frame));
801
802	frame.mii_phyaddr = phy;
803	frame.mii_regaddr = reg;
804	tl_mii_readreg(sc, &frame);
805
806	return(frame.mii_data);
807}
808
809static int
810tl_miibus_writereg(dev, phy, reg, data)
811	device_t		dev;
812	int			phy, reg, data;
813{
814	struct tl_softc		*sc;
815	struct tl_mii_frame	frame;
816
817	sc = device_get_softc(dev);
818	bzero((char *)&frame, sizeof(frame));
819
820	frame.mii_phyaddr = phy;
821	frame.mii_regaddr = reg;
822	frame.mii_data = data;
823
824	tl_mii_writereg(sc, &frame);
825
826	return(0);
827}
828
829static void
830tl_miibus_statchg(dev)
831	device_t		dev;
832{
833	struct tl_softc		*sc;
834	struct mii_data		*mii;
835
836	sc = device_get_softc(dev);
837	mii = device_get_softc(sc->tl_miibus);
838
839	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
840		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
841	} else {
842		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
843	}
844
845	return;
846}
847
848/*
849 * Set modes for bitrate devices.
850 */
851static void
852tl_setmode(sc, media)
853	struct tl_softc		*sc;
854	int			media;
855{
856	if (IFM_SUBTYPE(media) == IFM_10_5)
857		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
858	if (IFM_SUBTYPE(media) == IFM_10_T) {
859		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
860		if ((media & IFM_GMASK) == IFM_FDX) {
861			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
862			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
863		} else {
864			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
865			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
866		}
867	}
868
869	return;
870}
871
872/*
873 * Calculate the hash of a MAC address for programming the multicast hash
874 * table.  This hash is simply the address split into 6-bit chunks
875 * XOR'd, e.g.
876 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
877 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
878 * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
879 * the folded 24-bit value is split into 6-bit portions and XOR'd.
880 */
881static uint32_t
882tl_mchash(addr)
883	const uint8_t *addr;
884{
885	int t;
886
887	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
888		(addr[2] ^ addr[5]);
889	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
890}
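/*
 * Worked example (for reference): the all-hosts group 224.0.0.1 maps to
 * the MAC address 01:00:5e:00:00:01.  Folding bytes 0-2 with bytes 3-5
 * gives t = 0x01005f, and (t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t = 0x1044e,
 * so the hash is 0x1044e & 0x3f = 14.  The broadcast address
 * ff:ff:ff:ff:ff:ff folds to 0 and therefore hashes to bit 0.
 */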
891
892/*
893 * The ThunderLAN has a perfect MAC address filter in addition to
894 * the multicast hash filter. The perfect filter can be programmed
895 * with up to four MAC addresses. The first one is always used to
896 * hold the station address, which leaves us free to use the other
897 * three for multicast addresses.
898 */
899static void
900tl_setfilt(sc, addr, slot)
901	struct tl_softc		*sc;
902	caddr_t			addr;
903	int			slot;
904{
905	int			i;
906	u_int16_t		regaddr;
907
908	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
909
910	for (i = 0; i < ETHER_ADDR_LEN; i++)
911		tl_dio_write8(sc, regaddr + i, *(addr + i));
912
913	return;
914}
915
916/*
917 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
918 * linked list. This is fine, except addresses are added from the head
919 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
920 * group to always be in the perfect filter, but as more groups are added,
921 * the 224.0.0.1 entry (which is always added first) gets pushed down
922 * the list and ends up at the tail. So after 3 or 4 multicast groups
923 * are added, the all-hosts entry gets pushed out of the perfect filter
924 * and into the hash table.
925 *
926 * Because the multicast list is a doubly-linked list as opposed to a
927 * circular queue, we don't have the ability to just grab the tail of
928 * the list and traverse it backwards. Instead, we have to traverse
929 * the list once to find the tail, then traverse it again backwards to
930 * update the multicast filter.
931 */
932static void
933tl_setmulti(sc)
934	struct tl_softc		*sc;
935{
936	struct ifnet		*ifp;
937	u_int32_t		hashes[2] = { 0, 0 };
938	int			h, i;
939	struct ifmultiaddr	*ifma;
940	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
941	ifp = sc->tl_ifp;
942
943	/* First, zot all the existing filters. */
944	for (i = 1; i < 4; i++)
945		tl_setfilt(sc, (caddr_t)&dummy, i);
946	tl_dio_write32(sc, TL_HASH1, 0);
947	tl_dio_write32(sc, TL_HASH2, 0);
948
949	/* Now program new ones. */
950	if (ifp->if_flags & IFF_ALLMULTI) {
951		hashes[0] = 0xFFFFFFFF;
952		hashes[1] = 0xFFFFFFFF;
953	} else {
954		i = 1;
955		IF_ADDR_LOCK(ifp);
956		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
957			if (ifma->ifma_addr->sa_family != AF_LINK)
958				continue;
959			/*
960			 * Program the first three multicast groups
961			 * into the perfect filter. For all others,
962			 * use the hash table.
963			 */
964			if (i < 4) {
965				tl_setfilt(sc,
966			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
967				i++;
968				continue;
969			}
970
971			h = tl_mchash(
972				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
973			if (h < 32)
974				hashes[0] |= (1 << h);
975			else
976				hashes[1] |= (1 << (h - 32));
977		}
978		IF_ADDR_UNLOCK(ifp);
979	}
980
981	tl_dio_write32(sc, TL_HASH1, hashes[0]);
982	tl_dio_write32(sc, TL_HASH2, hashes[1]);
983
984	return;
985}
986
987/*
988 * This routine is recommended by the ThunderLAN manual to ensure that
989 * the internal PHY is powered up correctly. It also recommends a one
990 * second pause at the end to 'wait for the clocks to start' but in my
991 * experience this isn't necessary.
992 */
993static void
994tl_hardreset(dev)
995	device_t		dev;
996{
997	struct tl_softc		*sc;
998	int			i;
999	u_int16_t		flags;
1000
1001	sc = device_get_softc(dev);
1002
1003	tl_mii_sync(sc);
1004
1005	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1006
1007	for (i = 0; i < MII_NPHY; i++)
1008		tl_miibus_writereg(dev, i, MII_BMCR, flags);
1009
1010	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1011	DELAY(50000);
1012	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1013	tl_mii_sync(sc);
1014	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1015
1016	DELAY(50000);
1017	return;
1018}
1019
1020static void
1021tl_softreset(sc, internal)
1022	struct tl_softc		*sc;
1023	int			internal;
1024{
1025        u_int32_t               cmd, dummy, i;
1026
1027        /* Assert the adapter reset bit. */
1028	CMD_SET(sc, TL_CMD_ADRST);
1029
1030        /* Turn off interrupts */
1031	CMD_SET(sc, TL_CMD_INTSOFF);
1032
1033	/* First, clear the stats registers. */
1034	for (i = 0; i < 5; i++)
1035		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1036
1037        /* Clear Areg and Hash registers */
1038	for (i = 0; i < 8; i++)
1039		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1040
1041        /*
1042	 * Set up Netconfig register. Enable one channel and
1043	 * one fragment mode.
1044	 */
1045	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1046	if (internal && !sc->tl_bitrate) {
1047		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1048	} else {
1049		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1050	}
1051
1052	/* Handle cards with bitrate devices. */
1053	if (sc->tl_bitrate)
1054		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1055
1056	/*
1057	 * Load adapter irq pacing timer and tx threshold.
1058	 * We make the transmit threshold 1 initially but we may
1059	 * change that later.
1060	 */
1061	cmd = CSR_READ_4(sc, TL_HOSTCMD);
1062	cmd |= TL_CMD_NES;
1063	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1064	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1065	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1066
1067        /* Unreset the MII */
1068	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1069
1070	/* Take the adapter out of reset */
1071	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1072
1073	/* Wait for things to settle down a little. */
1074	DELAY(500);
1075
1076        return;
1077}
1078
1079/*
1080 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1081 * against our list and return its name if we find a match.
1082 */
1083static int
1084tl_probe(dev)
1085	device_t		dev;
1086{
1087	struct tl_type		*t;
1088
1089	t = tl_devs;
1090
1091	while(t->tl_name != NULL) {
1092		if ((pci_get_vendor(dev) == t->tl_vid) &&
1093		    (pci_get_device(dev) == t->tl_did)) {
1094			device_set_desc(dev, t->tl_name);
1095			return (BUS_PROBE_DEFAULT);
1096		}
1097		t++;
1098	}
1099
1100	return(ENXIO);
1101}
1102
1103static int
1104tl_attach(dev)
1105	device_t		dev;
1106{
1107	int			i;
1108	u_int16_t		did, vid;
1109	struct tl_type		*t;
1110	struct ifnet		*ifp;
1111	struct tl_softc		*sc;
1112	int			unit, error = 0, rid;
1113	u_char			eaddr[6];
1114
1115	vid = pci_get_vendor(dev);
1116	did = pci_get_device(dev);
1117	sc = device_get_softc(dev);
1118	unit = device_get_unit(dev);
1119
1120	t = tl_devs;
1121	while(t->tl_name != NULL) {
1122		if (vid == t->tl_vid && did == t->tl_did)
1123			break;
1124		t++;
1125	}
1126
1127	if (t->tl_name == NULL) {
1128		device_printf(dev, "unknown device!?\n");
1129		return (ENXIO);
1130	}
1131
1132	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1133	    MTX_DEF);
1134
1135	/*
1136	 * Map control/status registers.
1137	 */
1138	pci_enable_busmaster(dev);
1139
1140#ifdef TL_USEIOSPACE
1141
1142	rid = TL_PCI_LOIO;
1143	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1144		RF_ACTIVE);
1145
1146	/*
1147	 * Some cards have the I/O and memory mapped address registers
1148	 * reversed. Try both combinations before giving up.
1149	 */
1150	if (sc->tl_res == NULL) {
1151		rid = TL_PCI_LOMEM;
1152		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1153		    RF_ACTIVE);
1154	}
1155#else
1156	rid = TL_PCI_LOMEM;
1157	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1158	    RF_ACTIVE);
1159	if (sc->tl_res == NULL) {
1160		rid = TL_PCI_LOIO;
1161		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1162		    RF_ACTIVE);
1163	}
1164#endif
1165
1166	if (sc->tl_res == NULL) {
1167		device_printf(dev, "couldn't map ports/memory\n");
1168		error = ENXIO;
1169		goto fail;
1170	}
1171
1172	sc->tl_btag = rman_get_bustag(sc->tl_res);
1173	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);
1174
1175#ifdef notdef
1176	/*
1177	 * The ThunderLAN manual suggests jacking the PCI latency
1178	 * timer all the way up to its maximum value. I'm not sure
1179	 * if this is really necessary, but what the manual wants,
1180	 * the manual gets.
1181	 */
1182	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1183	command |= 0x0000FF00;
1184	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1185#endif
1186
1187	/* Allocate interrupt */
1188	rid = 0;
1189	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1190	    RF_SHAREABLE | RF_ACTIVE);
1191
1192	if (sc->tl_irq == NULL) {
1193		device_printf(dev, "couldn't map interrupt\n");
1194		error = ENXIO;
1195		goto fail;
1196	}
1197
1198	/*
1199	 * Now allocate memory for the TX and RX lists.
1200	 */
1201	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1202	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1203
1204	if (sc->tl_ldata == NULL) {
1205		device_printf(dev, "no memory for list buffers!\n");
1206		error = ENXIO;
1207		goto fail;
1208	}
1209
1210	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1211
1212	sc->tl_dinfo = t;
1213	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
1214		sc->tl_eeaddr = TL_EEPROM_EADDR;
1215	if (t->tl_vid == OLICOM_VENDORID)
1216		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1217
1218	/* Reset the adapter. */
1219	tl_softreset(sc, 1);
1220	tl_hardreset(dev);
1221	tl_softreset(sc, 1);
1222
1223	/*
1224	 * Get station address from the EEPROM.
1225	 */
1226	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1227		device_printf(dev, "failed to read station address\n");
1228		error = ENXIO;
1229		goto fail;
1230	}
1231
1232        /*
1233         * XXX Olicom, in its desire to be different from the
1234         * rest of the world, has done strange things with the
1235         * encoding of the station address in the EEPROM. First
1236         * of all, they store the address at offset 0xF8 rather
1237         * than at 0x83 like the ThunderLAN manual suggests.
1238         * Second, they store the address in three 16-bit words in
1239         * network byte order, as opposed to storing it sequentially
1240         * like all the other ThunderLAN cards. In order to get
1241         * the station address in a form that matches what the Olicom
1242         * diagnostic utility specifies, we have to byte-swap each
1243         * word. To make things even more confusing, neither 00:00:28
1244         * nor 00:00:24 appear in the IEEE OUI database.
1245         */
1246        if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
1247                for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1248                        u_int16_t               *p;
1249                        p = (u_int16_t *)&eaddr[i];
1250                        *p = ntohs(*p);
1251                }
1252        }
1253
1254	ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1255	if (ifp == NULL) {
1256		device_printf(dev, "can not if_alloc()\n");
1257		error = ENOSPC;
1258		goto fail;
1259	}
1260	ifp->if_softc = sc;
1261	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1262	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1263	ifp->if_ioctl = tl_ioctl;
1264	ifp->if_start = tl_start;
1265	ifp->if_watchdog = tl_watchdog;
1266	ifp->if_init = tl_init;
1267	ifp->if_mtu = ETHERMTU;
1268	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1269	callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);
1270
1271	/* Reset the adapter again. */
1272	tl_softreset(sc, 1);
1273	tl_hardreset(dev);
1274	tl_softreset(sc, 1);
1275
1276	/*
1277	 * Do MII setup. If no PHYs are found, then this is a
1278	 * bitrate ThunderLAN chip that only supports 10baseT
1279	 * and AUI/BNC.
1280	 */
1281	if (mii_phy_probe(dev, &sc->tl_miibus,
1282	    tl_ifmedia_upd, tl_ifmedia_sts)) {
1283		struct ifmedia		*ifm;
1284		sc->tl_bitrate = 1;
1285		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1286		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1287		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1288		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1289		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1290		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1291		/* Reset again, this time setting bitrate mode. */
1292		tl_softreset(sc, 1);
1293		ifm = &sc->ifmedia;
1294		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1295		tl_ifmedia_upd(ifp);
1296	}
1297
1298	/*
1299	 * Call MI attach routine.
1300	 */
1301	ether_ifattach(ifp, eaddr);
1302
1303	/* Hook interrupt last to avoid having to lock softc */
1304	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1305	    tl_intr, sc, &sc->tl_intrhand);
1306
1307	if (error) {
1308		device_printf(dev, "couldn't set up irq\n");
1309		ether_ifdetach(ifp);
1310		goto fail;
1311	}
1312
1313fail:
1314	if (error)
1315		tl_detach(dev);
1316
1317	return(error);
1318}
1319
1320/*
1321 * Shutdown hardware and free up resources. This can be called any
1322 * time after the mutex has been initialized. It is called in both
1323 * the error case in attach and the normal detach case so it needs
1324 * to be careful about only freeing resources that have actually been
1325 * allocated.
1326 */
1327static int
1328tl_detach(dev)
1329	device_t		dev;
1330{
1331	struct tl_softc		*sc;
1332	struct ifnet		*ifp;
1333
1334	sc = device_get_softc(dev);
1335	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1336	ifp = sc->tl_ifp;
1337
1338	/* These should only be active if attach succeeded */
1339	if (device_is_attached(dev)) {
1340		TL_LOCK(sc);
1341		tl_stop(sc);
1342		TL_UNLOCK(sc);
1343		callout_drain(&sc->tl_stat_callout);
1344		ether_ifdetach(ifp);
1345	}
1346	if (sc->tl_miibus)
1347		device_delete_child(dev, sc->tl_miibus);
1348	bus_generic_detach(dev);
1349
1350	if (sc->tl_ldata)
1351		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1352	if (sc->tl_bitrate)
1353		ifmedia_removeall(&sc->ifmedia);
1354
1355	if (sc->tl_intrhand)
1356		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1357	if (sc->tl_irq)
1358		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1359	if (sc->tl_res)
1360		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1361
1362	if (ifp)
1363		if_free(ifp);
1364
1365	mtx_destroy(&sc->tl_mtx);
1366
1367	return(0);
1368}
1369
1370/*
1371 * Initialize the transmit lists.
1372 */
1373static int
1374tl_list_tx_init(sc)
1375	struct tl_softc		*sc;
1376{
1377	struct tl_chain_data	*cd;
1378	struct tl_list_data	*ld;
1379	int			i;
1380
1381	cd = &sc->tl_cdata;
1382	ld = sc->tl_ldata;
1383	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1384		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1385		if (i == (TL_TX_LIST_CNT - 1))
1386			cd->tl_tx_chain[i].tl_next = NULL;
1387		else
1388			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1389	}
1390
1391	cd->tl_tx_free = &cd->tl_tx_chain[0];
1392	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1393	sc->tl_txeoc = 1;
1394
1395	return(0);
1396}
1397
1398/*
1399 * Initialize the RX lists and allocate mbufs for them.
1400 */
1401static int
1402tl_list_rx_init(sc)
1403	struct tl_softc		*sc;
1404{
1405	struct tl_chain_data	*cd;
1406	struct tl_list_data	*ld;
1407	int			i;
1408
1409	cd = &sc->tl_cdata;
1410	ld = sc->tl_ldata;
1411
1412	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1413		cd->tl_rx_chain[i].tl_ptr =
1414			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1415		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1416			return(ENOBUFS);
1417		if (i == (TL_RX_LIST_CNT - 1)) {
1418			cd->tl_rx_chain[i].tl_next = NULL;
1419			ld->tl_rx_list[i].tlist_fptr = 0;
1420		} else {
1421			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1422			ld->tl_rx_list[i].tlist_fptr =
1423					vtophys(&ld->tl_rx_list[i + 1]);
1424		}
1425	}
1426
1427	cd->tl_rx_head = &cd->tl_rx_chain[0];
1428	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1429
1430	return(0);
1431}
1432
1433static int
1434tl_newbuf(sc, c)
1435	struct tl_softc		*sc;
1436	struct tl_chain_onefrag	*c;
1437{
1438	struct mbuf		*m_new = NULL;
1439
1440	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1441	if (m_new == NULL)
1442		return(ENOBUFS);
1443
1444#ifdef __alpha__
1445	m_new->m_data += 2;
1446#endif
1447
1448	c->tl_mbuf = m_new;
1449	c->tl_next = NULL;
1450	c->tl_ptr->tlist_frsize = MCLBYTES;
1451	c->tl_ptr->tlist_fptr = 0;
1452	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1453	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1454	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1455
1456	return(0);
1457}
1458/*
1459 * Interrupt handler for RX 'end of frame' condition (EOF). This
1460 * tells us that a full ethernet frame has been captured and we need
1461 * to handle it.
1462 *
1463 * Reception is done using 'lists' which consist of a header and a
1464 * series of 10 data count/data address pairs that point to buffers.
1465 * Initially you're supposed to create a list, populate it with pointers
1466 * to buffers, then load the physical address of the list into the
1467 * ch_parm register. The adapter is then supposed to DMA the received
1468 * frame into the buffers for you.
1469 *
1470 * To make things as fast as possible, we have the chip DMA directly
1471 * into mbufs. This saves us from having to do a buffer copy: we can
1472 * just hand the mbufs directly to ether_input(). Once the frame has
1473 * been sent on its way, the 'list' structure is assigned a new buffer
1474 * and moved to the end of the RX chain. As long as we stay ahead of
1475 * the chip, it will always think it has an endless receive channel.
1476 *
1477 * If we happen to fall behind and the chip manages to fill up all of
1478 * the buffers, it will generate an end of channel interrupt and wait
1479 * for us to empty the chain and restart the receiver.
1480 */
1481static int
1482tl_intvec_rxeof(xsc, type)
1483	void			*xsc;
1484	u_int32_t		type;
1485{
1486	struct tl_softc		*sc;
1487	int			r = 0, total_len = 0;
1488	struct ether_header	*eh;
1489	struct mbuf		*m;
1490	struct ifnet		*ifp;
1491	struct tl_chain_onefrag	*cur_rx;
1492
1493	sc = xsc;
1494	ifp = sc->tl_ifp;
1495
1496	TL_LOCK_ASSERT(sc);
1497
1498	while(sc->tl_cdata.tl_rx_head != NULL) {
1499		cur_rx = sc->tl_cdata.tl_rx_head;
1500		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1501			break;
1502		r++;
1503		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1504		m = cur_rx->tl_mbuf;
1505		total_len = cur_rx->tl_ptr->tlist_frsize;
1506
1507		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1508			ifp->if_ierrors++;
1509			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1510			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1511			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1512			continue;
1513		}
1514
1515		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1516						vtophys(cur_rx->tl_ptr);
1517		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1518		sc->tl_cdata.tl_rx_tail = cur_rx;
1519
1520		/*
1521		 * Note: when the ThunderLAN chip is in 'capture all
1522		 * frames' mode, it will receive its own transmissions.
1523		 * We don't need to process our own transmissions,
1524		 * so we drop them here and continue.
1525		 */
1526		eh = mtod(m, struct ether_header *);
1527		/*if (ifp->if_flags & IFF_PROMISC && */
1528		if (!bcmp(eh->ether_shost, IFP2ENADDR(sc->tl_ifp),
1529		 					ETHER_ADDR_LEN)) {
1530				m_freem(m);
1531				continue;
1532		}
1533
1534		m->m_pkthdr.rcvif = ifp;
1535		m->m_pkthdr.len = m->m_len = total_len;
1536
1537		TL_UNLOCK(sc);
1538		(*ifp->if_input)(ifp, m);
1539		TL_LOCK(sc);
1540	}
1541
1542	return(r);
1543}
1544
1545/*
1546 * The RX-EOC condition hits when the ch_parm address hasn't been
1547 * initialized or the adapter reached a list with a forward pointer
1548 * of 0 (which indicates the end of the chain). In our case, this means
1549 * the card has hit the end of the receive buffer chain and we need to
1550 * empty out the buffers and shift the pointer back to the beginning again.
1551 */
1552static int
1553tl_intvec_rxeoc(xsc, type)
1554	void			*xsc;
1555	u_int32_t		type;
1556{
1557	struct tl_softc		*sc;
1558	int			r;
1559	struct tl_chain_data	*cd;
1560
1561
1562	sc = xsc;
1563	cd = &sc->tl_cdata;
1564
1565	/* Flush out the receive queue and ack RXEOF interrupts. */
1566	r = tl_intvec_rxeof(xsc, type);
1567	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1568	r = 1;
1569	cd->tl_rx_head = &cd->tl_rx_chain[0];
1570	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1571	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1572	r |= (TL_CMD_GO|TL_CMD_RT);
1573	return(r);
1574}
1575
1576static int
1577tl_intvec_txeof(xsc, type)
1578	void			*xsc;
1579	u_int32_t		type;
1580{
1581	struct tl_softc		*sc;
1582	int			r = 0;
1583	struct tl_chain		*cur_tx;
1584
1585	sc = xsc;
1586
1587	/*
1588	 * Go through our tx list and free mbufs for those
1589	 * frames that have been sent.
1590	 */
1591	while (sc->tl_cdata.tl_tx_head != NULL) {
1592		cur_tx = sc->tl_cdata.tl_tx_head;
1593		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1594			break;
1595		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1596
1597		r++;
1598		m_freem(cur_tx->tl_mbuf);
1599		cur_tx->tl_mbuf = NULL;
1600
1601		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1602		sc->tl_cdata.tl_tx_free = cur_tx;
1603		if (!cur_tx->tl_ptr->tlist_fptr)
1604			break;
1605	}
1606
1607	return(r);
1608}
1609
1610/*
1611 * The transmit end of channel interrupt. The adapter triggers this
1612 * interrupt to tell us it hit the end of the current transmit list.
1613 *
1614 * A note about this: it's possible for a condition to arise where
1615 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1616 * You have to avoid this since the chip expects things to go in a
1617 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1618 * When the TXEOF handler is called, it will free all of the transmitted
1619 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1620 * interrupt should be received and acknowledged before any more frames
1621 * are queued for transmission. If tl_start() is called after TXEOF
1622 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1623 * it could attempt to issue a transmit command prematurely.
1624 *
1625 * To guard against this, tl_start() will only issue transmit commands
1626 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1627 * can set this flag once tl_start() has cleared it.
1628 */
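/*
 * In sketch form, the guard described above amounts to something like
 * the following at the end of tl_start_locked() ('sc' being the softc);
 * this is only a paraphrase of the logic, not a copy of the code:
 *
 *	if (cur_tx != NULL && sc->tl_txeoc) {
 *		sc->tl_txeoc = 0;
 *		CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
 *		CMD_PUT(sc, TL_CMD_GO | TL_CMD_INTSON);
 *	}
 *
 * so a transmit command is only ever issued while tl_txeoc is set, and
 * only tl_intvec_txeoc() ever sets the flag again.
 */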
1629static int
1630tl_intvec_txeoc(xsc, type)
1631	void			*xsc;
1632	u_int32_t		type;
1633{
1634	struct tl_softc		*sc;
1635	struct ifnet		*ifp;
1636	u_int32_t		cmd;
1637
1638	sc = xsc;
1639	ifp = sc->tl_ifp;
1640
1641	/* Clear the timeout timer. */
1642	ifp->if_timer = 0;
1643
1644	if (sc->tl_cdata.tl_tx_head == NULL) {
1645		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1646		sc->tl_cdata.tl_tx_tail = NULL;
1647		sc->tl_txeoc = 1;
1648	} else {
1649		sc->tl_txeoc = 0;
1650		/* First we have to ack the EOC interrupt. */
1651		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1652		/* Then load the address of the next TX list. */
1653		CSR_WRITE_4(sc, TL_CH_PARM,
1654		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1655		/* Restart TX channel. */
1656		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1657		cmd &= ~TL_CMD_RT;
1658		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1659		CMD_PUT(sc, cmd);
1660		return(0);
1661	}
1662
1663	return(1);
1664}
1665
1666static int
1667tl_intvec_adchk(xsc, type)
1668	void			*xsc;
1669	u_int32_t		type;
1670{
1671	struct tl_softc		*sc;
1672
1673	sc = xsc;
1674
1675	if (type)
1676		if_printf(sc->tl_ifp, "adapter check: %x\n",
1677			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1678
1679	tl_softreset(sc, 1);
1680	tl_stop(sc);
1681	tl_init_locked(sc);
1682	CMD_SET(sc, TL_CMD_INTSON);
1683
1684	return(0);
1685}
1686
1687static int
1688tl_intvec_netsts(xsc, type)
1689	void			*xsc;
1690	u_int32_t		type;
1691{
1692	struct tl_softc		*sc;
1693	u_int16_t		netsts;
1694
1695	sc = xsc;
1696
1697	netsts = tl_dio_read16(sc, TL_NETSTS);
1698	tl_dio_write16(sc, TL_NETSTS, netsts);
1699
1700	if_printf(sc->tl_ifp, "network status: %x\n", netsts);
1701
1702	return(1);
1703}
1704
1705static void
1706tl_intr(xsc)
1707	void			*xsc;
1708{
1709	struct tl_softc		*sc;
1710	struct ifnet		*ifp;
1711	int			r = 0;
1712	u_int32_t		type = 0;
1713	u_int16_t		ints = 0;
1714	u_int8_t		ivec = 0;
1715
1716	sc = xsc;
1717	TL_LOCK(sc);
1718
1719	/* Disable interrupts */
1720	ints = CSR_READ_2(sc, TL_HOST_INT);
1721	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1722	type = (ints << 16) & 0xFFFF0000;
1723	ivec = (ints & TL_VEC_MASK) >> 5;
1724	ints = (ints & TL_INT_MASK) >> 2;
1725
1726	ifp = sc->tl_ifp;
1727
1728	switch(ints) {
1729	case (TL_INTR_INVALID):
1730#ifdef DIAGNOSTIC
1731		if_printf(ifp, "got an invalid interrupt!\n");
1732#endif
1733		/* Re-enable interrupts but don't ack this one. */
1734		CMD_PUT(sc, type);
1735		r = 0;
1736		break;
1737	case (TL_INTR_TXEOF):
1738		r = tl_intvec_txeof((void *)sc, type);
1739		break;
1740	case (TL_INTR_TXEOC):
1741		r = tl_intvec_txeoc((void *)sc, type);
1742		break;
1743	case (TL_INTR_STATOFLOW):
1744		tl_stats_update(sc);
1745		r = 1;
1746		break;
1747	case (TL_INTR_RXEOF):
1748		r = tl_intvec_rxeof((void *)sc, type);
1749		break;
1750	case (TL_INTR_DUMMY):
1751		if_printf(ifp, "got a dummy interrupt\n");
1752		r = 1;
1753		break;
1754	case (TL_INTR_ADCHK):
1755		if (ivec)
1756			r = tl_intvec_adchk((void *)sc, type);
1757		else
1758			r = tl_intvec_netsts((void *)sc, type);
1759		break;
1760	case (TL_INTR_RXEOC):
1761		r = tl_intvec_rxeoc((void *)sc, type);
1762		break;
1763	default:
1764		if_printf(ifp, "bogus interrupt type\n");
1765		break;
1766	}
1767
1768	/* Re-enable interrupts */
1769	if (r) {
1770		CMD_PUT(sc, TL_CMD_ACK | r | type);
1771	}
1772
1773	if (ifp->if_snd.ifq_head != NULL)
1774		tl_start_locked(ifp);
1775
1776	TL_UNLOCK(sc);
1777
1778	return;
1779}
1780
1781static void
1782tl_stats_update(xsc)
1783	void			*xsc;
1784{
1785	struct tl_softc		*sc;
1786	struct ifnet		*ifp;
1787	struct tl_stats		tl_stats;
1788	struct mii_data		*mii;
1789	u_int32_t		*p;
1790
1791	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1792
1793	sc = xsc;
1794	TL_LOCK_ASSERT(sc);
1795	ifp = sc->tl_ifp;
1796
1797	p = (u_int32_t *)&tl_stats;
1798
1799	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1800	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1801	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1802	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1803	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1804	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1805
1806	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1807	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1808				tl_stats.tl_tx_multi_collision;
1809	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1810	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1811			    tl_rx_overrun(tl_stats);
1812	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1813
1814	if (tl_tx_underrun(tl_stats)) {
1815		u_int8_t		tx_thresh;
1816		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1817		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1818			tx_thresh >>= 4;
1819			tx_thresh++;
1820			if_printf(ifp, "tx underrun -- increasing "
1821			    "tx threshold to %d bytes\n",
1822			    (64 * (tx_thresh * 4)));
1823			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1824			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1825		}
1826	}
1827
1828	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1829
1830	if (!sc->tl_bitrate) {
1831		mii = device_get_softc(sc->tl_miibus);
1832		mii_tick(mii);
1833	}
1834
1835	return;
1836}
1837
1838/*
1839 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1840 * pointers to the fragment pointers.
1841 */
1842static int
1843tl_encap(sc, c, m_head)
1844	struct tl_softc		*sc;
1845	struct tl_chain		*c;
1846	struct mbuf		*m_head;
1847{
1848	int			frag = 0;
1849	struct tl_frag		*f = NULL;
1850	int			total_len;
1851	struct mbuf		*m;
1852	struct ifnet		*ifp = sc->tl_ifp;
1853
1854	/*
1855 	 * Start packing the mbufs in this chain into
1856	 * the fragment pointers. Stop when we run out
1857 	 * of fragments or hit the end of the mbuf chain.
1858	 */
1859	m = m_head;
1860	total_len = 0;
1861
1862	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1863		if (m->m_len != 0) {
1864			if (frag == TL_MAXFRAGS)
1865				break;
1866			total_len+= m->m_len;
1867			c->tl_ptr->tl_frag[frag].tlist_dadr =
1868				vtophys(mtod(m, vm_offset_t));
1869			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1870			frag++;
1871		}
1872	}
1873
1874	/*
1875	 * Handle special cases.
1876	 * Special case #1: we used up all 10 fragments, but
1877	 * we have more mbufs left in the chain. Copy the
1878	 * data into an mbuf cluster. Note that we don't
1879	 * bother clearing the values in the other fragment
1880	 * pointers/counters; it wouldn't gain us anything,
1881	 * and would waste cycles.
1882	 */
1883	if (m != NULL) {
1884		struct mbuf		*m_new = NULL;
1885
1886		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1887		if (m_new == NULL) {
1888			if_printf(ifp, "no memory for tx list\n");
1889			return(1);
1890		}
1891		if (m_head->m_pkthdr.len > MHLEN) {
1892			MCLGET(m_new, M_DONTWAIT);
1893			if (!(m_new->m_flags & M_EXT)) {
1894				m_freem(m_new);
1895				if_printf(ifp, "no memory for tx list\n");
1896				return(1);
1897			}
1898		}
1899		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1900					mtod(m_new, caddr_t));
1901		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1902		m_freem(m_head);
1903		m_head = m_new;
1904		f = &c->tl_ptr->tl_frag[0];
1905		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1906		f->tlist_dcnt = total_len = m_new->m_len;
1907		frag = 1;
1908	}
1909
1910	/*
1911	 * Special case #2: the frame is smaller than the minimum
1912	 * frame size. We have to pad it to make the chip happy.
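	 * The pad bytes come from a scratch buffer inside the shared list
	 * data area (tl_ldata->tl_pad), so no extra mbuf has to be
	 * allocated for the padding.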
1913	 */
1914	if (total_len < TL_MIN_FRAMELEN) {
1915		if (frag == TL_MAXFRAGS)
1916			if_printf(ifp,
1917			    "all frags filled but frame still too small!\n");
1918		f = &c->tl_ptr->tl_frag[frag];
1919		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1920		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1921		total_len += f->tlist_dcnt;
1922		frag++;
1923	}
1924
1925	c->tl_mbuf = m_head;
1926	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1927	c->tl_ptr->tlist_frsize = total_len;
1928	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1929	c->tl_ptr->tlist_fptr = 0;
1930
1931	return(0);
1932}
1933
1934/*
1935 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1936 * to the mbuf data regions directly in the transmit lists. We also save a
1937 * copy of the pointers since the transmit list fragment pointers are
1938 * physical addresses.
1939 */
1940static void
1941tl_start(ifp)
1942	struct ifnet		*ifp;
1943{
1944	struct tl_softc		*sc;
1945
1946	sc = ifp->if_softc;
1947	TL_LOCK(sc);
1948	tl_start_locked(ifp);
1949	TL_UNLOCK(sc);
1950}
1951
1952static void
1953tl_start_locked(ifp)
1954	struct ifnet		*ifp;
1955{
1956	struct tl_softc		*sc;
1957	struct mbuf		*m_head = NULL;
1958	u_int32_t		cmd;
1959	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1960
1961	sc = ifp->if_softc;
1962	TL_LOCK_ASSERT(sc);
1963
1964	/*
1965	 * Check for an available queue slot. If there are none,
1966	 * punt.
1967	 */
1968	if (sc->tl_cdata.tl_tx_free == NULL) {
1969		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1970		return;
1971	}
1972
1973	start_tx = sc->tl_cdata.tl_tx_free;
1974
1975	while(sc->tl_cdata.tl_tx_free != NULL) {
1976		IF_DEQUEUE(&ifp->if_snd, m_head);
1977		if (m_head == NULL)
1978			break;
1979
1980		/* Pick a chain member off the free list. */
1981		cur_tx = sc->tl_cdata.tl_tx_free;
1982		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1983
1984		cur_tx->tl_next = NULL;
1985
1986		/* Pack the data into the list. */
1987		tl_encap(sc, cur_tx, m_head);
1988
1989		/* Chain it together */
1990		if (prev != NULL) {
1991			prev->tl_next = cur_tx;
1992			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1993		}
1994		prev = cur_tx;
1995
1996		/*
1997		 * If there's a BPF listener, bounce a copy of this frame
1998		 * to him.
1999		 */
2000		BPF_MTAP(ifp, cur_tx->tl_mbuf);
2001	}
2002
2003	/*
2004	 * If there are no packets queued, bail.
2005	 */
2006	if (cur_tx == NULL)
2007		return;
2008
2009	/*
2010	 * That's all we can stands, we can't stands no more.
2011	 * If there are no other transfers pending, then issue the
2012	 * TX GO command to the adapter to start things moving.
2013	 * Otherwise, just leave the data in the queue and let
2014	 * the EOF/EOC interrupt handler send.
2015	 */
2016	if (sc->tl_cdata.tl_tx_head == NULL) {
2017		sc->tl_cdata.tl_tx_head = start_tx;
2018		sc->tl_cdata.tl_tx_tail = cur_tx;
2019
2020		if (sc->tl_txeoc) {
2021			sc->tl_txeoc = 0;
2022			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
2023			cmd = CSR_READ_4(sc, TL_HOSTCMD);
2024			cmd &= ~TL_CMD_RT;
2025			cmd |= TL_CMD_GO|TL_CMD_INTSON;
2026			CMD_PUT(sc, cmd);
2027		}
2028	} else {
2029		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
2030		sc->tl_cdata.tl_tx_tail = cur_tx;
2031	}
2032
2033	/*
2034	 * Set a timeout in case the chip goes out to lunch.
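	 * tl_watchdog() will run if the transmit path does not clear
	 * if_timer within 5 seconds.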
2035	 */
2036	ifp->if_timer = 5;
2037
2038	return;
2039}
2040
2041static void
2042tl_init(xsc)
2043	void			*xsc;
2044{
2045	struct tl_softc		*sc = xsc;
2046
2047	TL_LOCK(sc);
2048	tl_init_locked(sc);
2049	TL_UNLOCK(sc);
2050}
2051
2052static void
2053tl_init_locked(sc)
2054	struct tl_softc		*sc;
2055{
2056	struct ifnet		*ifp = sc->tl_ifp;
2057	struct mii_data		*mii;
2058
2059	TL_LOCK_ASSERT(sc);
2060
2063	/*
2064	 * Cancel pending I/O.
2065	 */
2066	tl_stop(sc);
2067
2068	/* Initialize TX FIFO threshold */
2069	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
2070	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2071
2072	/* Set PCI burst size */
2073	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2074
2075	/*
2076	 * Set 'capture all frames' bit for promiscuous mode.
2077	 */
2078	if (ifp->if_flags & IFF_PROMISC)
2079		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2080	else
2081		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2082
2083	/*
2084	 * Set capture broadcast bit to capture broadcast frames.
2085	 */
2086	if (ifp->if_flags & IFF_BROADCAST)
2087		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2088	else
2089		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2090
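	/* Cap received frames at one mbuf cluster, the size of our RX buffers. */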
2091	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2092
2093	/* Init our MAC address */
2094	tl_setfilt(sc, (caddr_t)&IFP2ENADDR(sc->tl_ifp), 0);
2095
2096	/* Init multicast filter, if needed. */
2097	tl_setmulti(sc);
2098
2099	/* Init circular RX list. */
2100	if (tl_list_rx_init(sc) == ENOBUFS) {
2101		if_printf(ifp,
2102		    "initialization failed: no memory for rx buffers\n");
2103		tl_stop(sc);
2104		return;
2105	}
2106
2107	/* Init TX pointers. */
2108	tl_list_tx_init(sc);
2109
2110	/* Enable PCI interrupts. */
2111	CMD_SET(sc, TL_CMD_INTSON);
2112
2113	/* Select the receive channel and load the physical address of the RX list */
2114	CMD_SET(sc, TL_CMD_RT);
2115	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2116
2117	if (!sc->tl_bitrate) {
2118		if (sc->tl_miibus != NULL) {
2119			mii = device_get_softc(sc->tl_miibus);
2120			mii_mediachg(mii);
2121		}
2122	} else {
2123		tl_ifmedia_upd(ifp);
2124	}
2125
2126	/* Send the RX go command */
2127	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2128
2129	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2130	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2131
2132	/* Start the stats update counter */
2133	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
2134
2135	return;
2136}
2137
2138/*
2139 * Set media options.
2140 */
2141static int
2142tl_ifmedia_upd(ifp)
2143	struct ifnet		*ifp;
2144{
2145	struct tl_softc		*sc;
2146	struct mii_data		*mii = NULL;
2147
2148	sc = ifp->if_softc;
2149
2150	TL_LOCK(sc);
2151	if (sc->tl_bitrate)
2152		tl_setmode(sc, sc->ifmedia.ifm_media);
2153	else {
2154		mii = device_get_softc(sc->tl_miibus);
2155		mii_mediachg(mii);
2156	}
2157	TL_UNLOCK(sc);
2158
2159	return(0);
2160}
2161
2162/*
2163 * Report current media status.
2164 */
2165static void
2166tl_ifmedia_sts(ifp, ifmr)
2167	struct ifnet		*ifp;
2168	struct ifmediareq	*ifmr;
2169{
2170	struct tl_softc		*sc;
2171	struct mii_data		*mii;
2172
2173	sc = ifp->if_softc;
2174
2175	TL_LOCK(sc);
2176	ifmr->ifm_active = IFM_ETHER;
2177
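	/*
	 * In bitrate (non-MII) mode the current media is read back from
	 * the MTXD bits in the ACOMMIT register; otherwise ask the MII
	 * layer.
	 */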
2178	if (sc->tl_bitrate) {
2179		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2180			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2181		else
2182			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2183		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2184			ifmr->ifm_active |= IFM_HDX;
2185		else
2186			ifmr->ifm_active |= IFM_FDX;
2188	} else {
2189		mii = device_get_softc(sc->tl_miibus);
2190		mii_pollstat(mii);
2191		ifmr->ifm_active = mii->mii_media_active;
2192		ifmr->ifm_status = mii->mii_media_status;
2193	}
2194	TL_UNLOCK(sc);
2195
2196	return;
2197}
2198
2199static int
2200tl_ioctl(ifp, command, data)
2201	struct ifnet		*ifp;
2202	u_long			command;
2203	caddr_t			data;
2204{
2205	struct tl_softc		*sc = ifp->if_softc;
2206	struct ifreq		*ifr = (struct ifreq *) data;
2207	int			error = 0;
2208
2209	switch(command) {
2210	case SIOCSIFFLAGS:
2211		TL_LOCK(sc);
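		/*
		 * If the interface is up and running and only the PROMISC
		 * flag has changed, just toggle the 'capture all frames'
		 * bit rather than doing a full reinit.
		 */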
2212		if (ifp->if_flags & IFF_UP) {
2213			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2214			    ifp->if_flags & IFF_PROMISC &&
2215			    !(sc->tl_if_flags & IFF_PROMISC)) {
2216				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2217				tl_setmulti(sc);
2218			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2219			    !(ifp->if_flags & IFF_PROMISC) &&
2220			    sc->tl_if_flags & IFF_PROMISC) {
2221				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2222				tl_setmulti(sc);
2223			} else
2224				tl_init_locked(sc);
2225		} else {
2226			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2227				tl_stop(sc);
2228			}
2229		}
2230		sc->tl_if_flags = ifp->if_flags;
2231		TL_UNLOCK(sc);
2232		error = 0;
2233		break;
2234	case SIOCADDMULTI:
2235	case SIOCDELMULTI:
2236		TL_LOCK(sc);
2237		tl_setmulti(sc);
2238		TL_UNLOCK(sc);
2239		error = 0;
2240		break;
2241	case SIOCSIFMEDIA:
2242	case SIOCGIFMEDIA:
2243		if (sc->tl_bitrate)
2244			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2245		else {
2246			struct mii_data		*mii;
2247			mii = device_get_softc(sc->tl_miibus);
2248			error = ifmedia_ioctl(ifp, ifr,
2249			    &mii->mii_media, command);
2250		}
2251		break;
2252	default:
2253		error = ether_ioctl(ifp, command, data);
2254		break;
2255	}
2256
2257	return(error);
2258}
2259
2260static void
2261tl_watchdog(ifp)
2262	struct ifnet		*ifp;
2263{
2264	struct tl_softc		*sc;
2265
2266	sc = ifp->if_softc;
2267
2268	if_printf(ifp, "device timeout\n");
2269
2270	TL_LOCK(sc);
2271	ifp->if_oerrors++;
2272
2273	tl_softreset(sc, 1);
2274	tl_init_locked(sc);
2275	TL_UNLOCK(sc);
2276
2277	return;
2278}
2279
2280/*
2281 * Stop the adapter and free any mbufs allocated to the
2282 * RX and TX lists.
2283 */
2284static void
2285tl_stop(sc)
2286	struct tl_softc		*sc;
2287{
2288	register int		i;
2289	struct ifnet		*ifp;
2290
2291	TL_LOCK_ASSERT(sc);
2292
2293	ifp = sc->tl_ifp;
2294
2295	/* Stop the stats updater. */
2296	callout_stop(&sc->tl_stat_callout);
2297
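	/*
	 * The RT bit in the host command register selects which DMA
	 * channel a command applies to: receive when set, transmit when
	 * clear.  Stop both channels and clear their list pointers.
	 */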
2298	/* Stop the transmitter */
2299	CMD_CLR(sc, TL_CMD_RT);
2300	CMD_SET(sc, TL_CMD_STOP);
2301	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2302
2303	/* Stop the receiver */
2304	CMD_SET(sc, TL_CMD_RT);
2305	CMD_SET(sc, TL_CMD_STOP);
2306	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2307
2308	/*
2309	 * Disable host interrupts.
2310	 */
2311	CMD_SET(sc, TL_CMD_INTSOFF);
2312
2313	/*
2314	 * Clear list pointer.
2315	 */
2316	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2317
2318	/*
2319	 * Free the RX lists.
2320	 */
2321	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2322		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2323			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2324			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2325		}
2326	}
2327	bzero((char *)&sc->tl_ldata->tl_rx_list,
2328		sizeof(sc->tl_ldata->tl_rx_list));
2329
2330	/*
2331	 * Free the TX list buffers.
2332	 */
2333	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2334		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2335			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2336			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2337		}
2338	}
2339	bzero((char *)&sc->tl_ldata->tl_tx_list,
2340		sizeof(sc->tl_ldata->tl_tx_list));
2341
2342	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2343
2344	return;
2345}
2346
2347/*
2348 * Stop all chip I/O so that the kernel's probe routines don't
2349 * get confused by errant DMAs when rebooting.
2350 */
2351static void
2352tl_shutdown(dev)
2353	device_t		dev;
2354{
2355	struct tl_softc		*sc;
2356
2357	sc = device_get_softc(dev);
2358
2359	TL_LOCK(sc);
2360	tl_stop(sc);
2361	TL_UNLOCK(sc);
2362
2363	return;
2364}
2365