if_tl.c revision 227843
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/tl/if_tl.c 227843 2011-11-22 21:28:20Z marius $");
35
36/*
37 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39 * the National Semiconductor DP83840A physical interface and the
40 * Microchip Technology 24Cxx series serial EEPROM.
41 *
42 * Written using the following four documents:
43 *
44 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45 * National Semiconductor DP83840A data sheet (www.national.com)
46 * Microchip Technology 24C02C data sheet (www.microchip.com)
47 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48 *
49 * Written by Bill Paul <wpaul@ctr.columbia.edu>
50 * Electrical Engineering Department
51 * Columbia University, New York City
52 */
53/*
54 * Some notes about the ThunderLAN:
55 *
56 * The ThunderLAN controller is a single chip containing PCI controller
57 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
58 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
59 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
60 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
61 * to act as a complete ethernet interface.
62 *
63 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
64 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
65 * in full or half duplex. Some of the Compaq Deskpro machines use a
66 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
67 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
68 * concert with the ThunderLAN's internal PHY to provide full 10/100
69 * support. This is cheaper than using a standalone external PHY for both
70 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
71 * A serial EEPROM is also attached to the ThunderLAN chip to provide
72 * power-up default register settings and for storing the adapter's
73 * station address. Although not supported by this driver, the ThunderLAN
74 * chip can also be connected to token ring PHYs.
75 *
76 * The ThunderLAN has a set of registers which can be used to issue
77 * commands, acknowledge interrupts, and to manipulate other internal
78 * registers on its DIO bus. The primary registers can be accessed
79 * using either programmed I/O (inb/outb) or via PCI memory mapping,
80 * depending on how the card is configured during the PCI probing
81 * phase. It is even possible to have both PIO and memory mapped
82 * access turned on at the same time.
83 *
84 * Frame reception and transmission with the ThunderLAN chip is done
85 * using frame 'lists.' A list structure looks more or less like this:
86 *
87 * struct tl_frag {
88 *	u_int32_t		fragment_address;
89 *	u_int32_t		fragment_size;
90 * };
91 * struct tl_list {
92 *	u_int32_t		forward_pointer;
93 *	u_int16_t		cstat;
94 *	u_int16_t		frame_size;
95 *	struct tl_frag		fragments[10];
96 * };
97 *
98 * The forward pointer in the list header can be either a 0 or the address
99 * of another list, which allows several lists to be linked together. Each
100 * list contains up to 10 fragment descriptors. This means the chip allows
101 * ethernet frames to be broken up into up to 10 chunks for transfer to
102 * and from the SRAM. Note that the forward pointer and fragment buffer
103 * addresses are physical memory addresses, not virtual. Note also that
104 * a single ethernet frame can not span lists: if the host wants to
105 * transmit a frame and the frame data is split up over more than 10
106 * buffers, the frame has to collapsed before it can be transmitted.
107 *
108 * To receive frames, the driver sets up a number of lists and populates
109 * the fragment descriptors, then it sends an RX GO command to the chip.
110 * When a frame is received, the chip will DMA it into the memory regions
111 * specified by the fragment descriptors and then trigger an RX 'end of
112 * frame interrupt' when done. The driver may choose to use only one
113 * fragment per list; this may result is slighltly less efficient use
114 * of memory in exchange for improving performance.
115 *
116 * To transmit frames, the driver again sets up lists and fragment
117 * descriptors, only this time the buffers contain frame data that
118 * is to be DMA'ed into the chip instead of out of it. Once the chip
119 * has transfered the data into its on-board SRAM, it will trigger a
120 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
121 * interrupt when it reaches the end of the list.
122 */
123/*
124 * Some notes about this driver:
125 *
126 * The ThunderLAN chip provides a couple of different ways to organize
127 * reception, transmission and interrupt handling. The simplest approach
128 * is to use one list each for transmission and reception. In this mode,
129 * the ThunderLAN will generate two interrupts for every received frame
130 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
131 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
132 * performance to have to handle so many interrupts.
133 *
134 * Initially I wanted to create a circular list of receive buffers so
135 * that the ThunderLAN chip would think there was an infinitely long
136 * receive channel and never deliver an RXEOC interrupt. However this
137 * doesn't work correctly under heavy load: while the manual says the
138 * chip will trigger an RXEOF interrupt each time a frame is copied into
139 * memory, you can't count on the chip waiting around for you to acknowledge
140 * the interrupt before it starts trying to DMA the next frame. The result
141 * is that the chip might traverse the entire circular list and then wrap
142 * around before you have a chance to do anything about it. Consequently,
143 * the receive list is terminated (with a 0 in the forward pointer in the
144 * last element). Each time an RXEOF interrupt arrives, the used list
145 * is shifted to the end of the list. This gives the appearance of an
146 * infinitely large RX chain so long as the driver doesn't fall behind
147 * the chip and allow all of the lists to be filled up.
148 *
149 * If all the lists are filled, the adapter will deliver an RX 'end of
150 * channel' interrupt when it hits the 0 forward pointer at the end of
151 * the chain. The RXEOC handler then cleans out the RX chain and resets
152 * the list head pointer in the ch_parm register and restarts the receiver.
153 *
154 * For frame transmission, it is possible to program the ThunderLAN's
155 * transmit interrupt threshold so that the chip can acknowledge multiple
156 * lists with only a single TX EOF interrupt. This allows the driver to
157 * queue several frames in one shot, and only have to handle a total
158 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
159 * are transmitted. Frame transmission is done directly out of the
160 * mbufs passed to the tl_start() routine via the interface send queue.
161 * The driver simply sets up the fragment descriptors in the transmit
162 * lists to point to the mbuf data regions and sends a TX GO command.
163 *
164 * Note that since the RX and TX lists themselves are always used
165 * only by the driver, the are malloc()ed once at driver initialization
166 * time and never free()ed.
167 *
168 * Also, in order to remain as platform independent as possible, this
169 * driver uses memory mapped register access to manipulate the card
170 * as opposed to programmed I/O. This avoids the use of the inb/outb
171 * (and related) instructions which are specific to the i386 platform.
172 *
173 * Using these techniques, this driver achieves very high performance
174 * by minimizing the amount of interrupts generated during large
175 * transfers and by completely avoiding buffer copies. Frame transfer
176 * to and from the ThunderLAN chip is performed entirely by the chip
177 * itself thereby reducing the load on the host CPU.
178 */
179
180#include <sys/param.h>
181#include <sys/systm.h>
182#include <sys/sockio.h>
183#include <sys/mbuf.h>
184#include <sys/malloc.h>
185#include <sys/kernel.h>
186#include <sys/module.h>
187#include <sys/socket.h>
188
189#include <net/if.h>
190#include <net/if_arp.h>
191#include <net/ethernet.h>
192#include <net/if_dl.h>
193#include <net/if_media.h>
194#include <net/if_types.h>
195
196#include <net/bpf.h>
197
198#include <vm/vm.h>              /* for vtophys */
199#include <vm/pmap.h>            /* for vtophys */
200#include <machine/bus.h>
201#include <machine/resource.h>
202#include <sys/bus.h>
203#include <sys/rman.h>
204
205#include <dev/mii/mii.h>
206#include <dev/mii/mii_bitbang.h>
207#include <dev/mii/miivar.h>
208
209#include <dev/pci/pcireg.h>
210#include <dev/pci/pcivar.h>
211
212/*
213 * Default to using PIO register access mode to pacify certain
214 * laptop docking stations with built-in ThunderLAN chips that
215 * don't seem to handle memory mapped mode properly.
216 */
217#define TL_USEIOSPACE
218
219#include <dev/tl/if_tlreg.h>
220
221MODULE_DEPEND(tl, pci, 1, 1, 1);
222MODULE_DEPEND(tl, ether, 1, 1, 1);
223MODULE_DEPEND(tl, miibus, 1, 1, 1);
224
225/* "device miibus" required.  See GENERIC if you get errors here. */
226#include "miibus_if.h"
227
228/*
229 * Various supported device vendors/types and their names.
230 */
231
232static const struct tl_type const tl_devs[] = {
233	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
234		"Texas Instruments ThunderLAN" },
235	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
236		"Compaq Netelligent 10" },
237	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
238		"Compaq Netelligent 10/100" },
239	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
240		"Compaq Netelligent 10/100 Proliant" },
241	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
242		"Compaq Netelligent 10/100 Dual Port" },
243	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
244		"Compaq NetFlex-3/P Integrated" },
245	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
246		"Compaq NetFlex-3/P" },
247	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
248		"Compaq NetFlex 3/P w/ BNC" },
249	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
250		"Compaq Netelligent 10/100 TX Embedded UTP" },
251	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
252		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
253	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
254		"Compaq Netelligent 10/100 TX UTP" },
255	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
256		"Olicom OC-2183/2185" },
257	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
258		"Olicom OC-2325" },
259	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
260		"Olicom OC-2326 10/100 TX UTP" },
261	{ 0, 0, NULL }
262};
263
264static int tl_probe(device_t);
265static int tl_attach(device_t);
266static int tl_detach(device_t);
267static int tl_intvec_rxeoc(void *, u_int32_t);
268static int tl_intvec_txeoc(void *, u_int32_t);
269static int tl_intvec_txeof(void *, u_int32_t);
270static int tl_intvec_rxeof(void *, u_int32_t);
271static int tl_intvec_adchk(void *, u_int32_t);
272static int tl_intvec_netsts(void *, u_int32_t);
273
274static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
275static void tl_stats_update(void *);
276static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
277
278static void tl_intr(void *);
279static void tl_start(struct ifnet *);
280static void tl_start_locked(struct ifnet *);
281static int tl_ioctl(struct ifnet *, u_long, caddr_t);
282static void tl_init(void *);
283static void tl_init_locked(struct tl_softc *);
284static void tl_stop(struct tl_softc *);
285static void tl_watchdog(struct tl_softc *);
286static int tl_shutdown(device_t);
287static int tl_ifmedia_upd(struct ifnet *);
288static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
289
290static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
291static u_int8_t	tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
292static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
293
294static int tl_miibus_readreg(device_t, int, int);
295static int tl_miibus_writereg(device_t, int, int, int);
296static void tl_miibus_statchg(device_t);
297
298static void tl_setmode(struct tl_softc *, int);
299static uint32_t tl_mchash(const uint8_t *);
300static void tl_setmulti(struct tl_softc *);
301static void tl_setfilt(struct tl_softc *, caddr_t, int);
302static void tl_softreset(struct tl_softc *, int);
303static void tl_hardreset(device_t);
304static int tl_list_rx_init(struct tl_softc *);
305static int tl_list_tx_init(struct tl_softc *);
306
307static u_int8_t tl_dio_read8(struct tl_softc *, int);
308static u_int16_t tl_dio_read16(struct tl_softc *, int);
309static u_int32_t tl_dio_read32(struct tl_softc *, int);
310static void tl_dio_write8(struct tl_softc *, int, int);
311static void tl_dio_write16(struct tl_softc *, int, int);
312static void tl_dio_write32(struct tl_softc *, int, int);
313static void tl_dio_setbit(struct tl_softc *, int, int);
314static void tl_dio_clrbit(struct tl_softc *, int, int);
315static void tl_dio_setbit16(struct tl_softc *, int, int);
316static void tl_dio_clrbit16(struct tl_softc *, int, int);
317
318/*
319 * MII bit-bang glue
320 */
321static uint32_t tl_mii_bitbang_read(device_t);
322static void tl_mii_bitbang_write(device_t, uint32_t);
323
324static const struct mii_bitbang_ops tl_mii_bitbang_ops = {
325	tl_mii_bitbang_read,
326	tl_mii_bitbang_write,
327	{
328		TL_SIO_MDATA,	/* MII_BIT_MDO */
329		TL_SIO_MDATA,	/* MII_BIT_MDI */
330		TL_SIO_MCLK,	/* MII_BIT_MDC */
331		TL_SIO_MTXEN,	/* MII_BIT_DIR_HOST_PHY */
332		0,		/* MII_BIT_DIR_PHY_HOST */
333	}
334};
335
336#ifdef TL_USEIOSPACE
337#define TL_RES		SYS_RES_IOPORT
338#define TL_RID		TL_PCI_LOIO
339#else
340#define TL_RES		SYS_RES_MEMORY
341#define TL_RID		TL_PCI_LOMEM
342#endif
343
344static device_method_t tl_methods[] = {
345	/* Device interface */
346	DEVMETHOD(device_probe,		tl_probe),
347	DEVMETHOD(device_attach,	tl_attach),
348	DEVMETHOD(device_detach,	tl_detach),
349	DEVMETHOD(device_shutdown,	tl_shutdown),
350
351	/* MII interface */
352	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
353	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
354	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
355
356	DEVMETHOD_END
357};
358
359static driver_t tl_driver = {
360	"tl",
361	tl_methods,
362	sizeof(struct tl_softc)
363};
364
365static devclass_t tl_devclass;
366
367DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
368DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
369
370static u_int8_t tl_dio_read8(sc, reg)
371	struct tl_softc		*sc;
372	int			reg;
373{
374
375	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
376		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
377	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
378	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
379		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
380	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
381}
382
383static u_int16_t tl_dio_read16(sc, reg)
384	struct tl_softc		*sc;
385	int			reg;
386{
387
388	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
389		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
390	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
391	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
392		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
393	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
394}
395
396static u_int32_t tl_dio_read32(sc, reg)
397	struct tl_softc		*sc;
398	int			reg;
399{
400
401	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
402		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
403	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
404	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
405		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
406	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
407}
408
409static void tl_dio_write8(sc, reg, val)
410	struct tl_softc		*sc;
411	int			reg;
412	int			val;
413{
414
415	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
416		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
417	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
418	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
419		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
420	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
421}
422
423static void tl_dio_write16(sc, reg, val)
424	struct tl_softc		*sc;
425	int			reg;
426	int			val;
427{
428
429	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
430		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
431	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
432	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
433		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
434	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
435}
436
437static void tl_dio_write32(sc, reg, val)
438	struct tl_softc		*sc;
439	int			reg;
440	int			val;
441{
442
443	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
444		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
445	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
446	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
447		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
448	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
449}
450
451static void
452tl_dio_setbit(sc, reg, bit)
453	struct tl_softc		*sc;
454	int			reg;
455	int			bit;
456{
457	u_int8_t			f;
458
459	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
460		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
461	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
462	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
463		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
464	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
465	f |= bit;
466	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
467		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
468	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
469}
470
471static void
472tl_dio_clrbit(sc, reg, bit)
473	struct tl_softc		*sc;
474	int			reg;
475	int			bit;
476{
477	u_int8_t			f;
478
479	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
480		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
481	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
482	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
483		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
484	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
485	f &= ~bit;
486	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
487		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
488	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
489}
490
491static void tl_dio_setbit16(sc, reg, bit)
492	struct tl_softc		*sc;
493	int			reg;
494	int			bit;
495{
496	u_int16_t			f;
497
498	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
499		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
500	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
501	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
502		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
503	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
504	f |= bit;
505	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
506		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
507	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
508}
509
510static void tl_dio_clrbit16(sc, reg, bit)
511	struct tl_softc		*sc;
512	int			reg;
513	int			bit;
514{
515	u_int16_t			f;
516
517	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
518		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
519	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
520	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
521		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
522	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
523	f &= ~bit;
524	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
525		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
526	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
527}
528
529/*
530 * Send an instruction or address to the EEPROM, check for ACK.
531 */
532static u_int8_t tl_eeprom_putbyte(sc, byte)
533	struct tl_softc		*sc;
534	int			byte;
535{
536	register int		i, ack = 0;
537
538	/*
539	 * Make sure we're in TX mode.
540	 */
541	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
542
543	/*
544	 * Feed in each bit and stobe the clock.
545	 */
546	for (i = 0x80; i; i >>= 1) {
547		if (byte & i) {
548			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
549		} else {
550			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
551		}
552		DELAY(1);
553		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
554		DELAY(1);
555		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
556	}
557
558	/*
559	 * Turn off TX mode.
560	 */
561	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
562
563	/*
564	 * Check for ack.
565	 */
566	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
567	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
568	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
569
570	return(ack);
571}
572
573/*
574 * Read a byte of data stored in the EEPROM at address 'addr.'
575 */
576static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
577	struct tl_softc		*sc;
578	int			addr;
579	u_int8_t		*dest;
580{
581	register int		i;
582	u_int8_t		byte = 0;
583	device_t		tl_dev = sc->tl_dev;
584
585	tl_dio_write8(sc, TL_NETSIO, 0);
586
587	EEPROM_START;
588
589	/*
590	 * Send write control code to EEPROM.
591	 */
592	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
593		device_printf(tl_dev, "failed to send write command, status: %x\n",
594		    tl_dio_read8(sc, TL_NETSIO));
595		return(1);
596	}
597
598	/*
599	 * Send address of byte we want to read.
600	 */
601	if (tl_eeprom_putbyte(sc, addr)) {
602		device_printf(tl_dev, "failed to send address, status: %x\n",
603		    tl_dio_read8(sc, TL_NETSIO));
604		return(1);
605	}
606
607	EEPROM_STOP;
608	EEPROM_START;
609	/*
610	 * Send read control code to EEPROM.
611	 */
612	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
613		device_printf(tl_dev, "failed to send write command, status: %x\n",
614		    tl_dio_read8(sc, TL_NETSIO));
615		return(1);
616	}
617
618	/*
619	 * Start reading bits from EEPROM.
620	 */
621	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
622	for (i = 0x80; i; i >>= 1) {
623		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
624		DELAY(1);
625		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
626			byte |= i;
627		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
628		DELAY(1);
629	}
630
631	EEPROM_STOP;
632
633	/*
634	 * No ACK generated for read, so just return byte.
635	 */
636
637	*dest = byte;
638
639	return(0);
640}
641
642/*
643 * Read a sequence of bytes from the EEPROM.
644 */
645static int
646tl_read_eeprom(sc, dest, off, cnt)
647	struct tl_softc		*sc;
648	caddr_t			dest;
649	int			off;
650	int			cnt;
651{
652	int			err = 0, i;
653	u_int8_t		byte = 0;
654
655	for (i = 0; i < cnt; i++) {
656		err = tl_eeprom_getbyte(sc, off + i, &byte);
657		if (err)
658			break;
659		*(dest + i) = byte;
660	}
661
662	return(err ? 1 : 0);
663}
664
665#define	TL_SIO_MII	(TL_SIO_MCLK | TL_SIO_MDATA | TL_SIO_MTXEN)
666
667/*
668 * Read the MII serial port for the MII bit-bang module.
669 */
670static uint32_t
671tl_mii_bitbang_read(device_t dev)
672{
673	struct tl_softc *sc;
674	uint32_t val;
675
676	sc = device_get_softc(dev);
677
678	val = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MII;
679	CSR_BARRIER(sc, TL_NETSIO, 1,
680	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
681
682	return (val);
683}
684
685/*
686 * Write the MII serial port for the MII bit-bang module.
687 */
688static void
689tl_mii_bitbang_write(device_t dev, uint32_t val)
690{
691	struct tl_softc *sc;
692
693	sc = device_get_softc(dev);
694
695	val = (tl_dio_read8(sc, TL_NETSIO) & ~TL_SIO_MII) | val;
696	CSR_BARRIER(sc, TL_NETSIO, 1,
697	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
698	tl_dio_write8(sc, TL_NETSIO, val);
699	CSR_BARRIER(sc, TL_NETSIO, 1,
700	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
701}
702
703static int
704tl_miibus_readreg(dev, phy, reg)
705	device_t		dev;
706	int			phy, reg;
707{
708	struct tl_softc		*sc;
709	int			minten, val;
710
711	sc = device_get_softc(dev);
712
713	/*
714	 * Turn off MII interrupt by forcing MINTEN low.
715	 */
716	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
717	if (minten) {
718		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
719	}
720
721	val = mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg);
722
723	/* Reenable interrupts. */
724	if (minten) {
725		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
726	}
727
728	return (val);
729}
730
731static int
732tl_miibus_writereg(dev, phy, reg, data)
733	device_t		dev;
734	int			phy, reg, data;
735{
736	struct tl_softc		*sc;
737	int			minten;
738
739	sc = device_get_softc(dev);
740
741	/*
742	 * Turn off MII interrupt by forcing MINTEN low.
743	 */
744	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
745	if (minten) {
746		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
747	}
748
749	mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, data);
750
751	/* Reenable interrupts. */
752	if (minten) {
753		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
754	}
755
756	return(0);
757}
758
759static void
760tl_miibus_statchg(dev)
761	device_t		dev;
762{
763	struct tl_softc		*sc;
764	struct mii_data		*mii;
765
766	sc = device_get_softc(dev);
767	mii = device_get_softc(sc->tl_miibus);
768
769	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
770		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
771	} else {
772		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
773	}
774}
775
776/*
777 * Set modes for bitrate devices.
778 */
779static void
780tl_setmode(sc, media)
781	struct tl_softc		*sc;
782	int			media;
783{
784	if (IFM_SUBTYPE(media) == IFM_10_5)
785		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
786	if (IFM_SUBTYPE(media) == IFM_10_T) {
787		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
788		if ((media & IFM_GMASK) == IFM_FDX) {
789			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
790			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
791		} else {
792			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
793			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
794		}
795	}
796}
797
798/*
799 * Calculate the hash of a MAC address for programming the multicast hash
800 * table.  This hash is simply the address split into 6-bit chunks
801 * XOR'd, e.g.
802 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
803 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
804 * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
805 * the folded 24-bit value is split into 6-bit portions and XOR'd.
806 */
807static uint32_t
808tl_mchash(addr)
809	const uint8_t *addr;
810{
811	int t;
812
813	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
814		(addr[2] ^ addr[5]);
815	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
816}
817
818/*
819 * The ThunderLAN has a perfect MAC address filter in addition to
820 * the multicast hash filter. The perfect filter can be programmed
821 * with up to four MAC addresses. The first one is always used to
822 * hold the station address, which leaves us free to use the other
823 * three for multicast addresses.
824 */
825static void
826tl_setfilt(sc, addr, slot)
827	struct tl_softc		*sc;
828	caddr_t			addr;
829	int			slot;
830{
831	int			i;
832	u_int16_t		regaddr;
833
834	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
835
836	for (i = 0; i < ETHER_ADDR_LEN; i++)
837		tl_dio_write8(sc, regaddr + i, *(addr + i));
838}
839
840/*
841 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
842 * linked list. This is fine, except addresses are added from the head
843 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
844 * group to always be in the perfect filter, but as more groups are added,
845 * the 224.0.0.1 entry (which is always added first) gets pushed down
846 * the list and ends up at the tail. So after 3 or 4 multicast groups
847 * are added, the all-hosts entry gets pushed out of the perfect filter
848 * and into the hash table.
849 *
850 * Because the multicast list is a doubly-linked list as opposed to a
851 * circular queue, we don't have the ability to just grab the tail of
852 * the list and traverse it backwards. Instead, we have to traverse
853 * the list once to find the tail, then traverse it again backwards to
854 * update the multicast filter.
855 */
856static void
857tl_setmulti(sc)
858	struct tl_softc		*sc;
859{
860	struct ifnet		*ifp;
861	u_int32_t		hashes[2] = { 0, 0 };
862	int			h, i;
863	struct ifmultiaddr	*ifma;
864	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
865	ifp = sc->tl_ifp;
866
867	/* First, zot all the existing filters. */
868	for (i = 1; i < 4; i++)
869		tl_setfilt(sc, (caddr_t)&dummy, i);
870	tl_dio_write32(sc, TL_HASH1, 0);
871	tl_dio_write32(sc, TL_HASH2, 0);
872
873	/* Now program new ones. */
874	if (ifp->if_flags & IFF_ALLMULTI) {
875		hashes[0] = 0xFFFFFFFF;
876		hashes[1] = 0xFFFFFFFF;
877	} else {
878		i = 1;
879		if_maddr_rlock(ifp);
880		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
881			if (ifma->ifma_addr->sa_family != AF_LINK)
882				continue;
883			/*
884			 * Program the first three multicast groups
885			 * into the perfect filter. For all others,
886			 * use the hash table.
887			 */
888			if (i < 4) {
889				tl_setfilt(sc,
890			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
891				i++;
892				continue;
893			}
894
895			h = tl_mchash(
896				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
897			if (h < 32)
898				hashes[0] |= (1 << h);
899			else
900				hashes[1] |= (1 << (h - 32));
901		}
902		if_maddr_runlock(ifp);
903	}
904
905	tl_dio_write32(sc, TL_HASH1, hashes[0]);
906	tl_dio_write32(sc, TL_HASH2, hashes[1]);
907}
908
909/*
910 * This routine is recommended by the ThunderLAN manual to insure that
911 * the internal PHY is powered up correctly. It also recommends a one
912 * second pause at the end to 'wait for the clocks to start' but in my
913 * experience this isn't necessary.
914 */
915static void
916tl_hardreset(dev)
917	device_t		dev;
918{
919	int			i;
920	u_int16_t		flags;
921
922	mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
923
924	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
925
926	for (i = 0; i < MII_NPHY; i++)
927		tl_miibus_writereg(dev, i, MII_BMCR, flags);
928
929	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
930	DELAY(50000);
931	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
932	mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
933	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
934
935	DELAY(50000);
936}
937
938static void
939tl_softreset(sc, internal)
940	struct tl_softc		*sc;
941	int			internal;
942{
943        u_int32_t               cmd, dummy, i;
944
945        /* Assert the adapter reset bit. */
946	CMD_SET(sc, TL_CMD_ADRST);
947
948        /* Turn off interrupts */
949	CMD_SET(sc, TL_CMD_INTSOFF);
950
951	/* First, clear the stats registers. */
952	for (i = 0; i < 5; i++)
953		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
954
955        /* Clear Areg and Hash registers */
956	for (i = 0; i < 8; i++)
957		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
958
959        /*
960	 * Set up Netconfig register. Enable one channel and
961	 * one fragment mode.
962	 */
963	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
964	if (internal && !sc->tl_bitrate) {
965		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
966	} else {
967		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
968	}
969
970	/* Handle cards with bitrate devices. */
971	if (sc->tl_bitrate)
972		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
973
974	/*
975	 * Load adapter irq pacing timer and tx threshold.
976	 * We make the transmit threshold 1 initially but we may
977	 * change that later.
978	 */
979	cmd = CSR_READ_4(sc, TL_HOSTCMD);
980	cmd |= TL_CMD_NES;
981	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
982	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
983	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
984
985        /* Unreset the MII */
986	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
987
988	/* Take the adapter out of reset */
989	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
990
991	/* Wait for things to settle down a little. */
992	DELAY(500);
993}
994
995/*
996 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
997 * against our list and return its name if we find a match.
998 */
999static int
1000tl_probe(dev)
1001	device_t		dev;
1002{
1003	const struct tl_type	*t;
1004
1005	t = tl_devs;
1006
1007	while(t->tl_name != NULL) {
1008		if ((pci_get_vendor(dev) == t->tl_vid) &&
1009		    (pci_get_device(dev) == t->tl_did)) {
1010			device_set_desc(dev, t->tl_name);
1011			return (BUS_PROBE_DEFAULT);
1012		}
1013		t++;
1014	}
1015
1016	return(ENXIO);
1017}
1018
1019static int
1020tl_attach(dev)
1021	device_t		dev;
1022{
1023	u_int16_t		did, vid;
1024	const struct tl_type	*t;
1025	struct ifnet		*ifp;
1026	struct tl_softc		*sc;
1027	int			error, flags, i, rid, unit;
1028	u_char			eaddr[6];
1029
1030	vid = pci_get_vendor(dev);
1031	did = pci_get_device(dev);
1032	sc = device_get_softc(dev);
1033	sc->tl_dev = dev;
1034	unit = device_get_unit(dev);
1035
1036	t = tl_devs;
1037	while(t->tl_name != NULL) {
1038		if (vid == t->tl_vid && did == t->tl_did)
1039			break;
1040		t++;
1041	}
1042
1043	if (t->tl_name == NULL) {
1044		device_printf(dev, "unknown device!?\n");
1045		return (ENXIO);
1046	}
1047
1048	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1049	    MTX_DEF);
1050
1051	/*
1052	 * Map control/status registers.
1053	 */
1054	pci_enable_busmaster(dev);
1055
1056#ifdef TL_USEIOSPACE
1057
1058	rid = TL_PCI_LOIO;
1059	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1060		RF_ACTIVE);
1061
1062	/*
1063	 * Some cards have the I/O and memory mapped address registers
1064	 * reversed. Try both combinations before giving up.
1065	 */
1066	if (sc->tl_res == NULL) {
1067		rid = TL_PCI_LOMEM;
1068		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1069		    RF_ACTIVE);
1070	}
1071#else
1072	rid = TL_PCI_LOMEM;
1073	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1074	    RF_ACTIVE);
1075	if (sc->tl_res == NULL) {
1076		rid = TL_PCI_LOIO;
1077		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1078		    RF_ACTIVE);
1079	}
1080#endif
1081
1082	if (sc->tl_res == NULL) {
1083		device_printf(dev, "couldn't map ports/memory\n");
1084		error = ENXIO;
1085		goto fail;
1086	}
1087
1088#ifdef notdef
1089	/*
1090	 * The ThunderLAN manual suggests jacking the PCI latency
1091	 * timer all the way up to its maximum value. I'm not sure
1092	 * if this is really necessary, but what the manual wants,
1093	 * the manual gets.
1094	 */
1095	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1096	command |= 0x0000FF00;
1097	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1098#endif
1099
1100	/* Allocate interrupt */
1101	rid = 0;
1102	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1103	    RF_SHAREABLE | RF_ACTIVE);
1104
1105	if (sc->tl_irq == NULL) {
1106		device_printf(dev, "couldn't map interrupt\n");
1107		error = ENXIO;
1108		goto fail;
1109	}
1110
1111	/*
1112	 * Now allocate memory for the TX and RX lists.
1113	 */
1114	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1115	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1116
1117	if (sc->tl_ldata == NULL) {
1118		device_printf(dev, "no memory for list buffers!\n");
1119		error = ENXIO;
1120		goto fail;
1121	}
1122
1123	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1124
1125	if (vid == COMPAQ_VENDORID || vid == TI_VENDORID)
1126		sc->tl_eeaddr = TL_EEPROM_EADDR;
1127	if (vid == OLICOM_VENDORID)
1128		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1129
1130	/* Reset the adapter. */
1131	tl_softreset(sc, 1);
1132	tl_hardreset(dev);
1133	tl_softreset(sc, 1);
1134
1135	/*
1136	 * Get station address from the EEPROM.
1137	 */
1138	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1139		device_printf(dev, "failed to read station address\n");
1140		error = ENXIO;
1141		goto fail;
1142	}
1143
1144        /*
1145         * XXX Olicom, in its desire to be different from the
1146         * rest of the world, has done strange things with the
1147         * encoding of the station address in the EEPROM. First
1148         * of all, they store the address at offset 0xF8 rather
1149         * than at 0x83 like the ThunderLAN manual suggests.
1150         * Second, they store the address in three 16-bit words in
1151         * network byte order, as opposed to storing it sequentially
1152         * like all the other ThunderLAN cards. In order to get
1153         * the station address in a form that matches what the Olicom
1154         * diagnostic utility specifies, we have to byte-swap each
1155         * word. To make things even more confusing, neither 00:00:28
1156         * nor 00:00:24 appear in the IEEE OUI database.
1157         */
1158        if (vid == OLICOM_VENDORID) {
1159                for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1160                        u_int16_t               *p;
1161                        p = (u_int16_t *)&eaddr[i];
1162                        *p = ntohs(*p);
1163                }
1164        }
1165
1166	ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1167	if (ifp == NULL) {
1168		device_printf(dev, "can not if_alloc()\n");
1169		error = ENOSPC;
1170		goto fail;
1171	}
1172	ifp->if_softc = sc;
1173	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1174	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1175	ifp->if_ioctl = tl_ioctl;
1176	ifp->if_start = tl_start;
1177	ifp->if_init = tl_init;
1178	ifp->if_mtu = ETHERMTU;
1179	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1180	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1181	ifp->if_capenable |= IFCAP_VLAN_MTU;
1182	callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);
1183
1184	/* Reset the adapter again. */
1185	tl_softreset(sc, 1);
1186	tl_hardreset(dev);
1187	tl_softreset(sc, 1);
1188
1189	/*
1190	 * Do MII setup. If no PHYs are found, then this is a
1191	 * bitrate ThunderLAN chip that only supports 10baseT
1192	 * and AUI/BNC.
1193	 * XXX mii_attach() can fail for reason different than
1194	 * no PHYs found!
1195	 */
1196	flags = 0;
1197	if (vid == COMPAQ_VENDORID) {
1198		if (did == COMPAQ_DEVICEID_NETEL_10_100_PROLIANT ||
1199		    did == COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED ||
1200		    did == COMPAQ_DEVICEID_NETFLEX_3P_BNC ||
1201		    did == COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX)
1202			flags |= MIIF_MACPRIV0;
1203		if (did == COMPAQ_DEVICEID_NETEL_10 ||
1204		    did == COMPAQ_DEVICEID_NETEL_10_100_DUAL ||
1205		    did == COMPAQ_DEVICEID_NETFLEX_3P ||
1206		    did == COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED)
1207			flags |= MIIF_MACPRIV1;
1208	} else if (vid == OLICOM_VENDORID && did == OLICOM_DEVICEID_OC2183)
1209			flags |= MIIF_MACPRIV0 | MIIF_MACPRIV1;
1210	if (mii_attach(dev, &sc->tl_miibus, ifp, tl_ifmedia_upd,
1211	    tl_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0)) {
1212		struct ifmedia		*ifm;
1213		sc->tl_bitrate = 1;
1214		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1215		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1216		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1217		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1218		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1219		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1220		/* Reset again, this time setting bitrate mode. */
1221		tl_softreset(sc, 1);
1222		ifm = &sc->ifmedia;
1223		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1224		tl_ifmedia_upd(ifp);
1225	}
1226
1227	/*
1228	 * Call MI attach routine.
1229	 */
1230	ether_ifattach(ifp, eaddr);
1231
1232	/* Hook interrupt last to avoid having to lock softc */
1233	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1234	    NULL, tl_intr, sc, &sc->tl_intrhand);
1235
1236	if (error) {
1237		device_printf(dev, "couldn't set up irq\n");
1238		ether_ifdetach(ifp);
1239		goto fail;
1240	}
1241
1242fail:
1243	if (error)
1244		tl_detach(dev);
1245
1246	return(error);
1247}
1248
1249/*
1250 * Shutdown hardware and free up resources. This can be called any
1251 * time after the mutex has been initialized. It is called in both
1252 * the error case in attach and the normal detach case so it needs
1253 * to be careful about only freeing resources that have actually been
1254 * allocated.
1255 */
1256static int
1257tl_detach(dev)
1258	device_t		dev;
1259{
1260	struct tl_softc		*sc;
1261	struct ifnet		*ifp;
1262
1263	sc = device_get_softc(dev);
1264	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1265	ifp = sc->tl_ifp;
1266
1267	/* These should only be active if attach succeeded */
1268	if (device_is_attached(dev)) {
1269		ether_ifdetach(ifp);
1270		TL_LOCK(sc);
1271		tl_stop(sc);
1272		TL_UNLOCK(sc);
1273		callout_drain(&sc->tl_stat_callout);
1274	}
1275	if (sc->tl_miibus)
1276		device_delete_child(dev, sc->tl_miibus);
1277	bus_generic_detach(dev);
1278
1279	if (sc->tl_ldata)
1280		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1281	if (sc->tl_bitrate)
1282		ifmedia_removeall(&sc->ifmedia);
1283
1284	if (sc->tl_intrhand)
1285		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1286	if (sc->tl_irq)
1287		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1288	if (sc->tl_res)
1289		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1290
1291	if (ifp)
1292		if_free(ifp);
1293
1294	mtx_destroy(&sc->tl_mtx);
1295
1296	return(0);
1297}
1298
1299/*
1300 * Initialize the transmit lists.
1301 */
1302static int
1303tl_list_tx_init(sc)
1304	struct tl_softc		*sc;
1305{
1306	struct tl_chain_data	*cd;
1307	struct tl_list_data	*ld;
1308	int			i;
1309
1310	cd = &sc->tl_cdata;
1311	ld = sc->tl_ldata;
1312	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1313		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1314		if (i == (TL_TX_LIST_CNT - 1))
1315			cd->tl_tx_chain[i].tl_next = NULL;
1316		else
1317			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1318	}
1319
1320	cd->tl_tx_free = &cd->tl_tx_chain[0];
1321	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1322	sc->tl_txeoc = 1;
1323
1324	return(0);
1325}
1326
1327/*
1328 * Initialize the RX lists and allocate mbufs for them.
1329 */
1330static int
1331tl_list_rx_init(sc)
1332	struct tl_softc		*sc;
1333{
1334	struct tl_chain_data		*cd;
1335	struct tl_list_data		*ld;
1336	int				i;
1337
1338	cd = &sc->tl_cdata;
1339	ld = sc->tl_ldata;
1340
1341	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1342		cd->tl_rx_chain[i].tl_ptr =
1343			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1344		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1345			return(ENOBUFS);
1346		if (i == (TL_RX_LIST_CNT - 1)) {
1347			cd->tl_rx_chain[i].tl_next = NULL;
1348			ld->tl_rx_list[i].tlist_fptr = 0;
1349		} else {
1350			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1351			ld->tl_rx_list[i].tlist_fptr =
1352					vtophys(&ld->tl_rx_list[i + 1]);
1353		}
1354	}
1355
1356	cd->tl_rx_head = &cd->tl_rx_chain[0];
1357	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1358
1359	return(0);
1360}
1361
1362static int
1363tl_newbuf(sc, c)
1364	struct tl_softc		*sc;
1365	struct tl_chain_onefrag	*c;
1366{
1367	struct mbuf		*m_new = NULL;
1368
1369	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1370	if (m_new == NULL)
1371		return(ENOBUFS);
1372
1373	c->tl_mbuf = m_new;
1374	c->tl_next = NULL;
1375	c->tl_ptr->tlist_frsize = MCLBYTES;
1376	c->tl_ptr->tlist_fptr = 0;
1377	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1378	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1379	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1380
1381	return(0);
1382}
1383/*
1384 * Interrupt handler for RX 'end of frame' condition (EOF). This
1385 * tells us that a full ethernet frame has been captured and we need
1386 * to handle it.
1387 *
1388 * Reception is done using 'lists' which consist of a header and a
1389 * series of 10 data count/data address pairs that point to buffers.
1390 * Initially you're supposed to create a list, populate it with pointers
1391 * to buffers, then load the physical address of the list into the
1392 * ch_parm register. The adapter is then supposed to DMA the received
1393 * frame into the buffers for you.
1394 *
1395 * To make things as fast as possible, we have the chip DMA directly
1396 * into mbufs. This saves us from having to do a buffer copy: we can
1397 * just hand the mbufs directly to ether_input(). Once the frame has
1398 * been sent on its way, the 'list' structure is assigned a new buffer
1399 * and moved to the end of the RX chain. As long we we stay ahead of
1400 * the chip, it will always think it has an endless receive channel.
1401 *
1402 * If we happen to fall behind and the chip manages to fill up all of
1403 * the buffers, it will generate an end of channel interrupt and wait
1404 * for us to empty the chain and restart the receiver.
1405 */
1406static int
1407tl_intvec_rxeof(xsc, type)
1408	void			*xsc;
1409	u_int32_t		type;
1410{
1411	struct tl_softc		*sc;
1412	int			r = 0, total_len = 0;
1413	struct ether_header	*eh;
1414	struct mbuf		*m;
1415	struct ifnet		*ifp;
1416	struct tl_chain_onefrag	*cur_rx;
1417
1418	sc = xsc;
1419	ifp = sc->tl_ifp;
1420
1421	TL_LOCK_ASSERT(sc);
1422
1423	while(sc->tl_cdata.tl_rx_head != NULL) {
1424		cur_rx = sc->tl_cdata.tl_rx_head;
1425		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1426			break;
1427		r++;
1428		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1429		m = cur_rx->tl_mbuf;
1430		total_len = cur_rx->tl_ptr->tlist_frsize;
1431
1432		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1433			ifp->if_ierrors++;
1434			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1435			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1436			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1437			continue;
1438		}
1439
1440		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1441						vtophys(cur_rx->tl_ptr);
1442		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1443		sc->tl_cdata.tl_rx_tail = cur_rx;
1444
1445		/*
1446		 * Note: when the ThunderLAN chip is in 'capture all
1447		 * frames' mode, it will receive its own transmissions.
1448		 * We drop don't need to process our own transmissions,
1449		 * so we drop them here and continue.
1450		 */
1451		eh = mtod(m, struct ether_header *);
1452		/*if (ifp->if_flags & IFF_PROMISC && */
1453		if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp),
1454		 					ETHER_ADDR_LEN)) {
1455				m_freem(m);
1456				continue;
1457		}
1458
1459		m->m_pkthdr.rcvif = ifp;
1460		m->m_pkthdr.len = m->m_len = total_len;
1461
1462		TL_UNLOCK(sc);
1463		(*ifp->if_input)(ifp, m);
1464		TL_LOCK(sc);
1465	}
1466
1467	return(r);
1468}
1469
1470/*
1471 * The RX-EOC condition hits when the ch_parm address hasn't been
1472 * initialized or the adapter reached a list with a forward pointer
1473 * of 0 (which indicates the end of the chain). In our case, this means
1474 * the card has hit the end of the receive buffer chain and we need to
1475 * empty out the buffers and shift the pointer back to the beginning again.
1476 */
1477static int
1478tl_intvec_rxeoc(xsc, type)
1479	void			*xsc;
1480	u_int32_t		type;
1481{
1482	struct tl_softc		*sc;
1483	int			r;
1484	struct tl_chain_data	*cd;
1485
1486
1487	sc = xsc;
1488	cd = &sc->tl_cdata;
1489
1490	/* Flush out the receive queue and ack RXEOF interrupts. */
1491	r = tl_intvec_rxeof(xsc, type);
1492	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1493	r = 1;
1494	cd->tl_rx_head = &cd->tl_rx_chain[0];
1495	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1496	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1497	r |= (TL_CMD_GO|TL_CMD_RT);
1498	return(r);
1499}
1500
1501static int
1502tl_intvec_txeof(xsc, type)
1503	void			*xsc;
1504	u_int32_t		type;
1505{
1506	struct tl_softc		*sc;
1507	int			r = 0;
1508	struct tl_chain		*cur_tx;
1509
1510	sc = xsc;
1511
1512	/*
1513	 * Go through our tx list and free mbufs for those
1514	 * frames that have been sent.
1515	 */
1516	while (sc->tl_cdata.tl_tx_head != NULL) {
1517		cur_tx = sc->tl_cdata.tl_tx_head;
1518		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1519			break;
1520		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1521
1522		r++;
1523		m_freem(cur_tx->tl_mbuf);
1524		cur_tx->tl_mbuf = NULL;
1525
1526		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1527		sc->tl_cdata.tl_tx_free = cur_tx;
1528		if (!cur_tx->tl_ptr->tlist_fptr)
1529			break;
1530	}
1531
1532	return(r);
1533}
1534
1535/*
1536 * The transmit end of channel interrupt. The adapter triggers this
1537 * interrupt to tell us it hit the end of the current transmit list.
1538 *
1539 * A note about this: it's possible for a condition to arise where
1540 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1541 * You have to avoid this since the chip expects things to go in a
1542 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1543 * When the TXEOF handler is called, it will free all of the transmitted
1544 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1545 * interrupt should be received and acknowledged before any more frames
1546 * are queued for transmission. If tl_statrt() is called after TXEOF
1547 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1548 * it could attempt to issue a transmit command prematurely.
1549 *
1550 * To guard against this, tl_start() will only issue transmit commands
1551 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1552 * can set this flag once tl_start() has cleared it.
1553 */
1554static int
1555tl_intvec_txeoc(xsc, type)
1556	void			*xsc;
1557	u_int32_t		type;
1558{
1559	struct tl_softc		*sc;
1560	struct ifnet		*ifp;
1561	u_int32_t		cmd;
1562
1563	sc = xsc;
1564	ifp = sc->tl_ifp;
1565
1566	/* Clear the timeout timer. */
1567	sc->tl_timer = 0;
1568
1569	if (sc->tl_cdata.tl_tx_head == NULL) {
1570		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1571		sc->tl_cdata.tl_tx_tail = NULL;
1572		sc->tl_txeoc = 1;
1573	} else {
1574		sc->tl_txeoc = 0;
1575		/* First we have to ack the EOC interrupt. */
1576		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1577		/* Then load the address of the next TX list. */
1578		CSR_WRITE_4(sc, TL_CH_PARM,
1579		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1580		/* Restart TX channel. */
1581		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1582		cmd &= ~TL_CMD_RT;
1583		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1584		CMD_PUT(sc, cmd);
1585		return(0);
1586	}
1587
1588	return(1);
1589}
1590
1591static int
1592tl_intvec_adchk(xsc, type)
1593	void			*xsc;
1594	u_int32_t		type;
1595{
1596	struct tl_softc		*sc;
1597
1598	sc = xsc;
1599
1600	if (type)
1601		device_printf(sc->tl_dev, "adapter check: %x\n",
1602			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1603
1604	tl_softreset(sc, 1);
1605	tl_stop(sc);
1606	tl_init_locked(sc);
1607	CMD_SET(sc, TL_CMD_INTSON);
1608
1609	return(0);
1610}
1611
1612static int
1613tl_intvec_netsts(xsc, type)
1614	void			*xsc;
1615	u_int32_t		type;
1616{
1617	struct tl_softc		*sc;
1618	u_int16_t		netsts;
1619
1620	sc = xsc;
1621
1622	netsts = tl_dio_read16(sc, TL_NETSTS);
1623	tl_dio_write16(sc, TL_NETSTS, netsts);
1624
1625	device_printf(sc->tl_dev, "network status: %x\n", netsts);
1626
1627	return(1);
1628}
1629
1630static void
1631tl_intr(xsc)
1632	void			*xsc;
1633{
1634	struct tl_softc		*sc;
1635	struct ifnet		*ifp;
1636	int			r = 0;
1637	u_int32_t		type = 0;
1638	u_int16_t		ints = 0;
1639	u_int8_t		ivec = 0;
1640
1641	sc = xsc;
1642	TL_LOCK(sc);
1643
1644	/* Disable interrupts */
1645	ints = CSR_READ_2(sc, TL_HOST_INT);
1646	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1647	type = (ints << 16) & 0xFFFF0000;
1648	ivec = (ints & TL_VEC_MASK) >> 5;
1649	ints = (ints & TL_INT_MASK) >> 2;
1650
1651	ifp = sc->tl_ifp;
1652
1653	switch(ints) {
1654	case (TL_INTR_INVALID):
1655#ifdef DIAGNOSTIC
1656		device_printf(sc->tl_dev, "got an invalid interrupt!\n");
1657#endif
1658		/* Re-enable interrupts but don't ack this one. */
1659		CMD_PUT(sc, type);
1660		r = 0;
1661		break;
1662	case (TL_INTR_TXEOF):
1663		r = tl_intvec_txeof((void *)sc, type);
1664		break;
1665	case (TL_INTR_TXEOC):
1666		r = tl_intvec_txeoc((void *)sc, type);
1667		break;
1668	case (TL_INTR_STATOFLOW):
1669		tl_stats_update(sc);
1670		r = 1;
1671		break;
1672	case (TL_INTR_RXEOF):
1673		r = tl_intvec_rxeof((void *)sc, type);
1674		break;
1675	case (TL_INTR_DUMMY):
1676		device_printf(sc->tl_dev, "got a dummy interrupt\n");
1677		r = 1;
1678		break;
1679	case (TL_INTR_ADCHK):
1680		if (ivec)
1681			r = tl_intvec_adchk((void *)sc, type);
1682		else
1683			r = tl_intvec_netsts((void *)sc, type);
1684		break;
1685	case (TL_INTR_RXEOC):
1686		r = tl_intvec_rxeoc((void *)sc, type);
1687		break;
1688	default:
1689		device_printf(sc->tl_dev, "bogus interrupt type\n");
1690		break;
1691	}
1692
1693	/* Re-enable interrupts */
1694	if (r) {
1695		CMD_PUT(sc, TL_CMD_ACK | r | type);
1696	}
1697
1698	if (ifp->if_snd.ifq_head != NULL)
1699		tl_start_locked(ifp);
1700
1701	TL_UNLOCK(sc);
1702}
1703
1704static void
1705tl_stats_update(xsc)
1706	void			*xsc;
1707{
1708	struct tl_softc		*sc;
1709	struct ifnet		*ifp;
1710	struct tl_stats		tl_stats;
1711	struct mii_data		*mii;
1712	u_int32_t		*p;
1713
1714	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1715
1716	sc = xsc;
1717	TL_LOCK_ASSERT(sc);
1718	ifp = sc->tl_ifp;
1719
1720	p = (u_int32_t *)&tl_stats;
1721
1722	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1723	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1724	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1725	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1726	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1727	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1728
1729	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1730	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1731				tl_stats.tl_tx_multi_collision;
1732	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1733	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1734			    tl_rx_overrun(tl_stats);
1735	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1736
1737	if (tl_tx_underrun(tl_stats)) {
1738		u_int8_t		tx_thresh;
1739		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1740		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1741			tx_thresh >>= 4;
1742			tx_thresh++;
1743			device_printf(sc->tl_dev, "tx underrun -- increasing "
1744			    "tx threshold to %d bytes\n",
1745			    (64 * (tx_thresh * 4)));
1746			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1747			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1748		}
1749	}
1750
1751	if (sc->tl_timer > 0 && --sc->tl_timer == 0)
1752		tl_watchdog(sc);
1753
1754	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1755
1756	if (!sc->tl_bitrate) {
1757		mii = device_get_softc(sc->tl_miibus);
1758		mii_tick(mii);
1759	}
1760}
1761
1762/*
1763 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1764 * pointers to the fragment pointers.
1765 */
1766static int
1767tl_encap(sc, c, m_head)
1768	struct tl_softc		*sc;
1769	struct tl_chain		*c;
1770	struct mbuf		*m_head;
1771{
1772	int			frag = 0;
1773	struct tl_frag		*f = NULL;
1774	int			total_len;
1775	struct mbuf		*m;
1776	struct ifnet		*ifp = sc->tl_ifp;
1777
1778	/*
1779 	 * Start packing the mbufs in this chain into
1780	 * the fragment pointers. Stop when we run out
1781 	 * of fragments or hit the end of the mbuf chain.
1782	 */
1783	m = m_head;
1784	total_len = 0;
1785
1786	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1787		if (m->m_len != 0) {
1788			if (frag == TL_MAXFRAGS)
1789				break;
1790			total_len+= m->m_len;
1791			c->tl_ptr->tl_frag[frag].tlist_dadr =
1792				vtophys(mtod(m, vm_offset_t));
1793			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1794			frag++;
1795		}
1796	}
1797
1798	/*
1799	 * Handle special cases.
1800	 * Special case #1: we used up all 10 fragments, but
1801	 * we have more mbufs left in the chain. Copy the
1802	 * data into an mbuf cluster. Note that we don't
1803	 * bother clearing the values in the other fragment
1804	 * pointers/counters; it wouldn't gain us anything,
1805	 * and would waste cycles.
1806	 */
1807	if (m != NULL) {
1808		struct mbuf		*m_new = NULL;
1809
1810		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1811		if (m_new == NULL) {
1812			if_printf(ifp, "no memory for tx list\n");
1813			return(1);
1814		}
1815		if (m_head->m_pkthdr.len > MHLEN) {
1816			MCLGET(m_new, M_DONTWAIT);
1817			if (!(m_new->m_flags & M_EXT)) {
1818				m_freem(m_new);
1819				if_printf(ifp, "no memory for tx list\n");
1820				return(1);
1821			}
1822		}
1823		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1824					mtod(m_new, caddr_t));
1825		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1826		m_freem(m_head);
1827		m_head = m_new;
1828		f = &c->tl_ptr->tl_frag[0];
1829		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1830		f->tlist_dcnt = total_len = m_new->m_len;
1831		frag = 1;
1832	}
1833
1834	/*
1835	 * Special case #2: the frame is smaller than the minimum
1836	 * frame size. We have to pad it to make the chip happy.
1837	 */
1838	if (total_len < TL_MIN_FRAMELEN) {
1839		if (frag == TL_MAXFRAGS)
1840			if_printf(ifp,
1841			    "all frags filled but frame still to small!\n");
1842		f = &c->tl_ptr->tl_frag[frag];
1843		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1844		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1845		total_len += f->tlist_dcnt;
1846		frag++;
1847	}
1848
1849	c->tl_mbuf = m_head;
1850	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1851	c->tl_ptr->tlist_frsize = total_len;
1852	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1853	c->tl_ptr->tlist_fptr = 0;
1854
1855	return(0);
1856}
1857
1858/*
1859 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1860 * to the mbuf data regions directly in the transmit lists. We also save a
1861 * copy of the pointers since the transmit list fragment pointers are
1862 * physical addresses.
1863 */
1864static void
1865tl_start(ifp)
1866	struct ifnet		*ifp;
1867{
1868	struct tl_softc		*sc;
1869
1870	sc = ifp->if_softc;
1871	TL_LOCK(sc);
1872	tl_start_locked(ifp);
1873	TL_UNLOCK(sc);
1874}
1875
1876static void
1877tl_start_locked(ifp)
1878	struct ifnet		*ifp;
1879{
1880	struct tl_softc		*sc;
1881	struct mbuf		*m_head = NULL;
1882	u_int32_t		cmd;
1883	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1884
1885	sc = ifp->if_softc;
1886	TL_LOCK_ASSERT(sc);
1887
1888	/*
1889	 * Check for an available queue slot. If there are none,
1890	 * punt.
1891	 */
1892	if (sc->tl_cdata.tl_tx_free == NULL) {
1893		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1894		return;
1895	}
1896
1897	start_tx = sc->tl_cdata.tl_tx_free;
1898
1899	while(sc->tl_cdata.tl_tx_free != NULL) {
1900		IF_DEQUEUE(&ifp->if_snd, m_head);
1901		if (m_head == NULL)
1902			break;
1903
1904		/* Pick a chain member off the free list. */
1905		cur_tx = sc->tl_cdata.tl_tx_free;
1906		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1907
1908		cur_tx->tl_next = NULL;
1909
1910		/* Pack the data into the list. */
1911		tl_encap(sc, cur_tx, m_head);
1912
1913		/* Chain it together */
1914		if (prev != NULL) {
1915			prev->tl_next = cur_tx;
1916			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1917		}
1918		prev = cur_tx;
1919
1920		/*
1921		 * If there's a BPF listener, bounce a copy of this frame
1922		 * to him.
1923		 */
1924		BPF_MTAP(ifp, cur_tx->tl_mbuf);
1925	}
1926
1927	/*
1928	 * If there are no packets queued, bail.
1929	 */
1930	if (cur_tx == NULL)
1931		return;
1932
1933	/*
1934	 * That's all we can stands, we can't stands no more.
1935	 * If there are no other transfers pending, then issue the
1936	 * TX GO command to the adapter to start things moving.
1937	 * Otherwise, just leave the data in the queue and let
1938	 * the EOF/EOC interrupt handler send.
1939	 */
1940	if (sc->tl_cdata.tl_tx_head == NULL) {
1941		sc->tl_cdata.tl_tx_head = start_tx;
1942		sc->tl_cdata.tl_tx_tail = cur_tx;
1943
1944		if (sc->tl_txeoc) {
1945			sc->tl_txeoc = 0;
1946			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
1947			cmd = CSR_READ_4(sc, TL_HOSTCMD);
1948			cmd &= ~TL_CMD_RT;
1949			cmd |= TL_CMD_GO|TL_CMD_INTSON;
1950			CMD_PUT(sc, cmd);
1951		}
1952	} else {
1953		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
1954		sc->tl_cdata.tl_tx_tail = cur_tx;
1955	}
1956
1957	/*
1958	 * Set a timeout in case the chip goes out to lunch.
1959	 */
1960	sc->tl_timer = 5;
1961}
1962
1963static void
1964tl_init(xsc)
1965	void			*xsc;
1966{
1967	struct tl_softc		*sc = xsc;
1968
1969	TL_LOCK(sc);
1970	tl_init_locked(sc);
1971	TL_UNLOCK(sc);
1972}
1973
1974static void
1975tl_init_locked(sc)
1976	struct tl_softc		*sc;
1977{
1978	struct ifnet		*ifp = sc->tl_ifp;
1979	struct mii_data		*mii;
1980
1981	TL_LOCK_ASSERT(sc);
1982
1983	ifp = sc->tl_ifp;
1984
1985	/*
1986	 * Cancel pending I/O.
1987	 */
1988	tl_stop(sc);
1989
1990	/* Initialize TX FIFO threshold */
1991	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1992	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
1993
1994        /* Set PCI burst size */
1995	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
1996
1997	/*
1998	 * Set 'capture all frames' bit for promiscuous mode.
1999	 */
2000	if (ifp->if_flags & IFF_PROMISC)
2001		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2002	else
2003		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2004
2005	/*
2006	 * Set capture broadcast bit to capture broadcast frames.
2007	 */
2008	if (ifp->if_flags & IFF_BROADCAST)
2009		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2010	else
2011		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2012
2013	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2014
2015	/* Init our MAC address */
2016	tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0);
2017
2018	/* Init multicast filter, if needed. */
2019	tl_setmulti(sc);
2020
2021	/* Init circular RX list. */
2022	if (tl_list_rx_init(sc) == ENOBUFS) {
2023		device_printf(sc->tl_dev,
2024		    "initialization failed: no memory for rx buffers\n");
2025		tl_stop(sc);
2026		return;
2027	}
2028
2029	/* Init TX pointers. */
2030	tl_list_tx_init(sc);
2031
2032	/* Enable PCI interrupts. */
2033	CMD_SET(sc, TL_CMD_INTSON);
2034
2035	/* Load the address of the rx list */
2036	CMD_SET(sc, TL_CMD_RT);
2037	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2038
2039	if (!sc->tl_bitrate) {
2040		if (sc->tl_miibus != NULL) {
2041			mii = device_get_softc(sc->tl_miibus);
2042			mii_mediachg(mii);
2043		}
2044	} else {
2045		tl_ifmedia_upd(ifp);
2046	}
2047
2048	/* Send the RX go command */
2049	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2050
2051	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2052	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2053
2054	/* Start the stats update counter */
2055	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
2056}
2057
2058/*
2059 * Set media options.
2060 */
2061static int
2062tl_ifmedia_upd(ifp)
2063	struct ifnet		*ifp;
2064{
2065	struct tl_softc		*sc;
2066	struct mii_data		*mii = NULL;
2067
2068	sc = ifp->if_softc;
2069
2070	TL_LOCK(sc);
2071	if (sc->tl_bitrate)
2072		tl_setmode(sc, sc->ifmedia.ifm_media);
2073	else {
2074		mii = device_get_softc(sc->tl_miibus);
2075		mii_mediachg(mii);
2076	}
2077	TL_UNLOCK(sc);
2078
2079	return(0);
2080}
2081
2082/*
2083 * Report current media status.
2084 */
2085static void
2086tl_ifmedia_sts(ifp, ifmr)
2087	struct ifnet		*ifp;
2088	struct ifmediareq	*ifmr;
2089{
2090	struct tl_softc		*sc;
2091	struct mii_data		*mii;
2092
2093	sc = ifp->if_softc;
2094
2095	TL_LOCK(sc);
2096	ifmr->ifm_active = IFM_ETHER;
2097
2098	if (sc->tl_bitrate) {
2099		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2100			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2101		else
2102			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2103		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2104			ifmr->ifm_active |= IFM_HDX;
2105		else
2106			ifmr->ifm_active |= IFM_FDX;
2107		return;
2108	} else {
2109		mii = device_get_softc(sc->tl_miibus);
2110		mii_pollstat(mii);
2111		ifmr->ifm_active = mii->mii_media_active;
2112		ifmr->ifm_status = mii->mii_media_status;
2113	}
2114	TL_UNLOCK(sc);
2115}
2116
2117static int
2118tl_ioctl(ifp, command, data)
2119	struct ifnet		*ifp;
2120	u_long			command;
2121	caddr_t			data;
2122{
2123	struct tl_softc		*sc = ifp->if_softc;
2124	struct ifreq		*ifr = (struct ifreq *) data;
2125	int			error = 0;
2126
2127	switch(command) {
2128	case SIOCSIFFLAGS:
2129		TL_LOCK(sc);
2130		if (ifp->if_flags & IFF_UP) {
2131			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2132			    ifp->if_flags & IFF_PROMISC &&
2133			    !(sc->tl_if_flags & IFF_PROMISC)) {
2134				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2135				tl_setmulti(sc);
2136			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2137			    !(ifp->if_flags & IFF_PROMISC) &&
2138			    sc->tl_if_flags & IFF_PROMISC) {
2139				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2140				tl_setmulti(sc);
2141			} else
2142				tl_init_locked(sc);
2143		} else {
2144			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2145				tl_stop(sc);
2146			}
2147		}
2148		sc->tl_if_flags = ifp->if_flags;
2149		TL_UNLOCK(sc);
2150		error = 0;
2151		break;
2152	case SIOCADDMULTI:
2153	case SIOCDELMULTI:
2154		TL_LOCK(sc);
2155		tl_setmulti(sc);
2156		TL_UNLOCK(sc);
2157		error = 0;
2158		break;
2159	case SIOCSIFMEDIA:
2160	case SIOCGIFMEDIA:
2161		if (sc->tl_bitrate)
2162			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2163		else {
2164			struct mii_data		*mii;
2165			mii = device_get_softc(sc->tl_miibus);
2166			error = ifmedia_ioctl(ifp, ifr,
2167			    &mii->mii_media, command);
2168		}
2169		break;
2170	default:
2171		error = ether_ioctl(ifp, command, data);
2172		break;
2173	}
2174
2175	return(error);
2176}
2177
2178static void
2179tl_watchdog(sc)
2180	struct tl_softc		*sc;
2181{
2182	struct ifnet		*ifp;
2183
2184	TL_LOCK_ASSERT(sc);
2185	ifp = sc->tl_ifp;
2186
2187	if_printf(ifp, "device timeout\n");
2188
2189	ifp->if_oerrors++;
2190
2191	tl_softreset(sc, 1);
2192	tl_init_locked(sc);
2193}
2194
2195/*
2196 * Stop the adapter and free any mbufs allocated to the
2197 * RX and TX lists.
2198 */
2199static void
2200tl_stop(sc)
2201	struct tl_softc		*sc;
2202{
2203	register int		i;
2204	struct ifnet		*ifp;
2205
2206	TL_LOCK_ASSERT(sc);
2207
2208	ifp = sc->tl_ifp;
2209
2210	/* Stop the stats updater. */
2211	callout_stop(&sc->tl_stat_callout);
2212
2213	/* Stop the transmitter */
2214	CMD_CLR(sc, TL_CMD_RT);
2215	CMD_SET(sc, TL_CMD_STOP);
2216	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2217
2218	/* Stop the receiver */
2219	CMD_SET(sc, TL_CMD_RT);
2220	CMD_SET(sc, TL_CMD_STOP);
2221	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2222
2223	/*
2224	 * Disable host interrupts.
2225	 */
2226	CMD_SET(sc, TL_CMD_INTSOFF);
2227
2228	/*
2229	 * Clear list pointer.
2230	 */
2231	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2232
2233	/*
2234	 * Free the RX lists.
2235	 */
2236	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2237		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2238			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2239			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2240		}
2241	}
2242	bzero((char *)&sc->tl_ldata->tl_rx_list,
2243		sizeof(sc->tl_ldata->tl_rx_list));
2244
2245	/*
2246	 * Free the TX list buffers.
2247	 */
2248	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2249		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2250			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2251			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2252		}
2253	}
2254	bzero((char *)&sc->tl_ldata->tl_tx_list,
2255		sizeof(sc->tl_ldata->tl_tx_list));
2256
2257	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2258}
2259
2260/*
2261 * Stop all chip I/O so that the kernel's probe routines don't
2262 * get confused by errant DMAs when rebooting.
2263 */
2264static int
2265tl_shutdown(dev)
2266	device_t		dev;
2267{
2268	struct tl_softc		*sc;
2269
2270	sc = device_get_softc(dev);
2271
2272	TL_LOCK(sc);
2273	tl_stop(sc);
2274	TL_UNLOCK(sc);
2275
2276	return (0);
2277}
2278