1/*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1997, 1998
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/*
39 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
40 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
41 * the National Semiconductor DP83840A physical interface and the
42 * Microchip Technology 24Cxx series serial EEPROM.
43 *
44 * Written using the following four documents:
45 *
46 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
47 * National Semiconductor DP83840A data sheet (www.national.com)
48 * Microchip Technology 24C02C data sheet (www.microchip.com)
49 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
50 *
51 * Written by Bill Paul <wpaul@ctr.columbia.edu>
52 * Electrical Engineering Department
53 * Columbia University, New York City
54 */
55/*
56 * Some notes about the ThunderLAN:
57 *
58 * The ThunderLAN controller is a single chip containing PCI controller
59 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
60 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
61 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
62 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
63 * to act as a complete ethernet interface.
64 *
65 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
66 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
67 * in full or half duplex. Some of the Compaq Deskpro machines use a
68 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
69 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
70 * concert with the ThunderLAN's internal PHY to provide full 10/100
71 * support. This is cheaper than using a standalone external PHY for both
72 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
73 * A serial EEPROM is also attached to the ThunderLAN chip to provide
74 * power-up default register settings and for storing the adapter's
75 * station address. Although not supported by this driver, the ThunderLAN
76 * chip can also be connected to token ring PHYs.
77 *
78 * The ThunderLAN has a set of registers which can be used to issue
79 * commands, acknowledge interrupts, and to manipulate other internal
80 * registers on its DIO bus. The primary registers can be accessed
81 * using either programmed I/O (inb/outb) or via PCI memory mapping,
82 * depending on how the card is configured during the PCI probing
83 * phase. It is even possible to have both PIO and memory mapped
84 * access turned on at the same time.
85 *
86 * Frame reception and transmission with the ThunderLAN chip is done
87 * using frame 'lists.' A list structure looks more or less like this:
88 *
89 * struct tl_frag {
90 *	u_int32_t		fragment_address;
91 *	u_int32_t		fragment_size;
92 * };
93 * struct tl_list {
94 *	u_int32_t		forward_pointer;
95 *	u_int16_t		cstat;
96 *	u_int16_t		frame_size;
97 *	struct tl_frag		fragments[10];
98 * };
99 *
100 * The forward pointer in the list header can be either a 0 or the address
101 * of another list, which allows several lists to be linked together. Each
102 * list contains up to 10 fragment descriptors. This means the chip allows
103 * ethernet frames to be split into as many as 10 chunks for transfer to
104 * and from the SRAM. Note that the forward pointer and fragment buffer
105 * addresses are physical memory addresses, not virtual. Note also that
106 * a single ethernet frame cannot span lists: if the host wants to
107 * transmit a frame and the frame data is split up over more than 10
108 * buffers, the frame has to be collapsed before it can be transmitted.
109 *
110 * To receive frames, the driver sets up a number of lists and populates
111 * the fragment descriptors, then it sends an RX GO command to the chip.
112 * When a frame is received, the chip will DMA it into the memory regions
113 * specified by the fragment descriptors and then trigger an RX 'end of
114 * frame interrupt' when done. The driver may choose to use only one
115 * fragment per list; this may result in slightly less efficient use
116 * of memory in exchange for improved performance.
117 *
118 * To transmit frames, the driver again sets up lists and fragment
119 * descriptors, only this time the buffers contain frame data that
120 * is to be DMA'ed into the chip instead of out of it. Once the chip
121 * has transferred the data into its on-board SRAM, it will trigger a
122 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
123 * interrupt when it reaches the end of the list.
124 */
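/*
 * Purely illustrative example (these assignments do not appear in the
 * driver): a transmit list describing a 1514-byte frame whose data sits
 * in two hypothetical buffers, buf0 and buf1, could be filled in roughly
 * like this, with the unused fragment slots left untouched and the size
 * of the final fragment tagged with TL_LAST_FRAG (see tl_encap() below):
 *
 *	list.forward_pointer = 0;		   end of the chain
 *	list.cstat           = TL_CSTAT_READY;	   hand the list to the chip
 *	list.frame_size      = 1514;
 *	list.fragments[0]    = { vtophys(buf0), 14 };
 *	list.fragments[1]    = { vtophys(buf1), 1500 | TL_LAST_FRAG };
 */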
125/*
126 * Some notes about this driver:
127 *
128 * The ThunderLAN chip provides a couple of different ways to organize
129 * reception, transmission and interrupt handling. The simplest approach
130 * is to use one list each for transmission and reception. In this mode,
131 * the ThunderLAN will generate two interrupts for every received frame
132 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
133 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
134 * performance to have to handle so many interrupts.
135 *
136 * Initially I wanted to create a circular list of receive buffers so
137 * that the ThunderLAN chip would think there was an infinitely long
138 * receive channel and never deliver an RXEOC interrupt. However this
139 * doesn't work correctly under heavy load: while the manual says the
140 * chip will trigger an RXEOF interrupt each time a frame is copied into
141 * memory, you can't count on the chip waiting around for you to acknowledge
142 * the interrupt before it starts trying to DMA the next frame. The result
143 * is that the chip might traverse the entire circular list and then wrap
144 * around before you have a chance to do anything about it. Consequently,
145 * the receive list is terminated (with a 0 in the forward pointer in the
146 * last element). Each time an RXEOF interrupt arrives, the used list
147 * element is moved to the end of the chain. This gives the appearance of an
148 * infinitely large RX chain so long as the driver doesn't fall behind
149 * the chip and allow all of the lists to be filled up.
150 *
151 * If all the lists are filled, the adapter will deliver an RX 'end of
152 * channel' interrupt when it hits the 0 forward pointer at the end of
153 * the chain. The RXEOC handler then cleans out the RX chain and resets
154 * the list head pointer in the ch_parm register and restarts the receiver.
155 *
156 * For frame transmission, it is possible to program the ThunderLAN's
157 * transmit interrupt threshold so that the chip can acknowledge multiple
158 * lists with only a single TX EOF interrupt. This allows the driver to
159 * queue several frames in one shot, and only have to handle a total
160 * of two interrupts (one TX EOF and one TX EOC) no matter how many frames
161 * are transmitted. Frame transmission is done directly out of the
162 * mbufs passed to the tl_start() routine via the interface send queue.
163 * The driver simply sets up the fragment descriptors in the transmit
164 * lists to point to the mbuf data regions and sends a TX GO command.
165 *
166 * Note that since the RX and TX lists themselves are always used
167 * only by the driver, they are allocated once at driver initialization
168 * time (with contigmalloc()) and released only when the driver detaches.
169 *
170 * Also, in order to remain as platform independent as possible, this
171 * driver uses memory mapped register access to manipulate the card
172 * as opposed to programmed I/O. This avoids the use of the inb/outb
173 * (and related) instructions which are specific to the i386 platform.
174 *
175 * Using these techniques, this driver achieves very high performance
176 * by minimizing the amount of interrupts generated during large
177 * transfers and by completely avoiding buffer copies. Frame transfer
178 * to and from the ThunderLAN chip is performed entirely by the chip
179 * itself thereby reducing the load on the host CPU.
180 */
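/*
 * For illustration only, the RX list recycling described above boils
 * down to the following steps, performed in tl_intvec_rxeof() on the
 * list that just completed ('used') and the current end of the chain
 * ('tail'); the real code operates on struct tl_chain_onefrag wrappers
 * rather than on bare lists:
 *
 *	used->tlist_fptr  = 0;			new end of the chain
 *	used->tlist_cstat = TL_CSTAT_READY;	fresh mbuf already attached
 *	tail->tlist_fptr  = vtophys(used);	graft it after the old tail
 *	tail              = used;
 */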
181
182#include <sys/param.h>
183#include <sys/systm.h>
184#include <sys/sockio.h>
185#include <sys/mbuf.h>
186#include <sys/malloc.h>
187#include <sys/kernel.h>
188#include <sys/module.h>
189#include <sys/socket.h>
190
191#include <net/if.h>
192#include <net/if_var.h>
193#include <net/if_arp.h>
194#include <net/ethernet.h>
195#include <net/if_dl.h>
196#include <net/if_media.h>
197#include <net/if_types.h>
198
199#include <net/bpf.h>
200
201#include <vm/vm.h>              /* for vtophys */
202#include <vm/pmap.h>            /* for vtophys */
203#include <machine/bus.h>
204#include <machine/resource.h>
205#include <sys/bus.h>
206#include <sys/rman.h>
207
208#include <dev/mii/mii.h>
209#include <dev/mii/mii_bitbang.h>
210#include <dev/mii/miivar.h>
211
212#include <dev/pci/pcireg.h>
213#include <dev/pci/pcivar.h>
214
215/*
216 * Default to using PIO register access mode to pacify certain
217 * laptop docking stations with built-in ThunderLAN chips that
218 * don't seem to handle memory mapped mode properly.
219 */
220#define TL_USEIOSPACE
221
222#include <dev/tl/if_tlreg.h>
223
224MODULE_DEPEND(tl, pci, 1, 1, 1);
225MODULE_DEPEND(tl, ether, 1, 1, 1);
226MODULE_DEPEND(tl, miibus, 1, 1, 1);
227
228/* "device miibus" required.  See GENERIC if you get errors here. */
229#include "miibus_if.h"
230
231/*
232 * Various supported device vendors/types and their names.
233 */
234
235static const struct tl_type tl_devs[] = {
236	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
237		"Texas Instruments ThunderLAN" },
238	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
239		"Compaq Netelligent 10" },
240	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
241		"Compaq Netelligent 10/100" },
242	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
243		"Compaq Netelligent 10/100 Proliant" },
244	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
245		"Compaq Netelligent 10/100 Dual Port" },
246	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
247		"Compaq NetFlex-3/P Integrated" },
248	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
249		"Compaq NetFlex-3/P" },
250	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
251		"Compaq NetFlex 3/P w/ BNC" },
252	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
253		"Compaq Netelligent 10/100 TX Embedded UTP" },
254	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
255		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
256	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
257		"Compaq Netelligent 10/100 TX UTP" },
258	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
259		"Olicom OC-2183/2185" },
260	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
261		"Olicom OC-2325" },
262	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
263		"Olicom OC-2326 10/100 TX UTP" },
264	{ 0, 0, NULL }
265};
266
267static int tl_probe(device_t);
268static int tl_attach(device_t);
269static int tl_detach(device_t);
270static int tl_intvec_rxeoc(void *, u_int32_t);
271static int tl_intvec_txeoc(void *, u_int32_t);
272static int tl_intvec_txeof(void *, u_int32_t);
273static int tl_intvec_rxeof(void *, u_int32_t);
274static int tl_intvec_adchk(void *, u_int32_t);
275static int tl_intvec_netsts(void *, u_int32_t);
276
277static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
278static void tl_stats_update(void *);
279static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
280
281static void tl_intr(void *);
282static void tl_start(struct ifnet *);
283static void tl_start_locked(struct ifnet *);
284static int tl_ioctl(struct ifnet *, u_long, caddr_t);
285static void tl_init(void *);
286static void tl_init_locked(struct tl_softc *);
287static void tl_stop(struct tl_softc *);
288static void tl_watchdog(struct tl_softc *);
289static int tl_shutdown(device_t);
290static int tl_ifmedia_upd(struct ifnet *);
291static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
292
293static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
294static u_int8_t	tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
295static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
296
297static int tl_miibus_readreg(device_t, int, int);
298static int tl_miibus_writereg(device_t, int, int, int);
299static void tl_miibus_statchg(device_t);
300
301static void tl_setmode(struct tl_softc *, int);
302static uint32_t tl_mchash(const uint8_t *);
303static void tl_setmulti(struct tl_softc *);
304static void tl_setfilt(struct tl_softc *, caddr_t, int);
305static void tl_softreset(struct tl_softc *, int);
306static void tl_hardreset(device_t);
307static int tl_list_rx_init(struct tl_softc *);
308static int tl_list_tx_init(struct tl_softc *);
309
310static u_int8_t tl_dio_read8(struct tl_softc *, int);
311static u_int16_t tl_dio_read16(struct tl_softc *, int);
312static u_int32_t tl_dio_read32(struct tl_softc *, int);
313static void tl_dio_write8(struct tl_softc *, int, int);
314static void tl_dio_write16(struct tl_softc *, int, int);
315static void tl_dio_write32(struct tl_softc *, int, int);
316static void tl_dio_setbit(struct tl_softc *, int, int);
317static void tl_dio_clrbit(struct tl_softc *, int, int);
318static void tl_dio_setbit16(struct tl_softc *, int, int);
319static void tl_dio_clrbit16(struct tl_softc *, int, int);
320
321/*
322 * MII bit-bang glue
323 */
324static uint32_t tl_mii_bitbang_read(device_t);
325static void tl_mii_bitbang_write(device_t, uint32_t);
326
327static const struct mii_bitbang_ops tl_mii_bitbang_ops = {
328	tl_mii_bitbang_read,
329	tl_mii_bitbang_write,
330	{
331		TL_SIO_MDATA,	/* MII_BIT_MDO */
332		TL_SIO_MDATA,	/* MII_BIT_MDI */
333		TL_SIO_MCLK,	/* MII_BIT_MDC */
334		TL_SIO_MTXEN,	/* MII_BIT_DIR_HOST_PHY */
335		0,		/* MII_BIT_DIR_PHY_HOST */
336	}
337};
338
339#ifdef TL_USEIOSPACE
340#define TL_RES		SYS_RES_IOPORT
341#define TL_RID		TL_PCI_LOIO
342#else
343#define TL_RES		SYS_RES_MEMORY
344#define TL_RID		TL_PCI_LOMEM
345#endif
346
347static device_method_t tl_methods[] = {
348	/* Device interface */
349	DEVMETHOD(device_probe,		tl_probe),
350	DEVMETHOD(device_attach,	tl_attach),
351	DEVMETHOD(device_detach,	tl_detach),
352	DEVMETHOD(device_shutdown,	tl_shutdown),
353
354	/* MII interface */
355	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
356	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
357	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
358
359	DEVMETHOD_END
360};
361
362static driver_t tl_driver = {
363	"tl",
364	tl_methods,
365	sizeof(struct tl_softc)
366};
367
368static devclass_t tl_devclass;
369
370DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
371DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
372
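/*
 * DIO register accessors. The ThunderLAN's internal registers are not
 * mapped directly into host address space; instead, the register index
 * is written to TL_DIO_ADDR and the register is then accessed through
 * the 4-byte TL_DIO_DATA window, with (reg & 3) selecting the byte lane
 * within that window. The bus-space barriers keep the address write
 * ordered ahead of the data access.
 */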
373static u_int8_t tl_dio_read8(sc, reg)
374	struct tl_softc		*sc;
375	int			reg;
376{
377
378	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
379		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
380	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
381	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
382		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
383	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
384}
385
386static u_int16_t tl_dio_read16(sc, reg)
387	struct tl_softc		*sc;
388	int			reg;
389{
390
391	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
392		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
393	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
394	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
395		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
396	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
397}
398
399static u_int32_t tl_dio_read32(sc, reg)
400	struct tl_softc		*sc;
401	int			reg;
402{
403
404	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
405		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
406	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
407	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
408		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
409	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
410}
411
412static void tl_dio_write8(sc, reg, val)
413	struct tl_softc		*sc;
414	int			reg;
415	int			val;
416{
417
418	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
419		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
420	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
421	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
422		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
423	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
424}
425
426static void tl_dio_write16(sc, reg, val)
427	struct tl_softc		*sc;
428	int			reg;
429	int			val;
430{
431
432	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
433		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
434	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
435	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
436		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
437	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
438}
439
440static void tl_dio_write32(sc, reg, val)
441	struct tl_softc		*sc;
442	int			reg;
443	int			val;
444{
445
446	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
447		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
448	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
449	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
450		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
451	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
452}
453
454static void
455tl_dio_setbit(sc, reg, bit)
456	struct tl_softc		*sc;
457	int			reg;
458	int			bit;
459{
460	u_int8_t			f;
461
462	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
463		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
464	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
465	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
466		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
467	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
468	f |= bit;
469	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
470		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
471	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
472}
473
474static void
475tl_dio_clrbit(sc, reg, bit)
476	struct tl_softc		*sc;
477	int			reg;
478	int			bit;
479{
480	u_int8_t			f;
481
482	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
483		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
484	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
485	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
486		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
487	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
488	f &= ~bit;
489	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
490		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
491	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
492}
493
494static void tl_dio_setbit16(sc, reg, bit)
495	struct tl_softc		*sc;
496	int			reg;
497	int			bit;
498{
499	u_int16_t			f;
500
501	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
502		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
503	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
504	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
505		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
506	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
507	f |= bit;
508	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
509		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
510	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
511}
512
513static void tl_dio_clrbit16(sc, reg, bit)
514	struct tl_softc		*sc;
515	int			reg;
516	int			bit;
517{
518	u_int16_t			f;
519
520	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
521		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
522	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
523	CSR_BARRIER(sc, TL_DIO_ADDR, 2,
524		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
525	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
526	f &= ~bit;
527	CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
528		BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
529	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
530}
531
532/*
533 * Send an instruction or address to the EEPROM, check for ACK.
534 */
535static u_int8_t tl_eeprom_putbyte(sc, byte)
536	struct tl_softc		*sc;
537	int			byte;
538{
539	int			i, ack = 0;
540
541	/*
542	 * Make sure we're in TX mode.
543	 */
544	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
545
546	/*
547	 * Feed in each bit and strobe the clock.
548	 */
549	for (i = 0x80; i; i >>= 1) {
550		if (byte & i) {
551			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
552		} else {
553			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
554		}
555		DELAY(1);
556		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
557		DELAY(1);
558		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
559	}
560
561	/*
562	 * Turn off TX mode.
563	 */
564	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
565
566	/*
567	 * Check for ack.
568	 */
569	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
570	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
571	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
572
573	return(ack);
574}
575
576/*
577 * Read a byte of data stored in the EEPROM at address 'addr.'
578 */
579static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
580	struct tl_softc		*sc;
581	int			addr;
582	u_int8_t		*dest;
583{
584	int			i;
585	u_int8_t		byte = 0;
586	device_t		tl_dev = sc->tl_dev;
587
588	tl_dio_write8(sc, TL_NETSIO, 0);
589
590	EEPROM_START;
591
592	/*
593	 * Send write control code to EEPROM.
594	 */
595	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
596		device_printf(tl_dev, "failed to send write command, status: %x\n",
597		    tl_dio_read8(sc, TL_NETSIO));
598		return(1);
599	}
600
601	/*
602	 * Send address of byte we want to read.
603	 */
604	if (tl_eeprom_putbyte(sc, addr)) {
605		device_printf(tl_dev, "failed to send address, status: %x\n",
606		    tl_dio_read8(sc, TL_NETSIO));
607		return(1);
608	}
609
610	EEPROM_STOP;
611	EEPROM_START;
612	/*
613	 * Send read control code to EEPROM.
614	 */
615	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
616		device_printf(tl_dev, "failed to send read command, status: %x\n",
617		    tl_dio_read8(sc, TL_NETSIO));
618		return(1);
619	}
620
621	/*
622	 * Start reading bits from EEPROM.
623	 */
624	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
625	for (i = 0x80; i; i >>= 1) {
626		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
627		DELAY(1);
628		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
629			byte |= i;
630		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
631		DELAY(1);
632	}
633
634	EEPROM_STOP;
635
636	/*
637	 * No ACK generated for read, so just return byte.
638	 */
639
640	*dest = byte;
641
642	return(0);
643}
644
645/*
646 * Read a sequence of bytes from the EEPROM.
647 */
648static int
649tl_read_eeprom(sc, dest, off, cnt)
650	struct tl_softc		*sc;
651	caddr_t			dest;
652	int			off;
653	int			cnt;
654{
655	int			err = 0, i;
656	u_int8_t		byte = 0;
657
658	for (i = 0; i < cnt; i++) {
659		err = tl_eeprom_getbyte(sc, off + i, &byte);
660		if (err)
661			break;
662		*(dest + i) = byte;
663	}
664
665	return(err ? 1 : 0);
666}
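
/*
 * Example use (modeled on tl_attach() below): the station address is
 * read out of the EEPROM into a local buffer with
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN))
 *		... fail the attach ...
 *
 * where tl_eeaddr is TL_EEPROM_EADDR on TI/Compaq boards (offset 0x83)
 * and TL_EEPROM_EADDR_OC on Olicom boards (offset 0xF8).
 */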
667
668#define	TL_SIO_MII	(TL_SIO_MCLK | TL_SIO_MDATA | TL_SIO_MTXEN)
669
670/*
671 * Read the MII serial port for the MII bit-bang module.
672 */
673static uint32_t
674tl_mii_bitbang_read(device_t dev)
675{
676	struct tl_softc *sc;
677	uint32_t val;
678
679	sc = device_get_softc(dev);
680
681	val = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MII;
682	CSR_BARRIER(sc, TL_NETSIO, 1,
683	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
684
685	return (val);
686}
687
688/*
689 * Write the MII serial port for the MII bit-bang module.
690 */
691static void
692tl_mii_bitbang_write(device_t dev, uint32_t val)
693{
694	struct tl_softc *sc;
695
696	sc = device_get_softc(dev);
697
698	val = (tl_dio_read8(sc, TL_NETSIO) & ~TL_SIO_MII) | val;
699	CSR_BARRIER(sc, TL_NETSIO, 1,
700	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
701	tl_dio_write8(sc, TL_NETSIO, val);
702	CSR_BARRIER(sc, TL_NETSIO, 1,
703	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
704}
705
706static int
707tl_miibus_readreg(dev, phy, reg)
708	device_t		dev;
709	int			phy, reg;
710{
711	struct tl_softc		*sc;
712	int			minten, val;
713
714	sc = device_get_softc(dev);
715
716	/*
717	 * Turn off MII interrupt by forcing MINTEN low.
718	 */
719	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
720	if (minten) {
721		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
722	}
723
724	val = mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg);
725
726	/* Reenable interrupts. */
727	if (minten) {
728		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
729	}
730
731	return (val);
732}
733
734static int
735tl_miibus_writereg(dev, phy, reg, data)
736	device_t		dev;
737	int			phy, reg, data;
738{
739	struct tl_softc		*sc;
740	int			minten;
741
742	sc = device_get_softc(dev);
743
744	/*
745	 * Turn off MII interrupt by forcing MINTEN low.
746	 */
747	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
748	if (minten) {
749		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
750	}
751
752	mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, data);
753
754	/* Reenable interrupts. */
755	if (minten) {
756		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
757	}
758
759	return(0);
760}
761
762static void
763tl_miibus_statchg(dev)
764	device_t		dev;
765{
766	struct tl_softc		*sc;
767	struct mii_data		*mii;
768
769	sc = device_get_softc(dev);
770	mii = device_get_softc(sc->tl_miibus);
771
772	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
773		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
774	} else {
775		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
776	}
777}
778
779/*
780 * Set modes for bitrate devices.
781 */
782static void
783tl_setmode(sc, media)
784	struct tl_softc		*sc;
785	int			media;
786{
787	if (IFM_SUBTYPE(media) == IFM_10_5)
788		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
789	if (IFM_SUBTYPE(media) == IFM_10_T) {
790		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
791		if ((media & IFM_GMASK) == IFM_FDX) {
792			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
793			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
794		} else {
795			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
796			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
797		}
798	}
799}
800
801/*
802 * Calculate the hash of a MAC address for programming the multicast hash
803 * table.  This hash is simply the address split into 6-bit chunks
804 * XOR'd, e.g.
805 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
806 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
807 * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
808 * the folded 24-bit value is split into 6-bit portions and XOR'd.
809 */
810static uint32_t
811tl_mchash(addr)
812	const uint8_t *addr;
813{
814	int t;
815
816	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
817		(addr[2] ^ addr[5]);
818	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
819}
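
/*
 * Worked example: the all-hosts group 224.0.0.1 maps to the MAC address
 * 01:00:5e:00:00:01, which folds to t = 0x01005f; the 6-bit chunks are
 * then 0x00, 0x10, 0x01 and 0x1f, which XOR to 14. If this group ends up
 * in the hash filter (rather than in one of the three spare perfect-filter
 * slots), tl_setmulti() below sets bit 14 of TL_HASH1 for it; hash values
 * 32-63 land in TL_HASH2 instead.
 */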
820
821/*
822 * The ThunderLAN has a perfect MAC address filter in addition to
823 * the multicast hash filter. The perfect filter can be programmed
824 * with up to four MAC addresses. The first one is always used to
825 * hold the station address, which leaves us free to use the other
826 * three for multicast addresses.
827 */
828static void
829tl_setfilt(sc, addr, slot)
830	struct tl_softc		*sc;
831	caddr_t			addr;
832	int			slot;
833{
834	int			i;
835	u_int16_t		regaddr;
836
837	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
838
839	for (i = 0; i < ETHER_ADDR_LEN; i++)
840		tl_dio_write8(sc, regaddr + i, *(addr + i));
841}
842
843/*
844 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
845 * linked list. This is fine, except addresses are added from the head
846 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
847 * group to always be in the perfect filter, but as more groups are added,
848 * the 224.0.0.1 entry (which is always added first) gets pushed down
849 * the list and ends up at the tail. So after 3 or 4 multicast groups
850 * are added, the all-hosts entry gets pushed out of the perfect filter
851 * and into the hash table.
852 *
853 * Because the multicast list is a doubly-linked list as opposed to a
854 * circular queue, we don't have the ability to just grab the tail of
855 * the list and traverse it backwards. Instead, we have to traverse
856 * the list once to find the tail, then traverse it again backwards to
857 * update the multicast filter.
858 */
859static void
860tl_setmulti(sc)
861	struct tl_softc		*sc;
862{
863	struct ifnet		*ifp;
864	u_int32_t		hashes[2] = { 0, 0 };
865	int			h, i;
866	struct ifmultiaddr	*ifma;
867	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
868	ifp = sc->tl_ifp;
869
870	/* First, zot all the existing filters. */
871	for (i = 1; i < 4; i++)
872		tl_setfilt(sc, (caddr_t)&dummy, i);
873	tl_dio_write32(sc, TL_HASH1, 0);
874	tl_dio_write32(sc, TL_HASH2, 0);
875
876	/* Now program new ones. */
877	if (ifp->if_flags & IFF_ALLMULTI) {
878		hashes[0] = 0xFFFFFFFF;
879		hashes[1] = 0xFFFFFFFF;
880	} else {
881		i = 1;
882		if_maddr_rlock(ifp);
883		/* XXX want to maintain reverse semantics - pop list and re-add? */
884		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
885			if (ifma->ifma_addr->sa_family != AF_LINK)
886				continue;
887			/*
888			 * Program the first three multicast groups
889			 * into the perfect filter. For all others,
890			 * use the hash table.
891			 */
892			if (i < 4) {
893				tl_setfilt(sc,
894			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
895				i++;
896				continue;
897			}
898
899			h = tl_mchash(
900				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
901			if (h < 32)
902				hashes[0] |= (1 << h);
903			else
904				hashes[1] |= (1 << (h - 32));
905		}
906		if_maddr_runlock(ifp);
907	}
908
909	tl_dio_write32(sc, TL_HASH1, hashes[0]);
910	tl_dio_write32(sc, TL_HASH2, hashes[1]);
911}
912
913/*
914 * This routine is recommended by the ThunderLAN manual to ensure that
915 * the internal PHY is powered up correctly. It also recommends a one
916 * second pause at the end to 'wait for the clocks to start' but in my
917 * experience this isn't necessary.
918 */
919static void
920tl_hardreset(dev)
921	device_t		dev;
922{
923	int			i;
924	u_int16_t		flags;
925
926	mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
927
928	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
929
930	for (i = 0; i < MII_NPHY; i++)
931		tl_miibus_writereg(dev, i, MII_BMCR, flags);
932
933	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
934	DELAY(50000);
935	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
936	mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
937	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
938
939	DELAY(50000);
940}
941
942static void
943tl_softreset(sc, internal)
944	struct tl_softc		*sc;
945	int			internal;
946{
947        u_int32_t               cmd, dummy, i;
948
949        /* Assert the adapter reset bit. */
950	CMD_SET(sc, TL_CMD_ADRST);
951
952        /* Turn off interrupts */
953	CMD_SET(sc, TL_CMD_INTSOFF);
954
955	/* First, clear the stats registers. */
956	for (i = 0; i < 5; i++)
957		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
958
959        /* Clear Areg and Hash registers */
960	for (i = 0; i < 8; i++)
961		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
962
963        /*
964	 * Set up Netconfig register. Enable one channel and
965	 * one fragment mode.
966	 */
967	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
968	if (internal && !sc->tl_bitrate) {
969		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
970	} else {
971		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
972	}
973
974	/* Handle cards with bitrate devices. */
975	if (sc->tl_bitrate)
976		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
977
978	/*
979	 * Load adapter irq pacing timer and tx threshold.
980	 * We make the transmit threshold 1 initially but we may
981	 * change that later.
982	 */
983	cmd = CSR_READ_4(sc, TL_HOSTCMD);
984	cmd |= TL_CMD_NES;
985	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
986	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
987	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
988
989        /* Unreset the MII */
990	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
991
992	/* Take the adapter out of reset */
993	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
994
995	/* Wait for things to settle down a little. */
996	DELAY(500);
997}
998
999/*
1000 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1001 * against our list and return its name if we find a match.
1002 */
1003static int
1004tl_probe(dev)
1005	device_t		dev;
1006{
1007	const struct tl_type	*t;
1008
1009	t = tl_devs;
1010
1011	while(t->tl_name != NULL) {
1012		if ((pci_get_vendor(dev) == t->tl_vid) &&
1013		    (pci_get_device(dev) == t->tl_did)) {
1014			device_set_desc(dev, t->tl_name);
1015			return (BUS_PROBE_DEFAULT);
1016		}
1017		t++;
1018	}
1019
1020	return(ENXIO);
1021}
1022
1023static int
1024tl_attach(dev)
1025	device_t		dev;
1026{
1027	u_int16_t		did, vid;
1028	const struct tl_type	*t;
1029	struct ifnet		*ifp;
1030	struct tl_softc		*sc;
1031	int			error, flags, i, rid, unit;
1032	u_char			eaddr[6];
1033
1034	vid = pci_get_vendor(dev);
1035	did = pci_get_device(dev);
1036	sc = device_get_softc(dev);
1037	sc->tl_dev = dev;
1038	unit = device_get_unit(dev);
1039
1040	t = tl_devs;
1041	while(t->tl_name != NULL) {
1042		if (vid == t->tl_vid && did == t->tl_did)
1043			break;
1044		t++;
1045	}
1046
1047	if (t->tl_name == NULL) {
1048		device_printf(dev, "unknown device!?\n");
1049		return (ENXIO);
1050	}
1051
1052	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1053	    MTX_DEF);
1054
1055	/*
1056	 * Map control/status registers.
1057	 */
1058	pci_enable_busmaster(dev);
1059
1060#ifdef TL_USEIOSPACE
1061
1062	rid = TL_PCI_LOIO;
1063	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1064		RF_ACTIVE);
1065
1066	/*
1067	 * Some cards have the I/O and memory mapped address registers
1068	 * reversed. Try both combinations before giving up.
1069	 */
1070	if (sc->tl_res == NULL) {
1071		rid = TL_PCI_LOMEM;
1072		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1073		    RF_ACTIVE);
1074	}
1075#else
1076	rid = TL_PCI_LOMEM;
1077	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1078	    RF_ACTIVE);
1079	if (sc->tl_res == NULL) {
1080		rid = TL_PCI_LOIO;
1081		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1082		    RF_ACTIVE);
1083	}
1084#endif
1085
1086	if (sc->tl_res == NULL) {
1087		device_printf(dev, "couldn't map ports/memory\n");
1088		error = ENXIO;
1089		goto fail;
1090	}
1091
1092#ifdef notdef
1093	/*
1094	 * The ThunderLAN manual suggests jacking the PCI latency
1095	 * timer all the way up to its maximum value. I'm not sure
1096	 * if this is really necessary, but what the manual wants,
1097	 * the manual gets.
1098	 */
1099	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1100	command |= 0x0000FF00;
1101	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1102#endif
1103
1104	/* Allocate interrupt */
1105	rid = 0;
1106	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1107	    RF_SHAREABLE | RF_ACTIVE);
1108
1109	if (sc->tl_irq == NULL) {
1110		device_printf(dev, "couldn't map interrupt\n");
1111		error = ENXIO;
1112		goto fail;
1113	}
1114
1115	/*
1116	 * Now allocate memory for the TX and RX lists.
1117	 */
1118	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1119	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1120
1121	if (sc->tl_ldata == NULL) {
1122		device_printf(dev, "no memory for list buffers!\n");
1123		error = ENXIO;
1124		goto fail;
1125	}
1126
1127	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1128
1129	if (vid == COMPAQ_VENDORID || vid == TI_VENDORID)
1130		sc->tl_eeaddr = TL_EEPROM_EADDR;
1131	if (vid == OLICOM_VENDORID)
1132		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1133
1134	/* Reset the adapter. */
1135	tl_softreset(sc, 1);
1136	tl_hardreset(dev);
1137	tl_softreset(sc, 1);
1138
1139	/*
1140	 * Get station address from the EEPROM.
1141	 */
1142	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1143		device_printf(dev, "failed to read station address\n");
1144		error = ENXIO;
1145		goto fail;
1146	}
1147
1148        /*
1149         * XXX Olicom, in its desire to be different from the
1150         * rest of the world, has done strange things with the
1151         * encoding of the station address in the EEPROM. First
1152         * of all, they store the address at offset 0xF8 rather
1153         * than at 0x83 like the ThunderLAN manual suggests.
1154         * Second, they store the address in three 16-bit words in
1155         * network byte order, as opposed to storing it sequentially
1156         * like all the other ThunderLAN cards. In order to get
1157         * the station address in a form that matches what the Olicom
1158         * diagnostic utility specifies, we have to byte-swap each
1159         * word. To make things even more confusing, neither 00:00:28
1160         * nor 00:00:24 appear in the IEEE OUI database.
1161         */
1162        if (vid == OLICOM_VENDORID) {
1163                for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1164                        u_int16_t               *p;
1165                        p = (u_int16_t *)&eaddr[i];
1166                        *p = ntohs(*p);
1167                }
1168        }
1169
1170	ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1171	if (ifp == NULL) {
1172		device_printf(dev, "can not if_alloc()\n");
1173		error = ENOSPC;
1174		goto fail;
1175	}
1176	ifp->if_softc = sc;
1177	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1178	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1179	ifp->if_ioctl = tl_ioctl;
1180	ifp->if_start = tl_start;
1181	ifp->if_init = tl_init;
1182	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1183	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1184	ifp->if_capenable |= IFCAP_VLAN_MTU;
1185	callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);
1186
1187	/* Reset the adapter again. */
1188	tl_softreset(sc, 1);
1189	tl_hardreset(dev);
1190	tl_softreset(sc, 1);
1191
1192	/*
1193	 * Do MII setup. If no PHYs are found, then this is a
1194	 * bitrate ThunderLAN chip that only supports 10baseT
1195	 * and AUI/BNC.
1196	 * XXX mii_attach() can fail for reasons other than
1197	 * no PHYs being found!
1198	 */
1199	flags = 0;
1200	if (vid == COMPAQ_VENDORID) {
1201		if (did == COMPAQ_DEVICEID_NETEL_10_100_PROLIANT ||
1202		    did == COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED ||
1203		    did == COMPAQ_DEVICEID_NETFLEX_3P_BNC ||
1204		    did == COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX)
1205			flags |= MIIF_MACPRIV0;
1206		if (did == COMPAQ_DEVICEID_NETEL_10 ||
1207		    did == COMPAQ_DEVICEID_NETEL_10_100_DUAL ||
1208		    did == COMPAQ_DEVICEID_NETFLEX_3P ||
1209		    did == COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED)
1210			flags |= MIIF_MACPRIV1;
1211	} else if (vid == OLICOM_VENDORID && did == OLICOM_DEVICEID_OC2183)
1212			flags |= MIIF_MACPRIV0 | MIIF_MACPRIV1;
1213	if (mii_attach(dev, &sc->tl_miibus, ifp, tl_ifmedia_upd,
1214	    tl_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0)) {
1215		struct ifmedia		*ifm;
1216		sc->tl_bitrate = 1;
1217		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1218		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1219		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1220		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1221		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1222		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1223		/* Reset again, this time setting bitrate mode. */
1224		tl_softreset(sc, 1);
1225		ifm = &sc->ifmedia;
1226		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1227		tl_ifmedia_upd(ifp);
1228	}
1229
1230	/*
1231	 * Call MI attach routine.
1232	 */
1233	ether_ifattach(ifp, eaddr);
1234
1235	/* Hook interrupt last to avoid having to lock softc */
1236	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1237	    NULL, tl_intr, sc, &sc->tl_intrhand);
1238
1239	if (error) {
1240		device_printf(dev, "couldn't set up irq\n");
1241		ether_ifdetach(ifp);
1242		goto fail;
1243	}
1244
1245	gone_by_fcp101_dev(dev);
1246
1247fail:
1248	if (error)
1249		tl_detach(dev);
1250
1251	return(error);
1252}
1253
1254/*
1255 * Shutdown hardware and free up resources. This can be called any
1256 * time after the mutex has been initialized. It is called in both
1257 * the error case in attach and the normal detach case so it needs
1258 * to be careful about only freeing resources that have actually been
1259 * allocated.
1260 */
1261static int
1262tl_detach(dev)
1263	device_t		dev;
1264{
1265	struct tl_softc		*sc;
1266	struct ifnet		*ifp;
1267
1268	sc = device_get_softc(dev);
1269	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1270	ifp = sc->tl_ifp;
1271
1272	/* These should only be active if attach succeeded */
1273	if (device_is_attached(dev)) {
1274		ether_ifdetach(ifp);
1275		TL_LOCK(sc);
1276		tl_stop(sc);
1277		TL_UNLOCK(sc);
1278		callout_drain(&sc->tl_stat_callout);
1279	}
1280	if (sc->tl_miibus)
1281		device_delete_child(dev, sc->tl_miibus);
1282	bus_generic_detach(dev);
1283
1284	if (sc->tl_ldata)
1285		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1286	if (sc->tl_bitrate)
1287		ifmedia_removeall(&sc->ifmedia);
1288
1289	if (sc->tl_intrhand)
1290		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1291	if (sc->tl_irq)
1292		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1293	if (sc->tl_res)
1294		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1295
1296	if (ifp)
1297		if_free(ifp);
1298
1299	mtx_destroy(&sc->tl_mtx);
1300
1301	return(0);
1302}
1303
1304/*
1305 * Initialize the transmit lists.
1306 */
1307static int
1308tl_list_tx_init(sc)
1309	struct tl_softc		*sc;
1310{
1311	struct tl_chain_data	*cd;
1312	struct tl_list_data	*ld;
1313	int			i;
1314
1315	cd = &sc->tl_cdata;
1316	ld = sc->tl_ldata;
1317	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1318		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1319		if (i == (TL_TX_LIST_CNT - 1))
1320			cd->tl_tx_chain[i].tl_next = NULL;
1321		else
1322			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1323	}
1324
1325	cd->tl_tx_free = &cd->tl_tx_chain[0];
1326	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1327	sc->tl_txeoc = 1;
1328
1329	return(0);
1330}
1331
1332/*
1333 * Initialize the RX lists and allocate mbufs for them.
1334 */
1335static int
1336tl_list_rx_init(sc)
1337	struct tl_softc		*sc;
1338{
1339	struct tl_chain_data		*cd;
1340	struct tl_list_data		*ld;
1341	int				i;
1342
1343	cd = &sc->tl_cdata;
1344	ld = sc->tl_ldata;
1345
1346	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1347		cd->tl_rx_chain[i].tl_ptr =
1348			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1349		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1350			return(ENOBUFS);
1351		if (i == (TL_RX_LIST_CNT - 1)) {
1352			cd->tl_rx_chain[i].tl_next = NULL;
1353			ld->tl_rx_list[i].tlist_fptr = 0;
1354		} else {
1355			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1356			ld->tl_rx_list[i].tlist_fptr =
1357					vtophys(&ld->tl_rx_list[i + 1]);
1358		}
1359	}
1360
1361	cd->tl_rx_head = &cd->tl_rx_chain[0];
1362	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1363
1364	return(0);
1365}
1366
1367static int
1368tl_newbuf(sc, c)
1369	struct tl_softc		*sc;
1370	struct tl_chain_onefrag	*c;
1371{
1372	struct mbuf		*m_new = NULL;
1373
1374	m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1375	if (m_new == NULL)
1376		return(ENOBUFS);
1377
1378	c->tl_mbuf = m_new;
1379	c->tl_next = NULL;
1380	c->tl_ptr->tlist_frsize = MCLBYTES;
1381	c->tl_ptr->tlist_fptr = 0;
1382	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1383	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1384	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1385
1386	return(0);
1387}
1388/*
1389 * Interrupt handler for RX 'end of frame' condition (EOF). This
1390 * tells us that a full ethernet frame has been captured and we need
1391 * to handle it.
1392 *
1393 * Reception is done using 'lists' which consist of a header and a
1394 * series of 10 data count/data address pairs that point to buffers.
1395 * Initially you're supposed to create a list, populate it with pointers
1396 * to buffers, then load the physical address of the list into the
1397 * ch_parm register. The adapter is then supposed to DMA the received
1398 * frame into the buffers for you.
1399 *
1400 * To make things as fast as possible, we have the chip DMA directly
1401 * into mbufs. This saves us from having to do a buffer copy: we can
1402 * just hand the mbufs directly to ether_input(). Once the frame has
1403 * been sent on its way, the 'list' structure is assigned a new buffer
1404 * and moved to the end of the RX chain. As long as we stay ahead of
1405 * the chip, it will always think it has an endless receive channel.
1406 *
1407 * If we happen to fall behind and the chip manages to fill up all of
1408 * the buffers, it will generate an end of channel interrupt and wait
1409 * for us to empty the chain and restart the receiver.
1410 */
1411static int
1412tl_intvec_rxeof(xsc, type)
1413	void			*xsc;
1414	u_int32_t		type;
1415{
1416	struct tl_softc		*sc;
1417	int			r = 0, total_len = 0;
1418	struct ether_header	*eh;
1419	struct mbuf		*m;
1420	struct ifnet		*ifp;
1421	struct tl_chain_onefrag	*cur_rx;
1422
1423	sc = xsc;
1424	ifp = sc->tl_ifp;
1425
1426	TL_LOCK_ASSERT(sc);
1427
1428	while(sc->tl_cdata.tl_rx_head != NULL) {
1429		cur_rx = sc->tl_cdata.tl_rx_head;
1430		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1431			break;
1432		r++;
1433		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1434		m = cur_rx->tl_mbuf;
1435		total_len = cur_rx->tl_ptr->tlist_frsize;
1436
1437		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1438			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1439			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1440			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1441			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1442			continue;
1443		}
1444
1445		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1446						vtophys(cur_rx->tl_ptr);
1447		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1448		sc->tl_cdata.tl_rx_tail = cur_rx;
1449
1450		/*
1451		 * Note: when the ThunderLAN chip is in 'capture all
1452		 * frames' mode, it will receive its own transmissions.
1453		 * We don't need to process our own transmissions,
1454		 * so we drop them here and continue.
1455		 */
1456		eh = mtod(m, struct ether_header *);
1457		/*if (ifp->if_flags & IFF_PROMISC && */
1458		if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp),
1459		 					ETHER_ADDR_LEN)) {
1460				m_freem(m);
1461				continue;
1462		}
1463
1464		m->m_pkthdr.rcvif = ifp;
1465		m->m_pkthdr.len = m->m_len = total_len;
1466
1467		TL_UNLOCK(sc);
1468		(*ifp->if_input)(ifp, m);
1469		TL_LOCK(sc);
1470	}
1471
1472	return(r);
1473}
1474
1475/*
1476 * The RX-EOC condition hits when the ch_parm address hasn't been
1477 * initialized or the adapter reached a list with a forward pointer
1478 * of 0 (which indicates the end of the chain). In our case, this means
1479 * the card has hit the end of the receive buffer chain and we need to
1480 * empty out the buffers and shift the pointer back to the beginning again.
1481 */
1482static int
1483tl_intvec_rxeoc(xsc, type)
1484	void			*xsc;
1485	u_int32_t		type;
1486{
1487	struct tl_softc		*sc;
1488	int			r;
1489	struct tl_chain_data	*cd;
1490
1491
1492	sc = xsc;
1493	cd = &sc->tl_cdata;
1494
1495	/* Flush out the receive queue and ack RXEOF interrupts. */
1496	r = tl_intvec_rxeof(xsc, type);
1497	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1498	r = 1;
1499	cd->tl_rx_head = &cd->tl_rx_chain[0];
1500	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1501	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1502	r |= (TL_CMD_GO|TL_CMD_RT);
1503	return(r);
1504}
1505
1506static int
1507tl_intvec_txeof(xsc, type)
1508	void			*xsc;
1509	u_int32_t		type;
1510{
1511	struct tl_softc		*sc;
1512	int			r = 0;
1513	struct tl_chain		*cur_tx;
1514
1515	sc = xsc;
1516
1517	/*
1518	 * Go through our tx list and free mbufs for those
1519	 * frames that have been sent.
1520	 */
1521	while (sc->tl_cdata.tl_tx_head != NULL) {
1522		cur_tx = sc->tl_cdata.tl_tx_head;
1523		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1524			break;
1525		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1526
1527		r++;
1528		m_freem(cur_tx->tl_mbuf);
1529		cur_tx->tl_mbuf = NULL;
1530
1531		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1532		sc->tl_cdata.tl_tx_free = cur_tx;
1533		if (!cur_tx->tl_ptr->tlist_fptr)
1534			break;
1535	}
1536
1537	return(r);
1538}
1539
1540/*
1541 * The transmit end of channel interrupt. The adapter triggers this
1542 * interrupt to tell us it hit the end of the current transmit list.
1543 *
1544 * A note about this: it's possible for a condition to arise where
1545 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1546 * You have to avoid this since the chip expects things to go in a
1547 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1548 * When the TXEOF handler is called, it will free all of the transmitted
1549 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1550 * interrupt should be received and acknowledged before any more frames
1551 * are queued for transmission. If tl_start() is called after TXEOF
1552 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1553 * it could attempt to issue a transmit command prematurely.
1554 *
1555 * To guard against this, tl_start() will only issue transmit commands
1556 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1557 * can set this flag once tl_start() has cleared it.
1558 */
1559static int
1560tl_intvec_txeoc(xsc, type)
1561	void			*xsc;
1562	u_int32_t		type;
1563{
1564	struct tl_softc		*sc;
1565	struct ifnet		*ifp;
1566	u_int32_t		cmd;
1567
1568	sc = xsc;
1569	ifp = sc->tl_ifp;
1570
1571	/* Clear the timeout timer. */
1572	sc->tl_timer = 0;
1573
1574	if (sc->tl_cdata.tl_tx_head == NULL) {
1575		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1576		sc->tl_cdata.tl_tx_tail = NULL;
1577		sc->tl_txeoc = 1;
1578	} else {
1579		sc->tl_txeoc = 0;
1580		/* First we have to ack the EOC interrupt. */
1581		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1582		/* Then load the address of the next TX list. */
1583		CSR_WRITE_4(sc, TL_CH_PARM,
1584		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1585		/* Restart TX channel. */
1586		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1587		cmd &= ~TL_CMD_RT;
1588		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1589		CMD_PUT(sc, cmd);
1590		return(0);
1591	}
1592
1593	return(1);
1594}
1595
1596static int
1597tl_intvec_adchk(xsc, type)
1598	void			*xsc;
1599	u_int32_t		type;
1600{
1601	struct tl_softc		*sc;
1602
1603	sc = xsc;
1604
1605	if (type)
1606		device_printf(sc->tl_dev, "adapter check: %x\n",
1607			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1608
1609	tl_softreset(sc, 1);
1610	tl_stop(sc);
1611	tl_init_locked(sc);
1612	CMD_SET(sc, TL_CMD_INTSON);
1613
1614	return(0);
1615}
1616
1617static int
1618tl_intvec_netsts(xsc, type)
1619	void			*xsc;
1620	u_int32_t		type;
1621{
1622	struct tl_softc		*sc;
1623	u_int16_t		netsts;
1624
1625	sc = xsc;
1626
1627	netsts = tl_dio_read16(sc, TL_NETSTS);
1628	tl_dio_write16(sc, TL_NETSTS, netsts);
1629
1630	device_printf(sc->tl_dev, "network status: %x\n", netsts);
1631
1632	return(1);
1633}
1634
1635static void
1636tl_intr(xsc)
1637	void			*xsc;
1638{
1639	struct tl_softc		*sc;
1640	struct ifnet		*ifp;
1641	int			r = 0;
1642	u_int32_t		type = 0;
1643	u_int16_t		ints = 0;
1644	u_int8_t		ivec = 0;
1645
1646	sc = xsc;
1647	TL_LOCK(sc);
1648
1649	/* Disable interrupts */
1650	ints = CSR_READ_2(sc, TL_HOST_INT);
1651	CSR_WRITE_2(sc, TL_HOST_INT, ints);
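	/*
	 * The latched value is written straight back to TL_HOST_INT as part
	 * of the interrupt handshake.  'type' keeps the raw type/vector bits
	 * in its upper half so they can be OR'd into the TL_CMD_ACK command
	 * issued below, while 'ivec' and 'ints' become the decoded vector
	 * number and interrupt type.
	 */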
1652	type = (ints << 16) & 0xFFFF0000;
1653	ivec = (ints & TL_VEC_MASK) >> 5;
1654	ints = (ints & TL_INT_MASK) >> 2;
1655
1656	ifp = sc->tl_ifp;
1657
1658	switch(ints) {
1659	case (TL_INTR_INVALID):
1660#ifdef DIAGNOSTIC
1661		device_printf(sc->tl_dev, "got an invalid interrupt!\n");
1662#endif
1663		/* Re-enable interrupts but don't ack this one. */
1664		CMD_PUT(sc, type);
1665		r = 0;
1666		break;
1667	case (TL_INTR_TXEOF):
1668		r = tl_intvec_txeof((void *)sc, type);
1669		break;
1670	case (TL_INTR_TXEOC):
1671		r = tl_intvec_txeoc((void *)sc, type);
1672		break;
1673	case (TL_INTR_STATOFLOW):
1674		tl_stats_update(sc);
1675		r = 1;
1676		break;
1677	case (TL_INTR_RXEOF):
1678		r = tl_intvec_rxeof((void *)sc, type);
1679		break;
1680	case (TL_INTR_DUMMY):
1681		device_printf(sc->tl_dev, "got a dummy interrupt\n");
1682		r = 1;
1683		break;
1684	case (TL_INTR_ADCHK):
1685		if (ivec)
1686			r = tl_intvec_adchk((void *)sc, type);
1687		else
1688			r = tl_intvec_netsts((void *)sc, type);
1689		break;
1690	case (TL_INTR_RXEOC):
1691		r = tl_intvec_rxeoc((void *)sc, type);
1692		break;
1693	default:
1694		device_printf(sc->tl_dev, "bogus interrupt type\n");
1695		break;
1696	}
1697
1698	/* Re-enable interrupts */
1699	if (r) {
1700		CMD_PUT(sc, TL_CMD_ACK | r | type);
1701	}
1702
1703	if (ifp->if_snd.ifq_head != NULL)
1704		tl_start_locked(ifp);
1705
1706	TL_UNLOCK(sc);
1707}
1708
1709static void
1710tl_stats_update(xsc)
1711	void			*xsc;
1712{
1713	struct tl_softc		*sc;
1714	struct ifnet		*ifp;
1715	struct tl_stats		tl_stats;
1716	struct mii_data		*mii;
1717	u_int32_t		*p;
1718
1719	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1720
1721	sc = xsc;
1722	TL_LOCK_ASSERT(sc);
1723	ifp = sc->tl_ifp;
1724
1725	p = (u_int32_t *)&tl_stats;
1726
1727	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1728	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1729	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1730	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1731	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1732	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1733
1734	if_inc_counter(ifp, IFCOUNTER_OPACKETS, tl_tx_goodframes(tl_stats));
1735	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1736	    tl_stats.tl_tx_single_collision + tl_stats.tl_tx_multi_collision);
1737	if_inc_counter(ifp, IFCOUNTER_IPACKETS, tl_rx_goodframes(tl_stats));
1738	if_inc_counter(ifp, IFCOUNTER_IERRORS, tl_stats.tl_crc_errors +
1739	    tl_stats.tl_code_errors + tl_rx_overrun(tl_stats));
1740	if_inc_counter(ifp, IFCOUNTER_OERRORS, tl_tx_underrun(tl_stats));
1741
1742	if (tl_tx_underrun(tl_stats)) {
1743		u_int8_t		tx_thresh;
1744		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1745		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1746			tx_thresh >>= 4;
1747			tx_thresh++;
1748			device_printf(sc->tl_dev, "tx underrun -- increasing "
1749			    "tx threshold to %d bytes\n",
1750			    (64 * (tx_thresh * 4)));
1751			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1752			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1753		}
1754	}
1755
1756	if (sc->tl_timer > 0 && --sc->tl_timer == 0)
1757		tl_watchdog(sc);
1758
1759	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1760
1761	if (!sc->tl_bitrate) {
1762		mii = device_get_softc(sc->tl_miibus);
1763		mii_tick(mii);
1764	}
1765}
1766
1767/*
1768 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1769 * pointers to the fragment pointers.
1770 */
1771static int
1772tl_encap(sc, c, m_head)
1773	struct tl_softc		*sc;
1774	struct tl_chain		*c;
1775	struct mbuf		*m_head;
1776{
1777	int			frag = 0;
1778	struct tl_frag		*f = NULL;
1779	int			total_len;
1780	struct mbuf		*m;
1781	struct ifnet		*ifp = sc->tl_ifp;
1782
1783	/*
1784 	 * Start packing the mbufs in this chain into
1785	 * the fragment pointers. Stop when we run out
1786 	 * of fragments or hit the end of the mbuf chain.
1787	 */
1788	m = m_head;
1789	total_len = 0;
1790
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len += m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				vtophys(mtod(m, vm_offset_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			if_printf(ifp, "no memory for tx list\n");
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			if (!(MCLGET(m_new, M_NOWAIT))) {
				m_freem(m_new);
				if_printf(ifp, "no memory for tx list\n");
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		if (frag == TL_MAXFRAGS)
			if_printf(ifp,
			    "all frags filled but frame still too small!\n");
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	sc = ifp->if_softc;
	TL_LOCK(sc);
	tl_start_locked(ifp);
	TL_UNLOCK(sc);
}

static void
tl_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	TL_LOCK_ASSERT(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

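	/*
	 * Dequeue packets until the send queue is empty or we run out of
	 * free TX chain entries. Each packet is encapsulated into a list
	 * entry and chained to the previous one through its physical
	 * forward pointer.
	 */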
	while(sc->tl_cdata.tl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->tl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->tl_timer = 5;
}

static void
tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;

	TL_LOCK(sc);
	tl_init_locked(sc);
	TL_UNLOCK(sc);
}

static void
tl_init_locked(sc)
	struct tl_softc		*sc;
{
	struct ifnet		*ifp = sc->tl_ifp;
	struct mii_data		*mii;

	TL_LOCK_ASSERT(sc);

	ifp = sc->tl_ifp;

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

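	/* Cap the maximum receive frame size at one mbuf cluster. */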
	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->tl_dev,
		    "initialization failed: no memory for rx buffers\n");
		tl_stop(sc);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		if (sc->tl_miibus != NULL) {
			mii = device_get_softc(sc->tl_miibus);
			mii_mediachg(mii);
		}
	} else {
		tl_ifmedia_upd(ifp);
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the stats update counter */
	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
}

/*
 * Set media options.
 */
static int
tl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;

	TL_LOCK(sc);
	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else {
		mii = device_get_softc(sc->tl_miibus);
		mii_mediachg(mii);
	}
	TL_UNLOCK(sc);

	return(0);
}

/*
 * Report current media status.
 */
static void
tl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct tl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	TL_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;

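	/*
	 * In bitrate (non-MII) mode the current media is inferred from
	 * the MTXD pins latched in the ACOMMIT register: MTXD1
	 * distinguishes AUI (10base5) from 10baseT and MTXD3 selects
	 * half versus full duplex.
	 */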
	if (sc->tl_bitrate) {
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
	} else {
		mii = device_get_softc(sc->tl_miibus);
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
	TL_UNLOCK(sc);
}

static int
tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		TL_LOCK(sc);
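		/*
		 * If only the PROMISC flag changed while the interface is
		 * running, just toggle the 'capture all frames' bit and
		 * reload the multicast filter rather than doing a full
		 * re-initialization.
		 */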
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				tl_stop(sc);
			}
		}
		sc->tl_if_flags = ifp->if_flags;
		TL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		TL_LOCK(sc);
		tl_setmulti(sc);
		TL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else {
			struct mii_data		*mii;
			mii = device_get_softc(sc->tl_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

static void
tl_watchdog(sc)
	struct tl_softc		*sc;
{
	struct ifnet		*ifp;

	TL_LOCK_ASSERT(sc);
	ifp = sc->tl_ifp;

	if_printf(ifp, "device timeout\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	tl_softreset(sc, 1);
	tl_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
tl_stop(sc)
	struct tl_softc		*sc;
{
	int			i;
	struct ifnet		*ifp;

	TL_LOCK_ASSERT(sc);

	ifp = sc->tl_ifp;

	/* Stop the stats updater. */
	callout_stop(&sc->tl_stat_callout);

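	/*
	 * The RT bit in the host command register selects which DMA
	 * channel a command applies to: it is cleared to stop the
	 * transmit channel and set to stop the receive channel, and the
	 * channel parameter (list pointer) register is cleared for each.
	 */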
	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_rx_list,
		sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_tx_list,
		sizeof(sc->tl_ldata->tl_tx_list));

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
tl_shutdown(dev)
	device_t		dev;
{
	struct tl_softc		*sc;

	sc = device_get_softc(dev);

	TL_LOCK(sc);
	tl_stop(sc);
	TL_UNLOCK(sc);

	return (0);
}
