1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/pci/if_tl.c 147256 2005-06-10 16:49:24Z brooks $");
35
36/*
37 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39 * the National Semiconductor DP83840A physical interface and the
40 * Microchip Technology 24Cxx series serial EEPROM.
41 *
42 * Written using the following four documents:
43 *
44 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45 * National Semiconductor DP83840A data sheet (www.national.com)
46 * Microchip Technology 24C02C data sheet (www.microchip.com)
47 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48 *
49 * Written by Bill Paul <wpaul@ctr.columbia.edu>
50 * Electrical Engineering Department
51 * Columbia University, New York City
52 */
53/*
54 * Some notes about the ThunderLAN:
55 *
56 * The ThunderLAN controller is a single chip containing PCI controller
57 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
58 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
59 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
60 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
61 * to act as a complete ethernet interface.
62 *
63 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
64 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
65 * in full or half duplex. Some of the Compaq Deskpro machines use a
66 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
67 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
68 * concert with the ThunderLAN's internal PHY to provide full 10/100
69 * support. This is cheaper than using a standalone external PHY for both
70 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
71 * A serial EEPROM is also attached to the ThunderLAN chip to provide
72 * power-up default register settings and for storing the adapter's
73 * station address. Although not supported by this driver, the ThunderLAN
74 * chip can also be connected to token ring PHYs.
75 *
76 * The ThunderLAN has a set of registers which can be used to issue
77 * commands, acknowledge interrupts, and to manipulate other internal
78 * registers on its DIO bus. The primary registers can be accessed
79 * using either programmed I/O (inb/outb) or via PCI memory mapping,
80 * depending on how the card is configured during the PCI probing
81 * phase. It is even possible to have both PIO and memory mapped
82 * access turned on at the same time.
83 *
84 * Frame reception and transmission with the ThunderLAN chip is done
85 * using frame 'lists.' A list structure looks more or less like this:
86 *
87 * struct tl_frag {
88 *	u_int32_t		fragment_address;
89 *	u_int32_t		fragment_size;
90 * };
91 * struct tl_list {
92 *	u_int32_t		forward_pointer;
93 *	u_int16_t		cstat;
94 *	u_int16_t		frame_size;
95 *	struct tl_frag		fragments[10];
96 * };
97 *
98 * The forward pointer in the list header can be either a 0 or the address
99 * of another list, which allows several lists to be linked together. Each
100 * list contains up to 10 fragment descriptors. This means the chip allows
101 * ethernet frames to be broken up into as many as 10 chunks for transfer to
102 * and from the SRAM. Note that the forward pointer and fragment buffer
103 * addresses are physical memory addresses, not virtual. Note also that
104 * a single ethernet frame can not span lists: if the host wants to
105 * transmit a frame and the frame data is split up over more than 10
106 * buffers, the frame has to be collapsed before it can be transmitted.
107 *
108 * To receive frames, the driver sets up a number of lists and populates
109 * the fragment descriptors, then it sends an RX GO command to the chip.
110 * When a frame is received, the chip will DMA it into the memory regions
111 * specified by the fragment descriptors and then trigger an RX 'end of
112 * frame interrupt' when done. The driver may choose to use only one
113 * fragment per list; this may result in slightly less efficient use
114 * of memory in exchange for improved performance.
115 *
116 * To transmit frames, the driver again sets up lists and fragment
117 * descriptors, only this time the buffers contain frame data that
118 * is to be DMA'ed into the chip instead of out of it. Once the chip
119 * has transferred the data into its on-board SRAM, it will trigger a
120 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
121 * interrupt when it reaches the end of the list.
122 */
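/*
 * A minimal sketch (not part of the driver) of how two single-fragment
 * lists using the simplified layout above could be chained together.
 * The structure and field names are the ones from the sketch in the
 * preceding comment, and physaddr_of() is a hypothetical helper; the
 * real driver uses the tl_list/tl_frag definitions from if_tlreg.h and
 * obtains physical addresses with vtophys(). A forward pointer of 0
 * terminates the chain.
 *
 *	struct tl_list a, b;
 *
 *	a.fragments[0].fragment_address = physaddr_of(buf_a);
 *	a.fragments[0].fragment_size = MCLBYTES;
 *	a.forward_pointer = physaddr_of(&b);
 *
 *	b.fragments[0].fragment_address = physaddr_of(buf_b);
 *	b.fragments[0].fragment_size = MCLBYTES;
 *	b.forward_pointer = 0;
 */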
123/*
124 * Some notes about this driver:
125 *
126 * The ThunderLAN chip provides a couple of different ways to organize
127 * reception, transmission and interrupt handling. The simplest approach
128 * is to use one list each for transmission and reception. In this mode,
129 * the ThunderLAN will generate two interrupts for every received frame
130 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
131 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
132 * performance to have to handle so many interrupts.
133 *
134 * Initially I wanted to create a circular list of receive buffers so
135 * that the ThunderLAN chip would think there was an infinitely long
136 * receive channel and never deliver an RXEOC interrupt. However this
137 * doesn't work correctly under heavy load: while the manual says the
138 * chip will trigger an RXEOF interrupt each time a frame is copied into
139 * memory, you can't count on the chip waiting around for you to acknowledge
140 * the interrupt before it starts trying to DMA the next frame. The result
141 * is that the chip might traverse the entire circular list and then wrap
142 * around before you have a chance to do anything about it. Consequently,
143 * the receive list is terminated (with a 0 in the forward pointer in the
144 * last element). Each time an RXEOF interrupt arrives, the used list
145 * is shifted to the end of the list. This gives the appearance of an
146 * infinitely large RX chain so long as the driver doesn't fall behind
147 * the chip and allow all of the lists to be filled up.
148 *
149 * If all the lists are filled, the adapter will deliver an RX 'end of
150 * channel' interrupt when it hits the 0 forward pointer at the end of
151 * the chain. The RXEOC handler then cleans out the RX chain and resets
152 * the list head pointer in the ch_parm register and restarts the receiver.
153 *
154 * For frame transmission, it is possible to program the ThunderLAN's
155 * transmit interrupt threshold so that the chip can acknowledge multiple
156 * lists with only a single TX EOF interrupt. This allows the driver to
157 * queue several frames in one shot, and only have to handle a total
158 * of two interrupts (one TX EOF and one TX EOC) no matter how many frames
159 * are transmitted. Frame transmission is done directly out of the
160 * mbufs passed to the tl_start() routine via the interface send queue.
161 * The driver simply sets up the fragment descriptors in the transmit
162 * lists to point to the mbuf data regions and sends a TX GO command.
163 *
164 * Note that since the RX and TX lists themselves are always used
165 * only by the driver, they are malloc()ed once at driver initialization
166 * time and never free()ed.
167 *
168 * Also, in order to remain as platform independent as possible, this
169 * driver uses memory mapped register access to manipulate the card
170 * as opposed to programmed I/O. This avoids the use of the inb/outb
171 * (and related) instructions which are specific to the i386 platform.
172 *
173 * Using these techniques, this driver achieves very high performance
174 * by minimizing the amount of interrupts generated during large
175 * transfers and by completely avoiding buffer copies. Frame transfer
176 * to and from the ThunderLAN chip is performed entirely by the chip
177 * itself thereby reducing the load on the host CPU.
178 */
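/*
 * In simplified pseudo-code, the RXEOF recycling step described above
 * amounts to the following (rx_head/rx_tail are shorthand for the
 * driver's tl_rx_head/tl_rx_tail chain pointers; the real code lives
 * in tl_intvec_rxeof() below):
 *
 *	cur = rx_head;				completed list
 *	rx_head = cur->next;
 *	attach a fresh mbuf to cur and mark it TL_CSTAT_READY;
 *	cur->forward_pointer = 0;		new end of chain
 *	rx_tail->forward_pointer = physical address of cur;
 *	rx_tail->next = cur;
 *	rx_tail = cur;
 */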
179
180#include <sys/param.h>
181#include <sys/systm.h>
182#include <sys/sockio.h>
183#include <sys/mbuf.h>
184#include <sys/malloc.h>
185#include <sys/kernel.h>
186#include <sys/module.h>
187#include <sys/socket.h>
188
189#include <net/if.h>
190#include <net/if_arp.h>
191#include <net/ethernet.h>
192#include <net/if_dl.h>
193#include <net/if_media.h>
194#include <net/if_types.h>
195
196#include <net/bpf.h>
197
198#include <vm/vm.h>              /* for vtophys */
199#include <vm/pmap.h>            /* for vtophys */
200#include <machine/bus.h>
201#include <machine/resource.h>
202#include <sys/bus.h>
203#include <sys/rman.h>
204
205#include <dev/mii/mii.h>
206#include <dev/mii/miivar.h>
207
208#include <dev/pci/pcireg.h>
209#include <dev/pci/pcivar.h>
210
211/*
212 * Default to using PIO register access mode to pacify certain
213 * laptop docking stations with built-in ThunderLAN chips that
214 * don't seem to handle memory mapped mode properly.
215 */
216#define TL_USEIOSPACE
217
218#include <pci/if_tlreg.h>
219
220MODULE_DEPEND(tl, pci, 1, 1, 1);
221MODULE_DEPEND(tl, ether, 1, 1, 1);
222MODULE_DEPEND(tl, miibus, 1, 1, 1);
223
224/* "controller miibus0" required.  See GENERIC if you get errors here. */
225#include "miibus_if.h"
226
227/*
228 * Various supported device vendors/types and their names.
229 */
230
231static struct tl_type tl_devs[] = {
232	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
233		"Texas Instruments ThunderLAN" },
234	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
235		"Compaq Netelligent 10" },
236	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
237		"Compaq Netelligent 10/100" },
238	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
239		"Compaq Netelligent 10/100 Proliant" },
240	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
241		"Compaq Netelligent 10/100 Dual Port" },
242	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
243		"Compaq NetFlex-3/P Integrated" },
244	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
245		"Compaq NetFlex-3/P" },
246	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
247		"Compaq NetFlex 3/P w/ BNC" },
248	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
249		"Compaq Netelligent 10/100 TX Embedded UTP" },
250	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
251		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
252	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
253		"Compaq Netelligent 10/100 TX UTP" },
254	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
255		"Olicom OC-2183/2185" },
256	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
257		"Olicom OC-2325" },
258	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
259		"Olicom OC-2326 10/100 TX UTP" },
260	{ 0, 0, NULL }
261};
262
263static int tl_probe(device_t);
264static int tl_attach(device_t);
265static int tl_detach(device_t);
266static int tl_intvec_rxeoc(void *, u_int32_t);
267static int tl_intvec_txeoc(void *, u_int32_t);
268static int tl_intvec_txeof(void *, u_int32_t);
269static int tl_intvec_rxeof(void *, u_int32_t);
270static int tl_intvec_adchk(void *, u_int32_t);
271static int tl_intvec_netsts(void *, u_int32_t);
272
273static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
274static void tl_stats_update(void *);
275static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
276
277static void tl_intr(void *);
278static void tl_start(struct ifnet *);
279static int tl_ioctl(struct ifnet *, u_long, caddr_t);
280static void tl_init(void *);
281static void tl_stop(struct tl_softc *);
282static void tl_watchdog(struct ifnet *);
283static void tl_shutdown(device_t);
284static int tl_ifmedia_upd(struct ifnet *);
285static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
286
287static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
288static u_int8_t	tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
289static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
290
291static void tl_mii_sync(struct tl_softc *);
292static void tl_mii_send(struct tl_softc *, u_int32_t, int);
293static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
294static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
295static int tl_miibus_readreg(device_t, int, int);
296static int tl_miibus_writereg(device_t, int, int, int);
297static void tl_miibus_statchg(device_t);
298
299static void tl_setmode(struct tl_softc *, int);
300static uint32_t tl_mchash(const uint8_t *);
301static void tl_setmulti(struct tl_softc *);
302static void tl_setfilt(struct tl_softc *, caddr_t, int);
303static void tl_softreset(struct tl_softc *, int);
304static void tl_hardreset(device_t);
305static int tl_list_rx_init(struct tl_softc *);
306static int tl_list_tx_init(struct tl_softc *);
307
308static u_int8_t tl_dio_read8(struct tl_softc *, int);
309static u_int16_t tl_dio_read16(struct tl_softc *, int);
310static u_int32_t tl_dio_read32(struct tl_softc *, int);
311static void tl_dio_write8(struct tl_softc *, int, int);
312static void tl_dio_write16(struct tl_softc *, int, int);
313static void tl_dio_write32(struct tl_softc *, int, int);
314static void tl_dio_setbit(struct tl_softc *, int, int);
315static void tl_dio_clrbit(struct tl_softc *, int, int);
316static void tl_dio_setbit16(struct tl_softc *, int, int);
317static void tl_dio_clrbit16(struct tl_softc *, int, int);
318
319#ifdef TL_USEIOSPACE
320#define TL_RES		SYS_RES_IOPORT
321#define TL_RID		TL_PCI_LOIO
322#else
323#define TL_RES		SYS_RES_MEMORY
324#define TL_RID		TL_PCI_LOMEM
325#endif
326
327static device_method_t tl_methods[] = {
328	/* Device interface */
329	DEVMETHOD(device_probe,		tl_probe),
330	DEVMETHOD(device_attach,	tl_attach),
331	DEVMETHOD(device_detach,	tl_detach),
332	DEVMETHOD(device_shutdown,	tl_shutdown),
333
334	/* bus interface */
335	DEVMETHOD(bus_print_child,	bus_generic_print_child),
336	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
337
338	/* MII interface */
339	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
340	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
341	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
342
343	{ 0, 0 }
344};
345
346static driver_t tl_driver = {
347	"tl",
348	tl_methods,
349	sizeof(struct tl_softc)
350};
351
352static devclass_t tl_devclass;
353
354DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
355DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
356
357static u_int8_t tl_dio_read8(sc, reg)
358	struct tl_softc		*sc;
359	int			reg;
360{
361	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
362	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
363}
364
365static u_int16_t tl_dio_read16(sc, reg)
366	struct tl_softc		*sc;
367	int			reg;
368{
369	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
370	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
371}
372
373static u_int32_t tl_dio_read32(sc, reg)
374	struct tl_softc		*sc;
375	int			reg;
376{
377	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
378	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
379}
380
381static void tl_dio_write8(sc, reg, val)
382	struct tl_softc		*sc;
383	int			reg;
384	int			val;
385{
386	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
387	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
388	return;
389}
390
391static void tl_dio_write16(sc, reg, val)
392	struct tl_softc		*sc;
393	int			reg;
394	int			val;
395{
396	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
397	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
398	return;
399}
400
401static void tl_dio_write32(sc, reg, val)
402	struct tl_softc		*sc;
403	int			reg;
404	int			val;
405{
406	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
407	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
408	return;
409}
410
411static void
412tl_dio_setbit(sc, reg, bit)
413	struct tl_softc		*sc;
414	int			reg;
415	int			bit;
416{
417	u_int8_t			f;
418
419	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
420	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
421	f |= bit;
422	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
423
424	return;
425}
426
427static void
428tl_dio_clrbit(sc, reg, bit)
429	struct tl_softc		*sc;
430	int			reg;
431	int			bit;
432{
433	u_int8_t			f;
434
435	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
436	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
437	f &= ~bit;
438	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
439
440	return;
441}
442
443static void tl_dio_setbit16(sc, reg, bit)
444	struct tl_softc		*sc;
445	int			reg;
446	int			bit;
447{
448	u_int16_t			f;
449
450	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
451	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
452	f |= bit;
453	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
454
455	return;
456}
457
458static void tl_dio_clrbit16(sc, reg, bit)
459	struct tl_softc		*sc;
460	int			reg;
461	int			bit;
462{
463	u_int16_t			f;
464
465	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
466	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
467	f &= ~bit;
468	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
469
470	return;
471}
472
473/*
474 * Send an instruction or address to the EEPROM, check for ACK.
475 */
476static u_int8_t tl_eeprom_putbyte(sc, byte)
477	struct tl_softc		*sc;
478	int			byte;
479{
480	register int		i, ack = 0;
481
482	/*
483	 * Make sure we're in TX mode.
484	 */
485	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
486
487	/*
488	 * Feed in each bit and strobe the clock.
489	 */
490	for (i = 0x80; i; i >>= 1) {
491		if (byte & i) {
492			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
493		} else {
494			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
495		}
496		DELAY(1);
497		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
498		DELAY(1);
499		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
500	}
501
502	/*
503	 * Turn off TX mode.
504	 */
505	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
506
507	/*
508	 * Check for ack.
509	 */
510	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
511	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
512	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
513
514	return(ack);
515}
516
517/*
518 * Read a byte of data stored in the EEPROM at address 'addr.'
519 */
520static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
521	struct tl_softc		*sc;
522	int			addr;
523	u_int8_t		*dest;
524{
525	register int		i;
526	u_int8_t		byte = 0;
527	struct ifnet		*ifp = sc->tl_ifp;
528
529	tl_dio_write8(sc, TL_NETSIO, 0);
530
531	EEPROM_START;
532
533	/*
534	 * Send write control code to EEPROM.
535	 */
536	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
537		if_printf(ifp, "failed to send write command, status: %x\n",
538		    tl_dio_read8(sc, TL_NETSIO));
539		return(1);
540	}
541
542	/*
543	 * Send address of byte we want to read.
544	 */
545	if (tl_eeprom_putbyte(sc, addr)) {
546		if_printf(ifp, "failed to send address, status: %x\n",
547		    tl_dio_read8(sc, TL_NETSIO));
548		return(1);
549	}
550
551	EEPROM_STOP;
552	EEPROM_START;
553	/*
554	 * Send read control code to EEPROM.
555	 */
556	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
557		if_printf(ifp, "failed to send read command, status: %x\n",
558		    tl_dio_read8(sc, TL_NETSIO));
559		return(1);
560	}
561
562	/*
563	 * Start reading bits from EEPROM.
564	 */
565	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
566	for (i = 0x80; i; i >>= 1) {
567		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
568		DELAY(1);
569		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
570			byte |= i;
571		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
572		DELAY(1);
573	}
574
575	EEPROM_STOP;
576
577	/*
578	 * No ACK generated for read, so just return byte.
579	 */
580
581	*dest = byte;
582
583	return(0);
584}
585
586/*
587 * Read a sequence of bytes from the EEPROM.
588 */
589static int
590tl_read_eeprom(sc, dest, off, cnt)
591	struct tl_softc		*sc;
592	caddr_t			dest;
593	int			off;
594	int			cnt;
595{
596	int			err = 0, i;
597	u_int8_t		byte = 0;
598
599	for (i = 0; i < cnt; i++) {
600		err = tl_eeprom_getbyte(sc, off + i, &byte);
601		if (err)
602			break;
603		*(dest + i) = byte;
604	}
605
606	return(err ? 1 : 0);
607}
608
609static void
610tl_mii_sync(sc)
611	struct tl_softc		*sc;
612{
613	register int		i;
614
615	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
616
617	for (i = 0; i < 32; i++) {
618		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
619		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
620	}
621
622	return;
623}
624
625static void
626tl_mii_send(sc, bits, cnt)
627	struct tl_softc		*sc;
628	u_int32_t		bits;
629	int			cnt;
630{
631	int			i;
632
633	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
634		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
635		if (bits & i) {
636			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
637		} else {
638			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
639		}
640		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
641	}
642}
643
644static int
645tl_mii_readreg(sc, frame)
646	struct tl_softc		*sc;
647	struct tl_mii_frame	*frame;
648
649{
650	int			i, ack;
651	int			minten = 0;
652
653	TL_LOCK(sc);
654
655	tl_mii_sync(sc);
656
657	/*
658	 * Set up frame for RX.
659	 */
660	frame->mii_stdelim = TL_MII_STARTDELIM;
661	frame->mii_opcode = TL_MII_READOP;
662	frame->mii_turnaround = 0;
663	frame->mii_data = 0;
664
665	/*
666	 * Turn off MII interrupt by forcing MINTEN low.
667	 */
668	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
669	if (minten) {
670		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
671	}
672
673	/*
674 	 * Turn on data xmit.
675	 */
676	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
677
678	/*
679	 * Send command/address info.
680	 */
681	tl_mii_send(sc, frame->mii_stdelim, 2);
682	tl_mii_send(sc, frame->mii_opcode, 2);
683	tl_mii_send(sc, frame->mii_phyaddr, 5);
684	tl_mii_send(sc, frame->mii_regaddr, 5);
685
686	/*
687	 * Turn off xmit.
688	 */
689	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
690
691	/* Idle bit */
692	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
693	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
694
695	/* Check for ack */
696	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
697	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
698
699	/* Complete the cycle */
700	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
701
702	/*
703	 * Now try reading data bits. If the ack failed, we still
704	 * need to clock through 16 cycles to keep the PHYs in sync.
705	 */
706	if (ack) {
707		for(i = 0; i < 16; i++) {
708			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
709			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
710		}
711		goto fail;
712	}
713
714	for (i = 0x8000; i; i >>= 1) {
715		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
716		if (!ack) {
717			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
718				frame->mii_data |= i;
719		}
720		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
721	}
722
723fail:
724
725	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
726	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
727
728	/* Reenable interrupts */
729	if (minten) {
730		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
731	}
732
733	TL_UNLOCK(sc);
734
735	if (ack)
736		return(1);
737	return(0);
738}
739
740static int
741tl_mii_writereg(sc, frame)
742	struct tl_softc		*sc;
743	struct tl_mii_frame	*frame;
744
745{
746	int			minten;
747
748	TL_LOCK(sc);
749
750	tl_mii_sync(sc);
751
752	/*
753	 * Set up frame for TX.
754	 */
755
756	frame->mii_stdelim = TL_MII_STARTDELIM;
757	frame->mii_opcode = TL_MII_WRITEOP;
758	frame->mii_turnaround = TL_MII_TURNAROUND;
759
760	/*
761	 * Turn off MII interrupt by forcing MINTEN low.
762	 */
763	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
764	if (minten) {
765		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
766	}
767
768	/*
769 	 * Turn on data output.
770	 */
771	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
772
773	tl_mii_send(sc, frame->mii_stdelim, 2);
774	tl_mii_send(sc, frame->mii_opcode, 2);
775	tl_mii_send(sc, frame->mii_phyaddr, 5);
776	tl_mii_send(sc, frame->mii_regaddr, 5);
777	tl_mii_send(sc, frame->mii_turnaround, 2);
778	tl_mii_send(sc, frame->mii_data, 16);
779
780	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
781	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
782
783	/*
784	 * Turn off xmit.
785	 */
786	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
787
788	/* Reenable interrupts */
789	if (minten)
790		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
791
792	TL_UNLOCK(sc);
793
794	return(0);
795}
796
797static int
798tl_miibus_readreg(dev, phy, reg)
799	device_t		dev;
800	int			phy, reg;
801{
802	struct tl_softc		*sc;
803	struct tl_mii_frame	frame;
804
805	sc = device_get_softc(dev);
806	bzero((char *)&frame, sizeof(frame));
807
808	frame.mii_phyaddr = phy;
809	frame.mii_regaddr = reg;
810	tl_mii_readreg(sc, &frame);
811
812	return(frame.mii_data);
813}
814
815static int
816tl_miibus_writereg(dev, phy, reg, data)
817	device_t		dev;
818	int			phy, reg, data;
819{
820	struct tl_softc		*sc;
821	struct tl_mii_frame	frame;
822
823	sc = device_get_softc(dev);
824	bzero((char *)&frame, sizeof(frame));
825
826	frame.mii_phyaddr = phy;
827	frame.mii_regaddr = reg;
828	frame.mii_data = data;
829
830	tl_mii_writereg(sc, &frame);
831
832	return(0);
833}
834
835static void
836tl_miibus_statchg(dev)
837	device_t		dev;
838{
839	struct tl_softc		*sc;
840	struct mii_data		*mii;
841
842	sc = device_get_softc(dev);
843	TL_LOCK(sc);
844	mii = device_get_softc(sc->tl_miibus);
845
846	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
847		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
848	} else {
849		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
850	}
851	TL_UNLOCK(sc);
852
853	return;
854}
855
856/*
857 * Set modes for bitrate devices.
858 */
859static void
860tl_setmode(sc, media)
861	struct tl_softc		*sc;
862	int			media;
863{
864	if (IFM_SUBTYPE(media) == IFM_10_5)
865		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
866	if (IFM_SUBTYPE(media) == IFM_10_T) {
867		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
868		if ((media & IFM_GMASK) == IFM_FDX) {
869			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
870			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
871		} else {
872			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
873			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
874		}
875	}
876
877	return;
878}
879
880/*
881 * Calculate the hash of a MAC address for programming the multicast hash
882 * table.  This hash is simply the address split into 6-bit chunks
883 * XOR'd, e.g.
884 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
885 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
886 * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
887 * the folded 24-bit value is split into 6-bit portions and XOR'd.
888 */
889static uint32_t
890tl_mchash(addr)
891	const uint8_t *addr;
892{
893	int t;
894
895	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
896		(addr[2] ^ addr[5]);
897	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
898}
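/*
 * Worked example (a hand check, not used by the code): for the all-hosts
 * group 01:00:5e:00:00:01 the folded value is
 * t = (0x01 ^ 0x00) << 16 | (0x00 ^ 0x00) << 8 | (0x5e ^ 0x01) = 0x01005f,
 * so (t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t = 0x0 ^ 0x10 ^ 0x401 ^ 0x1005f
 * = 0x1044e, which masked with 0x3f gives 0x0e, i.e. bit 14 of TL_HASH1.
 */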
899
900/*
901 * The ThunderLAN has a perfect MAC address filter in addition to
902 * the multicast hash filter. The perfect filter can be programmed
903 * with up to four MAC addresses. The first one is always used to
904 * hold the station address, which leaves us free to use the other
905 * three for multicast addresses.
906 */
907static void
908tl_setfilt(sc, addr, slot)
909	struct tl_softc		*sc;
910	caddr_t			addr;
911	int			slot;
912{
913	int			i;
914	u_int16_t		regaddr;
915
916	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
917
918	for (i = 0; i < ETHER_ADDR_LEN; i++)
919		tl_dio_write8(sc, regaddr + i, *(addr + i));
920
921	return;
922}
923
924/*
925 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
926 * linked list. This is fine, except addresses are added from the head
927 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
928 * group to always be in the perfect filter, but as more groups are added,
929 * the 224.0.0.1 entry (which is always added first) gets pushed down
930 * the list and ends up at the tail. So after 3 or 4 multicast groups
931 * are added, the all-hosts entry gets pushed out of the perfect filter
932 * and into the hash table.
933 *
934 * Because the multicast list is a doubly-linked list as opposed to a
935 * circular queue, we don't have the ability to just grab the tail of
936 * the list and traverse it backwards. Instead, we have to traverse
937 * the list once to find the tail, then traverse it again backwards to
938 * update the multicast filter.
939 */
940static void
941tl_setmulti(sc)
942	struct tl_softc		*sc;
943{
944	struct ifnet		*ifp;
945	u_int32_t		hashes[2] = { 0, 0 };
946	int			h, i;
947	struct ifmultiaddr	*ifma;
948	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
949	ifp = sc->tl_ifp;
950
951	/* First, zot all the existing filters. */
952	for (i = 1; i < 4; i++)
953		tl_setfilt(sc, (caddr_t)&dummy, i);
954	tl_dio_write32(sc, TL_HASH1, 0);
955	tl_dio_write32(sc, TL_HASH2, 0);
956
957	/* Now program new ones. */
958	if (ifp->if_flags & IFF_ALLMULTI) {
959		hashes[0] = 0xFFFFFFFF;
960		hashes[1] = 0xFFFFFFFF;
961	} else {
962		i = 1;
963		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
964			if (ifma->ifma_addr->sa_family != AF_LINK)
965				continue;
966			/*
967			 * Program the first three multicast groups
968			 * into the perfect filter. For all others,
969			 * use the hash table.
970			 */
971			if (i < 4) {
972				tl_setfilt(sc,
973			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
974				i++;
975				continue;
976			}
977
978			h = tl_mchash(
979				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
980			if (h < 32)
981				hashes[0] |= (1 << h);
982			else
983				hashes[1] |= (1 << (h - 32));
984		}
985	}
986
987	tl_dio_write32(sc, TL_HASH1, hashes[0]);
988	tl_dio_write32(sc, TL_HASH2, hashes[1]);
989
990	return;
991}
992
993/*
994 * This routine is recommended by the ThunderLAN manual to ensure that
995 * the internal PHY is powered up correctly. It also recommends a one
996 * second pause at the end to 'wait for the clocks to start' but in my
997 * experience this isn't necessary.
998 */
999static void
1000tl_hardreset(dev)
1001	device_t		dev;
1002{
1003	struct tl_softc		*sc;
1004	int			i;
1005	u_int16_t		flags;
1006
1007	sc = device_get_softc(dev);
1008
1009	tl_mii_sync(sc);
1010
1011	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1012
1013	for (i = 0; i < MII_NPHY; i++)
1014		tl_miibus_writereg(dev, i, MII_BMCR, flags);
1015
1016	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1017	DELAY(50000);
1018	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1019	tl_mii_sync(sc);
1020	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1021
1022	DELAY(50000);
1023	return;
1024}
1025
1026static void
1027tl_softreset(sc, internal)
1028	struct tl_softc		*sc;
1029	int			internal;
1030{
1031        u_int32_t               cmd, dummy, i;
1032
1033        /* Assert the adapter reset bit. */
1034	CMD_SET(sc, TL_CMD_ADRST);
1035
1036        /* Turn off interrupts */
1037	CMD_SET(sc, TL_CMD_INTSOFF);
1038
1039	/* First, clear the stats registers. */
1040	for (i = 0; i < 5; i++)
1041		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1042
1043        /* Clear Areg and Hash registers */
1044	for (i = 0; i < 8; i++)
1045		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1046
1047        /*
1048	 * Set up Netconfig register. Enable one channel and
1049	 * one fragment mode.
1050	 */
1051	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1052	if (internal && !sc->tl_bitrate) {
1053		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1054	} else {
1055		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1056	}
1057
1058	/* Handle cards with bitrate devices. */
1059	if (sc->tl_bitrate)
1060		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1061
1062	/*
1063	 * Load adapter irq pacing timer and tx threshold.
1064	 * We make the transmit threshold 1 initially but we may
1065	 * change that later.
1066	 */
1067	cmd = CSR_READ_4(sc, TL_HOSTCMD);
1068	cmd |= TL_CMD_NES;
1069	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1070	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1071	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1072
1073        /* Unreset the MII */
1074	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1075
1076	/* Take the adapter out of reset */
1077	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1078
1079	/* Wait for things to settle down a little. */
1080	DELAY(500);
1081
1082        return;
1083}
1084
1085/*
1086 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1087 * against our list and return its name if we find a match.
1088 */
1089static int
1090tl_probe(dev)
1091	device_t		dev;
1092{
1093	struct tl_type		*t;
1094
1095	t = tl_devs;
1096
1097	while(t->tl_name != NULL) {
1098		if ((pci_get_vendor(dev) == t->tl_vid) &&
1099		    (pci_get_device(dev) == t->tl_did)) {
1100			device_set_desc(dev, t->tl_name);
1101			return (BUS_PROBE_DEFAULT);
1102		}
1103		t++;
1104	}
1105
1106	return(ENXIO);
1107}
1108
1109static int
1110tl_attach(dev)
1111	device_t		dev;
1112{
1113	int			i;
1114	u_int16_t		did, vid;
1115	struct tl_type		*t;
1116	struct ifnet		*ifp;
1117	struct tl_softc		*sc;
1118	int			unit, error = 0, rid;
1119	u_char			eaddr[6];
1120
1121	vid = pci_get_vendor(dev);
1122	did = pci_get_device(dev);
1123	sc = device_get_softc(dev);
1124	unit = device_get_unit(dev);
1125
1126	t = tl_devs;
1127	while(t->tl_name != NULL) {
1128		if (vid == t->tl_vid && did == t->tl_did)
1129			break;
1130		t++;
1131	}
1132
1133	if (t->tl_name == NULL) {
1134		device_printf(dev, "unknown device!?\n");
1135		return (ENXIO);
1136	}
1137
1138	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1139	    MTX_DEF | MTX_RECURSE);
1140
1141	/*
1142	 * Map control/status registers.
1143	 */
1144	pci_enable_busmaster(dev);
1145
1146#ifdef TL_USEIOSPACE
1147
1148	rid = TL_PCI_LOIO;
1149	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1150		RF_ACTIVE);
1151
1152	/*
1153	 * Some cards have the I/O and memory mapped address registers
1154	 * reversed. Try both combinations before giving up.
1155	 */
1156	if (sc->tl_res == NULL) {
1157		rid = TL_PCI_LOMEM;
1158		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1159		    RF_ACTIVE);
1160	}
1161#else
1162	rid = TL_PCI_LOMEM;
1163	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1164	    RF_ACTIVE);
1165	if (sc->tl_res == NULL) {
1166		rid = TL_PCI_LOIO;
1167		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1168		    RF_ACTIVE);
1169	}
1170#endif
1171
1172	if (sc->tl_res == NULL) {
1173		device_printf(dev, "couldn't map ports/memory\n");
1174		error = ENXIO;
1175		goto fail;
1176	}
1177
1178	sc->tl_btag = rman_get_bustag(sc->tl_res);
1179	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);
1180
1181#ifdef notdef
1182	/*
1183	 * The ThunderLAN manual suggests jacking the PCI latency
1184	 * timer all the way up to its maximum value. I'm not sure
1185	 * if this is really necessary, but what the manual wants,
1186	 * the manual gets.
1187	 */
1188	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1189	command |= 0x0000FF00;
1190	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1191#endif
1192
1193	/* Allocate interrupt */
1194	rid = 0;
1195	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1196	    RF_SHAREABLE | RF_ACTIVE);
1197
1198	if (sc->tl_irq == NULL) {
1199		device_printf(dev, "couldn't map interrupt\n");
1200		error = ENXIO;
1201		goto fail;
1202	}
1203
1204	/*
1205	 * Now allocate memory for the TX and RX lists.
1206	 */
1207	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1208	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1209
1210	if (sc->tl_ldata == NULL) {
1211		device_printf(dev, "no memory for list buffers!\n");
1212		error = ENXIO;
1213		goto fail;
1214	}
1215
1216	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1217
1218	sc->tl_dinfo = t;
1219	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
1220		sc->tl_eeaddr = TL_EEPROM_EADDR;
1221	if (t->tl_vid == OLICOM_VENDORID)
1222		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1223
1224	/* Reset the adapter. */
1225	tl_softreset(sc, 1);
1226	tl_hardreset(dev);
1227	tl_softreset(sc, 1);
1228
1229	/*
1230	 * Get station address from the EEPROM.
1231	 */
1232	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1233		device_printf(dev, "failed to read station address\n");
1234		error = ENXIO;
1235		goto fail;
1236	}
1237
1238        /*
1239         * XXX Olicom, in its desire to be different from the
1240         * rest of the world, has done strange things with the
1241         * encoding of the station address in the EEPROM. First
1242         * of all, they store the address at offset 0xF8 rather
1243         * than at 0x83 like the ThunderLAN manual suggests.
1244         * Second, they store the address in three 16-bit words in
1245         * network byte order, as opposed to storing it sequentially
1246         * like all the other ThunderLAN cards. In order to get
1247         * the station address in a form that matches what the Olicom
1248         * diagnostic utility specifies, we have to byte-swap each
1249         * word. To make things even more confusing, neither 00:00:28
1250         * nor 00:00:24 appear in the IEEE OUI database.
1251         */
1252        if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
1253                for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1254                        u_int16_t               *p;
1255                        p = (u_int16_t *)&eaddr[i];
1256                        *p = ntohs(*p);
1257                }
1258        }
1259
1260	ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1261	if (ifp == NULL) {
1262		device_printf(dev, "can not if_alloc()\n");
1263		error = ENOSPC;
1264		goto fail;
1265	}
1266	ifp->if_softc = sc;
1267	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1268	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
1269	    IFF_NEEDSGIANT;
1270	ifp->if_ioctl = tl_ioctl;
1271	ifp->if_start = tl_start;
1272	ifp->if_watchdog = tl_watchdog;
1273	ifp->if_init = tl_init;
1274	ifp->if_mtu = ETHERMTU;
1275	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1276	callout_handle_init(&sc->tl_stat_ch);
1277
1278	/* Reset the adapter again. */
1279	tl_softreset(sc, 1);
1280	tl_hardreset(dev);
1281	tl_softreset(sc, 1);
1282
1283	/*
1284	 * Do MII setup. If no PHYs are found, then this is a
1285	 * bitrate ThunderLAN chip that only supports 10baseT
1286	 * and AUI/BNC.
1287	 */
1288	if (mii_phy_probe(dev, &sc->tl_miibus,
1289	    tl_ifmedia_upd, tl_ifmedia_sts)) {
1290		struct ifmedia		*ifm;
1291		sc->tl_bitrate = 1;
1292		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1293		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1294		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1295		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1296		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1297		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1298		/* Reset again, this time setting bitrate mode. */
1299		tl_softreset(sc, 1);
1300		ifm = &sc->ifmedia;
1301		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1302		tl_ifmedia_upd(ifp);
1303	}
1304
1305	/*
1306	 * Call MI attach routine.
1307	 */
1308	ether_ifattach(ifp, eaddr);
1309
1310	/* Hook interrupt last to avoid having to lock softc */
1311	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET,
1312	    tl_intr, sc, &sc->tl_intrhand);
1313
1314	if (error) {
1315		device_printf(dev, "couldn't set up irq\n");
1316		ether_ifdetach(ifp);
1317		if_free(ifp);
1318		goto fail;
1319	}
1320
1321fail:
1322	if (error)
1323		tl_detach(dev);
1324
1325	return(error);
1326}
1327
1328/*
1329 * Shutdown hardware and free up resources. This can be called any
1330 * time after the mutex has been initialized. It is called in both
1331 * the error case in attach and the normal detach case so it needs
1332 * to be careful about only freeing resources that have actually been
1333 * allocated.
1334 */
1335static int
1336tl_detach(dev)
1337	device_t		dev;
1338{
1339	struct tl_softc		*sc;
1340	struct ifnet		*ifp;
1341
1342	sc = device_get_softc(dev);
1343	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1344	TL_LOCK(sc);
1345	ifp = sc->tl_ifp;
1346
1347	/* These should only be active if attach succeeded */
1348	if (device_is_attached(dev)) {
1349		tl_stop(sc);
1350		ether_ifdetach(ifp);
1351		if_free(ifp);
1352	}
1353	if (sc->tl_miibus)
1354		device_delete_child(dev, sc->tl_miibus);
1355	bus_generic_detach(dev);
1356
1357	if (sc->tl_ldata)
1358		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1359	if (sc->tl_bitrate)
1360		ifmedia_removeall(&sc->ifmedia);
1361
1362	if (sc->tl_intrhand)
1363		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1364	if (sc->tl_irq)
1365		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1366	if (sc->tl_res)
1367		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1368
1369	TL_UNLOCK(sc);
1370	mtx_destroy(&sc->tl_mtx);
1371
1372	return(0);
1373}
1374
1375/*
1376 * Initialize the transmit lists.
1377 */
1378static int
1379tl_list_tx_init(sc)
1380	struct tl_softc		*sc;
1381{
1382	struct tl_chain_data	*cd;
1383	struct tl_list_data	*ld;
1384	int			i;
1385
1386	cd = &sc->tl_cdata;
1387	ld = sc->tl_ldata;
1388	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1389		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1390		if (i == (TL_TX_LIST_CNT - 1))
1391			cd->tl_tx_chain[i].tl_next = NULL;
1392		else
1393			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1394	}
1395
1396	cd->tl_tx_free = &cd->tl_tx_chain[0];
1397	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1398	sc->tl_txeoc = 1;
1399
1400	return(0);
1401}
1402
1403/*
1404 * Initialize the RX lists and allocate mbufs for them.
1405 */
1406static int
1407tl_list_rx_init(sc)
1408	struct tl_softc		*sc;
1409{
1410	struct tl_chain_data	*cd;
1411	struct tl_list_data	*ld;
1412	int			i;
1413
1414	cd = &sc->tl_cdata;
1415	ld = sc->tl_ldata;
1416
1417	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1418		cd->tl_rx_chain[i].tl_ptr =
1419			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1420		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1421			return(ENOBUFS);
1422		if (i == (TL_RX_LIST_CNT - 1)) {
1423			cd->tl_rx_chain[i].tl_next = NULL;
1424			ld->tl_rx_list[i].tlist_fptr = 0;
1425		} else {
1426			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1427			ld->tl_rx_list[i].tlist_fptr =
1428					vtophys(&ld->tl_rx_list[i + 1]);
1429		}
1430	}
1431
1432	cd->tl_rx_head = &cd->tl_rx_chain[0];
1433	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1434
1435	return(0);
1436}
1437
1438static int
1439tl_newbuf(sc, c)
1440	struct tl_softc		*sc;
1441	struct tl_chain_onefrag	*c;
1442{
1443	struct mbuf		*m_new = NULL;
1444
1445	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1446	if (m_new == NULL)
1447		return(ENOBUFS);
1448
1449	MCLGET(m_new, M_DONTWAIT);
1450	if (!(m_new->m_flags & M_EXT)) {
1451		m_freem(m_new);
1452		return(ENOBUFS);
1453	}
1454
1455#ifdef __alpha__
1456	m_new->m_data += 2;
1457#endif
1458
1459	c->tl_mbuf = m_new;
1460	c->tl_next = NULL;
1461	c->tl_ptr->tlist_frsize = MCLBYTES;
1462	c->tl_ptr->tlist_fptr = 0;
1463	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1464	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1465	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1466
1467	return(0);
1468}
1469/*
1470 * Interrupt handler for RX 'end of frame' condition (EOF). This
1471 * tells us that a full ethernet frame has been captured and we need
1472 * to handle it.
1473 *
1474 * Reception is done using 'lists' which consist of a header and a
1475 * series of 10 data count/data address pairs that point to buffers.
1476 * Initially you're supposed to create a list, populate it with pointers
1477 * to buffers, then load the physical address of the list into the
1478 * ch_parm register. The adapter is then supposed to DMA the received
1479 * frame into the buffers for you.
1480 *
1481 * To make things as fast as possible, we have the chip DMA directly
1482 * into mbufs. This saves us from having to do a buffer copy: we can
1483 * just hand the mbufs directly to ether_input(). Once the frame has
1484 * been sent on its way, the 'list' structure is assigned a new buffer
1485 * and moved to the end of the RX chain. As long as we stay ahead of
1486 * the chip, it will always think it has an endless receive channel.
1487 *
1488 * If we happen to fall behind and the chip manages to fill up all of
1489 * the buffers, it will generate an end of channel interrupt and wait
1490 * for us to empty the chain and restart the receiver.
1491 */
1492static int
1493tl_intvec_rxeof(xsc, type)
1494	void			*xsc;
1495	u_int32_t		type;
1496{
1497	struct tl_softc		*sc;
1498	int			r = 0, total_len = 0;
1499	struct ether_header	*eh;
1500	struct mbuf		*m;
1501	struct ifnet		*ifp;
1502	struct tl_chain_onefrag	*cur_rx;
1503
1504	sc = xsc;
1505	ifp = sc->tl_ifp;
1506
1507	TL_LOCK_ASSERT(sc);
1508
1509	while(sc->tl_cdata.tl_rx_head != NULL) {
1510		cur_rx = sc->tl_cdata.tl_rx_head;
1511		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1512			break;
1513		r++;
1514		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1515		m = cur_rx->tl_mbuf;
1516		total_len = cur_rx->tl_ptr->tlist_frsize;
1517
1518		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1519			ifp->if_ierrors++;
1520			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1521			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1522			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1523			continue;
1524		}
1525
1526		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1527						vtophys(cur_rx->tl_ptr);
1528		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1529		sc->tl_cdata.tl_rx_tail = cur_rx;
1530
1531		/*
1532		 * Note: when the ThunderLAN chip is in 'capture all
1533		 * frames' mode, it will receive its own transmissions.
1534		 * We don't need to process our own transmissions,
1535		 * so we drop them here and continue.
1536		 */
1537		eh = mtod(m, struct ether_header *);
1538		/*if (ifp->if_flags & IFF_PROMISC && */
1539		if (!bcmp(eh->ether_shost, IFP2ENADDR(sc->tl_ifp),
1540		 					ETHER_ADDR_LEN)) {
1541				m_freem(m);
1542				continue;
1543		}
1544
1545		m->m_pkthdr.rcvif = ifp;
1546		m->m_pkthdr.len = m->m_len = total_len;
1547
1548		TL_UNLOCK(sc);
1549		(*ifp->if_input)(ifp, m);
1550		TL_LOCK(sc);
1551	}
1552
1553	return(r);
1554}
1555
1556/*
1557 * The RX-EOC condition hits when the ch_parm address hasn't been
1558 * initialized or the adapter reached a list with a forward pointer
1559 * of 0 (which indicates the end of the chain). In our case, this means
1560 * the card has hit the end of the receive buffer chain and we need to
1561 * empty out the buffers and shift the pointer back to the beginning again.
1562 */
1563static int
1564tl_intvec_rxeoc(xsc, type)
1565	void			*xsc;
1566	u_int32_t		type;
1567{
1568	struct tl_softc		*sc;
1569	int			r;
1570	struct tl_chain_data	*cd;
1571
1572
1573	sc = xsc;
1574	cd = &sc->tl_cdata;
1575
1576	/* Flush out the receive queue and ack RXEOF interrupts. */
1577	r = tl_intvec_rxeof(xsc, type);
1578	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1579	r = 1;
1580	cd->tl_rx_head = &cd->tl_rx_chain[0];
1581	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1582	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1583	r |= (TL_CMD_GO|TL_CMD_RT);
1584	return(r);
1585}
1586
1587static int
1588tl_intvec_txeof(xsc, type)
1589	void			*xsc;
1590	u_int32_t		type;
1591{
1592	struct tl_softc		*sc;
1593	int			r = 0;
1594	struct tl_chain		*cur_tx;
1595
1596	sc = xsc;
1597
1598	/*
1599	 * Go through our tx list and free mbufs for those
1600	 * frames that have been sent.
1601	 */
1602	while (sc->tl_cdata.tl_tx_head != NULL) {
1603		cur_tx = sc->tl_cdata.tl_tx_head;
1604		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1605			break;
1606		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1607
1608		r++;
1609		m_freem(cur_tx->tl_mbuf);
1610		cur_tx->tl_mbuf = NULL;
1611
1612		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1613		sc->tl_cdata.tl_tx_free = cur_tx;
1614		if (!cur_tx->tl_ptr->tlist_fptr)
1615			break;
1616	}
1617
1618	return(r);
1619}
1620
1621/*
1622 * The transmit end of channel interrupt. The adapter triggers this
1623 * interrupt to tell us it hit the end of the current transmit list.
1624 *
1625 * A note about this: it's possible for a condition to arise where
1626 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1627 * You have to avoid this since the chip expects things to go in a
1628 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1629 * When the TXEOF handler is called, it will free all of the transmitted
1630 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1631 * interrupt should be received and acknowledged before any more frames
1632 * are queued for transmission. If tl_start() is called after TXEOF
1633 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1634 * it could attempt to issue a transmit command prematurely.
1635 *
1636 * To guard against this, tl_start() will only issue transmit commands
1637 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1638 * can set this flag once tl_start() has cleared it.
1639 */
1640static int
1641tl_intvec_txeoc(xsc, type)
1642	void			*xsc;
1643	u_int32_t		type;
1644{
1645	struct tl_softc		*sc;
1646	struct ifnet		*ifp;
1647	u_int32_t		cmd;
1648
1649	sc = xsc;
1650	ifp = sc->tl_ifp;
1651
1652	/* Clear the timeout timer. */
1653	ifp->if_timer = 0;
1654
1655	if (sc->tl_cdata.tl_tx_head == NULL) {
1656		ifp->if_flags &= ~IFF_OACTIVE;
1657		sc->tl_cdata.tl_tx_tail = NULL;
1658		sc->tl_txeoc = 1;
1659	} else {
1660		sc->tl_txeoc = 0;
1661		/* First we have to ack the EOC interrupt. */
1662		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1663		/* Then load the address of the next TX list. */
1664		CSR_WRITE_4(sc, TL_CH_PARM,
1665		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1666		/* Restart TX channel. */
1667		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1668		cmd &= ~TL_CMD_RT;
1669		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1670		CMD_PUT(sc, cmd);
1671		return(0);
1672	}
1673
1674	return(1);
1675}
1676
1677static int
1678tl_intvec_adchk(xsc, type)
1679	void			*xsc;
1680	u_int32_t		type;
1681{
1682	struct tl_softc		*sc;
1683
1684	sc = xsc;
1685
1686	if (type)
1687		if_printf(sc->tl_ifp, "adapter check: %x\n",
1688			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1689
1690	tl_softreset(sc, 1);
1691	tl_stop(sc);
1692	tl_init(sc);
1693	CMD_SET(sc, TL_CMD_INTSON);
1694
1695	return(0);
1696}
1697
1698static int
1699tl_intvec_netsts(xsc, type)
1700	void			*xsc;
1701	u_int32_t		type;
1702{
1703	struct tl_softc		*sc;
1704	u_int16_t		netsts;
1705
1706	sc = xsc;
1707
1708	netsts = tl_dio_read16(sc, TL_NETSTS);
1709	tl_dio_write16(sc, TL_NETSTS, netsts);
1710
1711	if_printf(sc->tl_ifp, "network status: %x\n", netsts);
1712
1713	return(1);
1714}
1715
1716static void
1717tl_intr(xsc)
1718	void			*xsc;
1719{
1720	struct tl_softc		*sc;
1721	struct ifnet		*ifp;
1722	int			r = 0;
1723	u_int32_t		type = 0;
1724	u_int16_t		ints = 0;
1725	u_int8_t		ivec = 0;
1726
1727	sc = xsc;
1728	TL_LOCK(sc);
1729
1730	/* Disable interrupts */
1731	ints = CSR_READ_2(sc, TL_HOST_INT);
1732	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1733	type = (ints << 16) & 0xFFFF0000;
1734	ivec = (ints & TL_VEC_MASK) >> 5;
1735	ints = (ints & TL_INT_MASK) >> 2;
1736
1737	ifp = sc->tl_ifp;
1738
1739	switch(ints) {
1740	case (TL_INTR_INVALID):
1741#ifdef DIAGNOSTIC
1742		if_printf(ifp, "got an invalid interrupt!\n");
1743#endif
1744		/* Re-enable interrupts but don't ack this one. */
1745		CMD_PUT(sc, type);
1746		r = 0;
1747		break;
1748	case (TL_INTR_TXEOF):
1749		r = tl_intvec_txeof((void *)sc, type);
1750		break;
1751	case (TL_INTR_TXEOC):
1752		r = tl_intvec_txeoc((void *)sc, type);
1753		break;
1754	case (TL_INTR_STATOFLOW):
1755		tl_stats_update(sc);
1756		r = 1;
1757		break;
1758	case (TL_INTR_RXEOF):
1759		r = tl_intvec_rxeof((void *)sc, type);
1760		break;
1761	case (TL_INTR_DUMMY):
1762		if_printf(ifp, "got a dummy interrupt\n");
1763		r = 1;
1764		break;
1765	case (TL_INTR_ADCHK):
1766		if (ivec)
1767			r = tl_intvec_adchk((void *)sc, type);
1768		else
1769			r = tl_intvec_netsts((void *)sc, type);
1770		break;
1771	case (TL_INTR_RXEOC):
1772		r = tl_intvec_rxeoc((void *)sc, type);
1773		break;
1774	default:
1775		if_printf(ifp, "bogus interrupt type\n");
1776		break;
1777	}
1778
1779	/* Re-enable interrupts */
1780	if (r) {
1781		CMD_PUT(sc, TL_CMD_ACK | r | type);
1782	}
1783
1784	if (ifp->if_snd.ifq_head != NULL)
1785		tl_start(ifp);
1786
1787	TL_UNLOCK(sc);
1788
1789	return;
1790}
1791
1792static void
1793tl_stats_update(xsc)
1794	void			*xsc;
1795{
1796	struct tl_softc		*sc;
1797	struct ifnet		*ifp;
1798	struct tl_stats		tl_stats;
1799	struct mii_data		*mii;
1800	u_int32_t		*p;
1801
1802	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1803
1804	sc = xsc;
1805	TL_LOCK(sc);
1806	ifp = sc->tl_ifp;
1807
1808	p = (u_int32_t *)&tl_stats;
1809
1810	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1811	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1812	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1813	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1814	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1815	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1816
1817	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1818	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1819				tl_stats.tl_tx_multi_collision;
1820	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1821	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1822			    tl_rx_overrun(tl_stats);
1823	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1824
1825	if (tl_tx_underrun(tl_stats)) {
1826		u_int8_t		tx_thresh;
1827		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1828		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1829			tx_thresh >>= 4;
1830			tx_thresh++;
1831			if_printf(ifp, "tx underrun -- increasing "
1832			    "tx threshold to %d bytes\n",
1833			    (64 * (tx_thresh * 4)));
1834			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1835			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1836		}
1837	}
1838
1839	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
1840
1841	if (!sc->tl_bitrate) {
1842		mii = device_get_softc(sc->tl_miibus);
1843		mii_tick(mii);
1844	}
1845
1846	TL_UNLOCK(sc);
1847
1848	return;
1849}
1850
1851/*
1852 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1853 * pointers to the fragment pointers.
1854 */
static int
tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;
	struct ifnet		*ifp = sc->tl_ifp;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len += m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				vtophys(mtod(m, vm_offset_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			if_printf(ifp, "no memory for tx list\n");
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				if_printf(ifp, "no memory for tx list\n");
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		if (frag == TL_MAXFRAGS)
			if_printf(ifp,
			    "all frags filled but frame still too small!\n");
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
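/*
 * The lists are chained together through their physical addresses
 * (tlist_fptr). If no transmission is already in progress (the pending
 * queue is empty and the tl_txeoc flag is set), the first list's
 * physical address is loaded into TL_CH_PARM and a TX GO command is
 * issued; otherwise the new lists are appended to the tail of the
 * pending queue and the transmitter is restarted from the TX EOF/EOC
 * interrupt handlers.
 */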
static void
tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	TL_LOCK(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		TL_UNLOCK(sc);
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while (sc->tl_cdata.tl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/*
		 * Pack the data into the list.
		 * XXX: tl_encap() can fail if it runs out of mbufs,
		 * but its return value is not checked here.
		 */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->tl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		TL_UNLOCK(sc);
		return;
	}

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	TL_UNLOCK(sc);

	return;
}

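/*
 * Initialization: stop any pending I/O, program the TX FIFO threshold
 * and PCI burst size, set up the receive filter (promiscuous and
 * broadcast bits, station address, multicast filter), initialize the
 * RX and TX lists, hand the RX list to the chip, select the media,
 * then issue the RX GO command and start the stats timer.
 */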
static void
tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = sc->tl_ifp;
	struct mii_data		*mii;

	TL_LOCK(sc);

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&IFP2ENADDR(sc->tl_ifp), 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		if_printf(ifp,
		    "initialization failed: no memory for rx buffers\n");
		tl_stop(sc);
		TL_UNLOCK(sc);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		if (sc->tl_miibus != NULL) {
			mii = device_get_softc(sc->tl_miibus);
			mii_mediachg(mii);
		}
	} else {
		tl_ifmedia_upd(ifp);
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the stats update counter */
	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
	TL_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
tl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;

	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else {
		mii = device_get_softc(sc->tl_miibus);
		mii_mediachg(mii);
	}

	return(0);
}

/*
 * Report current media status.
 */
static void
tl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct tl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

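	/*
	 * In bit-rate (non-MII) mode, infer the current media from the
	 * MTXD bits in the ACOMMIT register: MTXD1 set selects AUI
	 * (10base5) rather than 10baseT, and MTXD3 set selects half
	 * duplex. Otherwise, poll the MII layer for the current status.
	 */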
	if (sc->tl_bitrate) {
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	} else {
		mii = device_get_softc(sc->tl_miibus);
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}

	return;
}

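/*
 * Handle socket ioctls. For SIOCSIFFLAGS, a change that only toggles
 * IFF_PROMISC on a running interface is handled by flipping the
 * 'capture all frames' bit and reloading the multicast filter instead
 * of doing a full reinit. Multicast list changes reload the filter,
 * media ioctls go to ifmedia or the MII layer depending on whether the
 * built-in bit-rate PHY is in use, and everything else is passed to
 * ether_ioctl().
 */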
static int
tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		tl_setmulti(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else {
			struct mii_data		*mii;
			mii = device_get_softc(sc->tl_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void)splx(s);

	return(error);
}

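/*
 * A transmission has failed to complete within the timeout set in
 * tl_start(): count it as an output error, then reset and
 * reinitialize the chip.
 */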
static void
tl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	sc = ifp->if_softc;

	if_printf(ifp, "device timeout\n");

	ifp->if_oerrors++;

	tl_softreset(sc, 1);
	tl_init(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
tl_stop(sc)
	struct tl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	TL_LOCK(sc);

	ifp = sc->tl_ifp;

	/* Stop the stats updater. */
	untimeout(tl_stats_update, sc, sc->tl_stat_ch);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_rx_list,
		sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_tx_list,
		sizeof(sc->tl_ldata->tl_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TL_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
tl_shutdown(dev)
	device_t		dev;
{
	struct tl_softc		*sc;

	sc = device_get_softc(dev);

	tl_stop(sc);

	return;
}