1/*
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *	$Id: if_tl.c,v 1.3 1998/05/21 17:05:32 jkh Exp $
33 */
34
35/*
36 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
37 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
38 * the National Semiconductor DP83840A physical interface and the
39 * Microchip Technology 24Cxx series serial EEPROM.
40 *
41 * Written using the following three documents:
42 *
43 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
44 * National Semiconductor DP83840A data sheet (www.national.com)
45 * Microchip Technology 24C02C data sheet (www.microchip.com)
46 *
47 * Written by Bill Paul <wpaul@ctr.columbia.edu>
48 * Electrical Engineering Department
49 * Columbia University, New York City
50 */
51
52/*
53 * Some notes about the ThunderLAN:
54 *
55 * The ThunderLAN controller is a single chip containing PCI controller
56 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
57 * independent interface (MII). The MII allows the ThunderLAN chip to
58 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
59 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
60 * to act as a complete ethernet interface.
61 *
62 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
63 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
64 * in full or half duplex. Some of the Compaq Deskpro machines use a
65 * Level 1 LXT970 PHY with the same capabilities. A serial EEPROM is also
66 * attached to the ThunderLAN chip to provide power-up default register
67 * settings and for storing the adapter's station address. Although not
68 * supported by this driver, the ThunderLAN chip can also be connected
69 * to token ring PHYs.
70 *
71 * It is important to note that while it is possible to have multiple
72 * PHYs attached to the ThunderLAN's MII, only one PHY may be active at
73 * any time. (This makes me wonder exactly how the dual port Compaq
74 * adapter is supposed to work.) This driver attempts to compensate for
75 * this in the following way:
76 *
77 * When the ThunderLAN chip is probed, the probe routine attempts to
78 * locate all attached PHYs by checking all 32 possible PHY addresses
79 * (0x00 to 0x1F). Each PHY is attached as a separate logical interface.
80 * The driver allows any one interface to be brought up at any given
81 * time: if an attempt is made to bring up a second PHY while another
82 * PHY is already enabled, the driver will return an error.
83 *
84 * The ThunderLAN has a set of registers which can be used to issue
85 * commands, acknowledge interrupts, and to manipulate other internal
86 * registers on its DIO bus. The primary registers can be accessed
87 * using either programmed I/O (inb/outb) or via PCI memory mapping,
88 * depending on how the card is configured during the PCI probing
89 * phase. It is even possible to have both PIO and memory mapped
90 * access turned on at the same time.
91 *
92 * Frame reception and transmission with the ThunderLAN chip is done
93 * using frame 'lists.' A list structure looks more or less like this:
94 *
95 * struct tl_frag {
96 *	u_int32_t		fragment_address;
97 *	u_int32_t		fragment_size;
98 * };
99 * struct tl_list {
100 *	u_int32_t		forward_pointer;
101 *	u_int16_t		cstat;
102 *	u_int16_t		frame_size;
103 *	struct tl_frag		fragments[10];
104 * };
105 *
106 * The forward pointer in the list header can be either a 0 or the address
107 * of another list, which allows several lists to be linked together. Each
108 * list contains up to 10 fragment descriptors. This means the chip allows
109 * ethernet frames to be split into as many as 10 chunks for transfer to
110 * and from the SRAM. Note that the forward pointer and fragment buffer
111 * addresses are physical memory addresses, not virtual. Note also that
112 * a single ethernet frame can not span lists: if the host wants to
113 * transmit a frame and the frame data is split up over more than 10
114 * buffers, the frame has to be collapsed before it can be transmitted.
115 *
116 * To receive frames, the driver sets up a number of lists and populates
117 * the fragment descriptors, then it sends an RX GO command to the chip.
118 * When a frame is received, the chip will DMA it into the memory regions
119 * specified by the fragment descriptors and then trigger an RX 'end of
120 * frame interrupt' when done. The driver may choose to use only one
121 * fragment per list; this may result in slightly less efficient use
122 * of memory in exchange for improved performance.
123 *
124 * To transmit frames, the driver again sets up lists and fragment
125 * descriptors, only this time the buffers contain frame data that
126 * is to be DMA'ed into the chip instead of out of it. Once the chip
127 * has transferred the data into its on-board SRAM, it will trigger a
128 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
129 * interrupt when it reaches the end of the list.
130 */
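
/*
 * To make the layout above concrete, here is a rough sketch (for
 * illustration only, not part of the driver) of how two lists might be
 * chained together and how a one-fragment receive buffer would be
 * described. It uses the field names this driver actually uses for
 * struct tl_list further down (tlist_fptr, tlist_cstat, tlist_frsize,
 * tl_frag[]); 'lists' is a hypothetical array of two such lists and
 * 'm' a hypothetical mbuf cluster:
 *
 *	lists[0].tlist_fptr = vtophys(&lists[1]);
 *	lists[1].tlist_fptr = 0;	(a 0 marks the end of the chain)
 *	lists[0].tlist_cstat = TL_CSTAT_READY;
 *	lists[0].tlist_frsize = MCLBYTES;
 *	lists[0].tl_frag[0].tlist_dadr = vtophys(mtod(m, caddr_t));
 *	lists[0].tl_frag[0].tlist_dcnt = MCLBYTES;
 *
 * This mirrors what tl_list_rx_init() and tl_newbuf() do below.
 */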
131
132/*
133 * Some notes about this driver:
134 *
135 * The ThunderLAN chip provides a couple of different ways to organize
136 * reception, transmission and interrupt handling. The simplest approach
137 * is to use one list each for transmission and reception. In this mode,
138 * the ThunderLAN will generate two interrupts for every received frame
139 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
140 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
141 * performance to have to handle so many interrupts.
142 *
143 * Initially I wanted to create a circular list of receive buffers so
144 * that the ThunderLAN chip would think there was an infinitely long
145 * receive channel and never deliver an RXEOC interrupt. However this
146 * doesn't work correctly under heavy load: while the manual says the
147 * chip will trigger an RXEOF interrupt each time a frame is copied into
148 * memory, you can't count on the chip waiting around for you to acknowledge
149 * the interrupt before it starts trying to DMA the next frame. The result
150 * is that the chip might traverse the entire circular list and then wrap
151 * around before you have a chance to do anything about it. Consequently,
152 * the receive list is terminated (with a 0 in the forward pointer in the
153 * last element). Each time an RXEOF interrupt arrives, the used list
154 * is shifted to the end of the list. This gives the appearance of an
155 * infinitely large RX chain so long as the driver doesn't fall behind
156 * the chip and allow all of the lists to be filled up.
157 *
158 * If all the lists are filled, the adapter will deliver an RX 'end of
159 * channel' interrupt when it hits the 0 forward pointer at the end of
160 * the chain. The RXEOC handler then cleans out the RX chain and resets
161 * the list head pointer in the ch_parm register and restarts the receiver.
162 *
163 * For frame transmission, it is possible to program the ThunderLAN's
164 * transmit interrupt threshold so that the chip can acknowledge multiple
165 * lists with only a single TX EOF interrupt. This allows the driver to
166 * queue several frames in one shot, and only have to handle a total
167 * of two interrupts (one TX EOF and one TX EOC) no matter how many frames
168 * are transmitted. Frame transmission is done directly out of the
169 * mbufs passed to the tl_start() routine via the interface send queue.
170 * The driver simply sets up the fragment descriptors in the transmit
171 * lists to point to the mbuf data regions and sends a TX GO command.
172 *
173 * Note that since the RX and TX lists themselves are always used
174 * only by the driver, they are malloc()ed once at driver initialization
175 * time and never free()ed.
176 *
177 * Also, in order to remain as platform independent as possible, this
178 * driver uses memory mapped register access to manipulate the card
179 * as opposed to programmed I/O. This avoids the use of the inb/outb
180 * (and related) instructions which are specific to the i386 platform.
181 *
182 * Using these techniques, this driver achieves very high performance
183 * by minimizing the amount of interrupts generated during large
184 * transfers and by completely avoiding buffer copies. Frame transfer
185 * to and from the ThunderLAN chip is performed entirely by the chip
186 * itself thereby reducing the load on the host CPU.
187 */
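
/*
 * As a sketch of the receive list recycling described above (this is
 * essentially what tl_intvec_rxeof() does further down, shown here in
 * isolation): the list that was just filled gets a fresh mbuf, is
 * appended to the tail of the chain and becomes the new tail.
 *
 *	cur_rx = sc->tl_cdata.tl_rx_head;
 *	sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
 *	m = cur_rx->tl_mbuf;		(hand this mbuf to ether_input())
 *	tl_newbuf(sc, cur_rx);		(give the list a fresh mbuf)
 *	sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr = vtophys(cur_rx->tl_ptr);
 *	sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
 *	sc->tl_cdata.tl_rx_tail = cur_rx;
 */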
188
189#include "bpfilter.h"
190
191#include <sys/param.h>
192#include <sys/systm.h>
193#include <sys/sockio.h>
194#include <sys/mbuf.h>
195#include <sys/malloc.h>
196#include <sys/kernel.h>
197#include <sys/socket.h>
198#include <sys/syslog.h>
199
200#include <net/if.h>
201#include <net/if_arp.h>
202#include <net/ethernet.h>
203#include <net/if_dl.h>
204#include <net/if_mib.h>
205#include <net/if_media.h>
206#include <net/if_types.h>
207
208#ifdef INET
209#include <netinet/in.h>
210#include <netinet/in_systm.h>
211#include <netinet/in_var.h>
212#include <netinet/ip.h>
213#include <netinet/if_ether.h>
214#endif
215
216#ifdef IPX
217#include <netipx/ipx.h>
218#include <netipx/ipx_if.h>
219#endif
220
221#ifdef NS
222#include <netns/ns.h>
223#include <netns/ns_if.h>
224#endif
225
226#if NBPFILTER > 0
227#include <net/bpf.h>
228#include <net/bpfdesc.h>
229#endif
230
231#include <vm/vm.h>              /* for vtophys */
232#include <vm/vm_param.h>        /* for vtophys */
233#include <vm/pmap.h>            /* for vtophys */
234#include <machine/clock.h>      /* for DELAY */
235
236#include <pci/pcireg.h>
237#include <pci/pcivar.h>
238
239#include <pci/if_tlreg.h>
240
241#ifndef lint
242static char rcsid[] =
243	"$Id: if_tl.c,v 1.3 1998/05/21 17:05:32 jkh Exp $";
244#endif
245
246/*
247 * Various supported device vendors/types and their names.
248 */
249
250static struct tl_type tl_devs[] = {
251	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
252		"Texas Instruments ThunderLAN" },
253	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
254		"Compaq Netelligent 10" },
255	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
256		"Compaq Netelligent 10/100" },
257	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
258		"Compaq Netelligent 10/100 Proliant" },
259	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
260		"Compaq Netelligent 10/100 Dual Port" },
261	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
262		"Compaq NetFlex-3/P Integrated" },
263	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
264		"Compaq NetFlex-3/P" },
265	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
266		"Compaq NetFlex 3/P w/ BNC" },
267	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_DESKPRO_4000_5233MMX,
268		"Compaq Deskpro 4000 5233MMX" },
269	{ 0, 0, NULL }
270};
271
272/*
273 * Various supported PHY vendors/types and their names. Note that
274 * this driver will work with pretty much any MII-compliant PHY,
275 * so failure to positively identify the chip is not a fatal error.
276 */
277
278static struct tl_type tl_phys[] = {
279	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
280	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
281	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
282	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
283	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
284	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
285	{ 0, 0, "<MII-compliant physical interface>" }
286};
287
288static struct tl_iflist		*tl_iflist = NULL;
289static unsigned long		tl_count;
290
291static char *tl_probe		__P((pcici_t, pcidi_t));
292static void tl_attach_ctlr	__P((pcici_t, int));
293static int tl_attach_phy	__P((struct tl_csr *, int, char *,
294					int, struct tl_iflist *));
295static int tl_intvec_invalid	__P((void *, u_int32_t));
296static int tl_intvec_dummy	__P((void *, u_int32_t));
297static int tl_intvec_rxeoc	__P((void *, u_int32_t));
298static int tl_intvec_txeoc	__P((void *, u_int32_t));
299static int tl_intvec_txeof	__P((void *, u_int32_t));
300static int tl_intvec_rxeof	__P((void *, u_int32_t));
301static int tl_intvec_adchk	__P((void *, u_int32_t));
302static int tl_intvec_netsts	__P((void *, u_int32_t));
303static int tl_intvec_statoflow	__P((void *, u_int32_t));
304
305static int tl_newbuf		__P((struct tl_softc *, struct tl_chain *));
306static void tl_stats_update	__P((void *));
307static int tl_encap		__P((struct tl_softc *, struct tl_chain *,
308						struct mbuf *));
309
310static void tl_intr		__P((void *));
311static void tl_start		__P((struct ifnet *));
312static int tl_ioctl		__P((struct ifnet *, int, caddr_t));
313static void tl_init		__P((void *));
314static void tl_stop		__P((struct tl_softc *));
315static void tl_watchdog		__P((struct ifnet *));
316static void tl_shutdown		__P((int, void *));
317static int tl_ifmedia_upd	__P((struct ifnet *));
318static void tl_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
319
320static u_int8_t tl_eeprom_putbyte	__P((struct tl_csr *, u_int8_t));
321static u_int8_t	tl_eeprom_getbyte	__P((struct tl_csr *, u_int8_t ,
322							u_int8_t * ));
323static int tl_read_eeprom	__P((struct tl_csr *, caddr_t, int, int));
324
325static void tl_mii_sync		__P((struct tl_csr *));
326static void tl_mii_send		__P((struct tl_csr *, u_int32_t, int));
327static int tl_mii_readreg	__P((struct tl_csr *, struct tl_mii_frame *));
328static int tl_mii_writereg	__P((struct tl_csr *, struct tl_mii_frame *));
329static u_int16_t tl_phy_readreg	__P((struct tl_softc *, int));
330static void tl_phy_writereg	__P((struct tl_softc *, u_int16_t, u_int16_t));
331
332static void tl_autoneg		__P((struct tl_softc *, int, int));
333static void tl_setmode		__P((struct tl_softc *, int));
334static int tl_calchash		__P((char *));
335static void tl_setmulti		__P((struct tl_softc *));
336static void tl_softreset	__P((struct tl_csr *, int));
337static int tl_list_rx_init	__P((struct tl_softc *));
338static int tl_list_tx_init	__P((struct tl_softc *));
339
340/*
341 * ThunderLAN adapters typically have a serial EEPROM containing
342 * configuration information. The main reason we're interested in
343 * it is because it also contains the adapter's station address.
344 *
345 * Access to the EEPROM is a bit goofy since it is a serial device:
346 * you have to do reads and writes one bit at a time. The state of
347 * the DATA bit can only change while the CLOCK line is held low.
348 * Transactions work basically like this:
349 *
350 * 1) Send the EEPROM_START sequence to prepare the EEPROM for
351 *    accepting commands. This pulls the clock high, sets
352 *    the data bit to 1, enables transmission to the EEPROM,
353 *    pulls the data bit back down to 0, then pulls the clock low.
354 *    The idea is to do a 1 to 0 transition of the data bit
355 *    while the clock pin is held high.
356 *
357 * 2) To write a bit to the EEPROM, set the TXENABLE bit, then
358 *    set the EDATA bit to send a 1 or clear it to send a 0.
359 *    Finally, set and then clear ECLOK. Strobing the clock
360 *    transmits the bit. After 8 bits have been written, the
361 *    EEPROM should respond with an ACK, which should be read.
362 *
363 * 3) To read a bit from the EEPROM, clear the TXENABLE bit,
364 *    then set ECLOK. The bit can then be read by reading EDATA.
365 *    ECLOK should then be cleared again. This can be repeated 8
366 *    times to read a whole byte; no ACK is generated for reads.
367 *
368 * 4) We need to send the address byte to the EEPROM. For this
369 *    we have to send the write control byte to the EEPROM to
370 *    tell it to accept data. The byte is 0xA0. The EEPROM should
371 * ack this. The address byte can be sent after that.
372 *
373 * 5) Now we have to tell the EEPROM to send us data. For that we
374 *    have to transmit the read control byte, which is 0xA1. This
375 *    byte should also be acked. We can then read the data bits
376 *    from the EEPROM.
377 *
378 * 6) When we're all finished, send the EEPROM_STOP sequence.
379 *
380 * Note that we use the ThunderLAN's NetSio register to access the
381 * EEPROM; however, there is an alternate method. There is a PCI NVRAM
382 * register at PCI offset 0xB4 which can also be used with minor changes.
383 * The difference is that access to PCI registers via pci_conf_read()
384 * and pci_conf_write() is done using programmed I/O, which we want to
385 * avoid.
386 */
387
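/*
 * Putting the steps above together, fetching one byte at EEPROM offset
 * 'addr' comes down to the sequence below. This is only a sketch of
 * what tl_eeprom_getbyte() further down actually does; EEPROM_CTL_WRITE
 * and EEPROM_CTL_READ correspond to the 0xA0/0xA1 control bytes
 * mentioned above.
 *
 *	EEPROM_START;
 *	tl_eeprom_putbyte(csr, EEPROM_CTL_WRITE);	(select chip, expect ACK)
 *	tl_eeprom_putbyte(csr, addr);			(send address, expect ACK)
 *	EEPROM_STOP;
 *	EEPROM_START;
 *	tl_eeprom_putbyte(csr, EEPROM_CTL_READ);	(read mode, expect ACK)
 *	(clock in 8 data bits via TL_SIO_ECLOK/TL_SIO_EDATA)
 *	EEPROM_STOP;
 */
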
388/*
389 * Note that EEPROM_START leaves transmission enabled.
390 */
391#define EEPROM_START							\
392	DIO_SEL(TL_NETSIO);						\
393	DIO_BYTE1_SET(TL_SIO_ECLOK); /* Pull clock pin high */		\
394	DIO_BYTE1_SET(TL_SIO_EDATA); /* Set DATA bit to 1 */		\
395	DIO_BYTE1_SET(TL_SIO_ETXEN); /* Enable xmit to write bit */	\
396	DIO_BYTE1_CLR(TL_SIO_EDATA); /* Pull DATA bit to 0 again */	\
397	DIO_BYTE1_CLR(TL_SIO_ECLOK); /* Pull clock low again */
398
399/*
400 * EEPROM_STOP ends access to the EEPROM and clears the ETXEN bit so
401 * that no further data can be written to the EEPROM I/O pin.
402 */
403#define EEPROM_STOP							\
404	DIO_SEL(TL_NETSIO);						\
405	DIO_BYTE1_CLR(TL_SIO_ETXEN); /* Disable xmit */			\
406	DIO_BYTE1_CLR(TL_SIO_EDATA); /* Pull DATA to 0 */		\
407	DIO_BYTE1_SET(TL_SIO_ECLOK); /* Pull clock high */		\
408	DIO_BYTE1_SET(TL_SIO_ETXEN); /* Enable xmit */			\
409	DIO_BYTE1_SET(TL_SIO_EDATA); /* Toggle DATA to 1 */		\
410	DIO_BYTE1_CLR(TL_SIO_ETXEN); /* Disable xmit. */		\
411	DIO_BYTE1_CLR(TL_SIO_ECLOK); /* Pull clock low again */
412
413/*
414 * Send an instruction or address to the EEPROM, check for ACK.
415 */
416static u_int8_t tl_eeprom_putbyte(csr, byte)
417	struct tl_csr		*csr;
418	u_int8_t		byte;
419{
420	register int		i, ack = 0;
421
422	/*
423	 * Make sure we're in TX mode.
424	 */
425	DIO_SEL(TL_NETSIO);
426	DIO_BYTE1_SET(TL_SIO_ETXEN);
427
428	/*
429	 * Feed in each bit and strobe the clock.
430	 */
431	for (i = 0x80; i; i >>= 1) {
432		DIO_SEL(TL_NETSIO);
433		if (byte & i) {
434			DIO_BYTE1_SET(TL_SIO_EDATA);
435		} else {
436			DIO_BYTE1_CLR(TL_SIO_EDATA);
437		}
438		DIO_BYTE1_SET(TL_SIO_ECLOK);
439		DIO_BYTE1_CLR(TL_SIO_ECLOK);
440	}
441
442	/*
443	 * Turn off TX mode.
444	 */
445	DIO_BYTE1_CLR(TL_SIO_ETXEN);
446
447	/*
448	 * Check for ack.
449	 */
450	DIO_BYTE1_SET(TL_SIO_ECLOK);
451	ack = DIO_BYTE1_GET(TL_SIO_EDATA);
452	DIO_BYTE1_CLR(TL_SIO_ECLOK);
453
454	return(ack);
455}
456
457/*
458 * Read a byte of data stored in the EEPROM at address 'addr.'
459 */
460static u_int8_t tl_eeprom_getbyte(csr, addr, dest)
461	struct tl_csr		*csr;
462	u_int8_t		addr;
463	u_int8_t		*dest;
464{
465	register int		i;
466	u_int8_t		byte = 0;
467
468	EEPROM_START;
469	/*
470	 * Send write control code to EEPROM.
471	 */
472	if (tl_eeprom_putbyte(csr, EEPROM_CTL_WRITE))
473		return(1);
474
475	/*
476	 * Send address of byte we want to read.
477	 */
478	if (tl_eeprom_putbyte(csr, addr))
479		return(1);
480
481	EEPROM_STOP;
482	EEPROM_START;
483	/*
484	 * Send read control code to EEPROM.
485	 */
486	if (tl_eeprom_putbyte(csr, EEPROM_CTL_READ))
487		return(1);
488
489	/*
490	 * Start reading bits from EEPROM.
491	 */
492	DIO_SEL(TL_NETSIO);
493	DIO_BYTE1_CLR(TL_SIO_ETXEN);
494	for (i = 0x80; i; i >>= 1) {
495		DIO_SEL(TL_NETSIO);
496		DIO_BYTE1_SET(TL_SIO_ECLOK);
497		if (DIO_BYTE1_GET(TL_SIO_EDATA))
498			byte |= i;
499		DIO_BYTE1_CLR(TL_SIO_ECLOK);
500	}
501
502	EEPROM_STOP;
503
504	/*
505	 * No ACK generated for read, so just return byte.
506	 */
507
508	*dest = byte;
509
510	return(0);
511}
512
513static void tl_mii_sync(csr)
514	struct tl_csr		*csr;
515{
516	register int		i;
517
518	DIO_SEL(TL_NETSIO);
519	DIO_BYTE1_CLR(TL_SIO_MTXEN);
520
521	for (i = 0; i < 32; i++) {
522		DIO_BYTE1_SET(TL_SIO_MCLK);
523		DIO_BYTE1_CLR(TL_SIO_MCLK);
524	}
525
526	return;
527}
528
529static void tl_mii_send(csr, bits, cnt)
530	struct tl_csr		*csr;
531	u_int32_t		bits;
532	int			cnt;
533{
534	int			i;
535
536	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
537		DIO_BYTE1_CLR(TL_SIO_MCLK);
538		if (bits & i) {
539			DIO_BYTE1_SET(TL_SIO_MDATA);
540		} else {
541			DIO_BYTE1_CLR(TL_SIO_MDATA);
542		}
543		DIO_BYTE1_SET(TL_SIO_MCLK);
544	}
545}
546
547static int tl_mii_readreg(csr, frame)
548	struct tl_csr		*csr;
549	struct tl_mii_frame	*frame;
550
551{
552	int			i, ack, s;
553	int			minten = 0;
554
555	s = splimp();
556
557	tl_mii_sync(csr);
558
559	/*
560	 * Set up frame for RX.
561	 */
562	frame->mii_stdelim = TL_MII_STARTDELIM;
563	frame->mii_opcode = TL_MII_READOP;
564	frame->mii_turnaround = 0;
565	frame->mii_data = 0;
566
567	/*
568	 * Select the NETSIO register. We will be using it
569 	 * to communicate indirectly with the MII.
570	 */
571
572	DIO_SEL(TL_NETSIO);
573
574	/*
575	 * Turn off MII interrupt by forcing MINTEN low.
576	 */
577	minten = DIO_BYTE1_GET(TL_SIO_MINTEN);
578	if (minten) {
579		DIO_BYTE1_CLR(TL_SIO_MINTEN);
580	}
581
582	/*
583 	 * Turn on data xmit.
584	 */
585	DIO_BYTE1_SET(TL_SIO_MTXEN);
586
587	/*
588	 * Send command/address info.
589	 */
590	tl_mii_send(csr, frame->mii_stdelim, 2);
591	tl_mii_send(csr, frame->mii_opcode, 2);
592	tl_mii_send(csr, frame->mii_phyaddr, 5);
593	tl_mii_send(csr, frame->mii_regaddr, 5);
594
595	/*
596	 * Turn off xmit.
597	 */
598	DIO_BYTE1_CLR(TL_SIO_MTXEN);
599
600	/* Idle bit */
601	DIO_BYTE1_CLR(TL_SIO_MCLK);
602	DIO_BYTE1_SET(TL_SIO_MCLK);
603
604	/* Check for ack */
605	DIO_BYTE1_CLR(TL_SIO_MCLK);
606	ack = DIO_BYTE1_GET(TL_SIO_MDATA);
607
608	/* Complete the cycle */
609	DIO_BYTE1_SET(TL_SIO_MCLK);
610
611	/*
612	 * Now try reading data bits. If the ack failed, we still
613	 * need to clock through 16 cycles to keep the PHYs in sync.
614	 */
615	if (ack) {
616		for(i = 0; i < 16; i++) {
617			DIO_BYTE1_CLR(TL_SIO_MCLK);
618			DIO_BYTE1_SET(TL_SIO_MCLK);
619		}
620		goto fail;
621	}
622
623	for (i = 0x8000; i; i >>= 1) {
624		DIO_BYTE1_CLR(TL_SIO_MCLK);
625		if (!ack) {
626			if (DIO_BYTE1_GET(TL_SIO_MDATA))
627				frame->mii_data |= i;
628		}
629		DIO_BYTE1_SET(TL_SIO_MCLK);
630	}
631
632fail:
633
634	DIO_BYTE1_CLR(TL_SIO_MCLK);
635	DIO_BYTE1_SET(TL_SIO_MCLK);
636
637	/* Reenable interrupts */
638	if (minten) {
639		DIO_BYTE1_SET(TL_SIO_MINTEN);
640	}
641
642	splx(s);
643
644	if (ack)
645		return(1);
646	return(0);
647}
648
649static int tl_mii_writereg(csr, frame)
650	struct tl_csr		*csr;
651	struct tl_mii_frame	*frame;
652
653{
654	int			s;
655	int			minten;
656
657	tl_mii_sync(csr);
658
659	s = splimp();
660	/*
661	 * Set up frame for TX.
662	 */
663
664	frame->mii_stdelim = TL_MII_STARTDELIM;
665	frame->mii_opcode = TL_MII_WRITEOP;
666	frame->mii_turnaround = TL_MII_TURNAROUND;
667
668	/*
669	 * Select the NETSIO register. We will be using it
670 	 * to communicate indirectly with the MII.
671	 */
672
673	DIO_SEL(TL_NETSIO);
674
675	/*
676	 * Turn off MII interrupt by forcing MINTEN low.
677	 */
678	minten = DIO_BYTE1_GET(TL_SIO_MINTEN);
679	if (minten) {
680		DIO_BYTE1_CLR(TL_SIO_MINTEN);
681	}
682
683	/*
684 	 * Turn on data output.
685	 */
686	DIO_BYTE1_SET(TL_SIO_MTXEN);
687
688	tl_mii_send(csr, frame->mii_stdelim, 2);
689	tl_mii_send(csr, frame->mii_opcode, 2);
690	tl_mii_send(csr, frame->mii_phyaddr, 5);
691	tl_mii_send(csr, frame->mii_regaddr, 5);
692	tl_mii_send(csr, frame->mii_turnaround, 2);
693	tl_mii_send(csr, frame->mii_data, 16);
694
695	DIO_BYTE1_SET(TL_SIO_MCLK);
696	DIO_BYTE1_CLR(TL_SIO_MCLK);
697
698	/*
699	 * Turn off xmit.
700	 */
701	DIO_BYTE1_CLR(TL_SIO_MTXEN);
702
703	/* Reenable interrupts */
704	if (minten)
705		DIO_BYTE1_SET(TL_SIO_MINTEN);
706
707	splx(s);
708
709	return(0);
710}
711
712static u_int16_t tl_phy_readreg(sc, reg)
713	struct tl_softc		*sc;
714	int			reg;
715{
716	struct tl_mii_frame	frame;
717	struct tl_csr		*csr;
718
719	bzero((char *)&frame, sizeof(frame));
720
721	csr = sc->csr;
722
723	frame.mii_phyaddr = sc->tl_phy_addr;
724	frame.mii_regaddr = reg;
725	tl_mii_readreg(sc->csr, &frame);
726
727	/* Reenable MII interrupts, just in case. */
728	DIO_SEL(TL_NETSIO);
729	DIO_BYTE1_SET(TL_SIO_MINTEN);
730
731	return(frame.mii_data);
732}
733
734static void tl_phy_writereg(sc, reg, data)
735	struct tl_softc		*sc;
736	u_int16_t		reg;
737	u_int16_t		data;
738{
739	struct tl_mii_frame	frame;
740	struct tl_csr		*csr;
741
742	bzero((char *)&frame, sizeof(frame));
743
744	csr = sc->csr;
745	frame.mii_phyaddr = sc->tl_phy_addr;
746	frame.mii_regaddr = reg;
747	frame.mii_data = data;
748
749	tl_mii_writereg(sc->csr, &frame);
750
751	/* Reenable MII interrupts, just in case. */
752	DIO_SEL(TL_NETSIO);
753	DIO_BYTE1_SET(TL_SIO_MINTEN);
754
755	return;
756}
757
758/*
759 * Read a sequence of bytes from the EEPROM.
760 */
761static int tl_read_eeprom(csr, dest, off, cnt)
762	struct tl_csr		*csr;
763	caddr_t			dest;
764	int			off;
765	int			cnt;
766{
767	int			err = 0, i;
768	u_int8_t		byte = 0;
769
770	for (i = 0; i < cnt; i++) {
771		err = tl_eeprom_getbyte(csr, off + i, &byte);
772		if (err)
773			break;
774		*(dest + i) = byte;
775	}
776
777	return(err ? 1 : 0);
778}
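
/*
 * A typical use, as in tl_attach_ctlr() further down, is pulling the
 * station address out of the EEPROM at attach time:
 *
 *	if (tl_read_eeprom(csr, (caddr_t)&eaddr,
 *			TL_EEPROM_EADDR, ETHER_ADDR_LEN))
 *		printf("failed to read station address\n");
 */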
779
780/*
781 * Initiate autonegotiation with a link partner.
782 *
783 * Note that the Texas Instruments ThunderLAN programmer's guide
784 * fails to mention one very important point about autonegotiation.
785 * Autonegotiation is done largely by the PHY, independent of the
786 * ThunderLAN chip itself: the PHY sets the flags in the BMCR
787 * register to indicate what modes were selected and if link status
788 * is good. In fact, the PHY does pretty much all of the work itself,
789 * except for one small detail.
790 *
791 * The PHY may negotiate a full-duplex or half-duplex link, and set
792 * the PHY_BMCR_DUPLEX bit accordingly, but the ThunderLAN's 'NetCommand'
793 * register _also_ has a half-duplex/full-duplex bit, and you MUST ALSO
794 * SET THIS BIT MANUALLY TO CORRESPOND TO THE MODE SELECTED FOR THE PHY!
795 * In other words, both the ThunderLAN chip and the PHY have to be
796 * programmed for full-duplex mode in order for full-duplex to actually
797 * work. So in order for autonegotiation to really work right, we have
798 * to wait for the link to come up, check the BMCR register, then set
799 * the ThunderLAN for full or half-duplex as needed.
800 *
801 * I struggled for two days to figure this out, so I'm making a point
802 * of drawing attention to this fact. I think it's very strange that
803 * the ThunderLAN doesn't automagically track the duplex state of the
804 * PHY, but there you have it.
805 *
806 * Also, when using a National Semiconductor DP83840A PHY, we have to
807 * allow a full three seconds for autonegotiation to complete. So what
808 * we do is flip the autonegotiation restart bit, then set a timeout
809 * to wake us up in three seconds to check the link state.
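 *
 * The upshot, as a short sketch using this driver's own helpers, is
 * that forcing full-duplex really takes two writes, not one:
 *
 *	bmcr = tl_phy_readreg(sc, PHY_BMCR);
 *	tl_phy_writereg(sc, PHY_BMCR, bmcr | PHY_BMCR_DUPLEX);
 *	DIO_SEL(TL_NETCMD);
 *	DIO_BYTE0_SET(TL_CMD_DUPLEX);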
810 */
811static void tl_autoneg(sc, flag, verbose)
812	struct tl_softc		*sc;
813	int			flag;
814	int			verbose;
815{
816	u_int16_t		phy_sts = 0, media = 0;
817	struct ifnet		*ifp;
818	struct ifmedia		*ifm;
819	struct tl_csr		*csr;
820
821	ifm = &sc->ifmedia;
822	ifp = &sc->arpcom.ac_if;
823	csr = sc->csr;
824
825	/*
826	 * First, see if autoneg is supported. If not, there's
827	 * no point in continuing.
828	 */
829	phy_sts = tl_phy_readreg(sc, PHY_BMSR);
830	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
831		if (verbose)
832			printf("tl%d: autonegotiation not supported\n",
833							sc->tl_unit);
834		return;
835	}
836
837	switch (flag) {
838	case TL_FLAG_FORCEDELAY:
839		/*
840	 	 * XXX Never use this option anywhere but in the probe
841	 	 * routine: making the kernel stop dead in its tracks
842 		 * for three whole seconds after we've gone multi-user
843		 * is really bad manners.
844	 	 */
845		phy_sts = tl_phy_readreg(sc, PHY_BMCR);
846		phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
847		tl_phy_writereg(sc, PHY_BMCR, phy_sts);
848		DELAY(3000000);
849		break;
850	case TL_FLAG_SCHEDDELAY:
851		phy_sts = tl_phy_readreg(sc, PHY_BMCR);
852		phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
853		tl_phy_writereg(sc, PHY_BMCR, phy_sts);
854		ifp->if_timer = 3;
855		sc->tl_autoneg = 1;
856		return;
857	case TL_FLAG_DELAYTIMEO:
858		ifp->if_timer = 0;
859		sc->tl_autoneg = 0;
860		break;
861	default:
862		printf("tl%d: invalid autoneg flag: %d\n", sc->tl_unit, flag);
863		return;
864	}
865
866	/*
867 	 * Read the BMSR register twice: the LINKSTAT bit is a
868	 * latching bit.
869	 */
870	tl_phy_readreg(sc, PHY_BMSR);
871	phy_sts = tl_phy_readreg(sc, PHY_BMSR);
872	if (phy_sts & PHY_BMSR_AUTONEGCOMP) {
873		if (verbose)
874			printf("tl%d: autoneg complete, ", sc->tl_unit);
875		phy_sts = tl_phy_readreg(sc, PHY_BMSR);
876	} else {
877		if (verbose)
878			printf("tl%d: autoneg not complete, ", sc->tl_unit);
879	}
880
881	/* Link is good. Report modes and set duplex mode. */
882	if (phy_sts & PHY_BMSR_LINKSTAT) {
883		if (verbose)
884			printf("link status good ");
885		media = tl_phy_readreg(sc, PHY_BMCR);
886
887		/* Set the DUPLEX bit in the NetCmd register accordingly. */
888		if (media & PHY_BMCR_DUPLEX) {
889			if (verbose)
890				printf("(full-duplex, ");
891			ifm->ifm_media |= IFM_FDX;
892			ifm->ifm_media &= ~IFM_HDX;
893			DIO_SEL(TL_NETCMD);
894			DIO_BYTE0_SET(TL_CMD_DUPLEX);
895		} else {
896			if (verbose)
897				printf("(half-duplex, ");
898			ifm->ifm_media &= ~IFM_FDX;
899			ifm->ifm_media |= IFM_HDX;
900			DIO_SEL(TL_NETCMD);
901			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
902		}
903
904		if (media & PHY_BMCR_SPEEDSEL) {
905			if (verbose)
906				printf("100Mb/s)\n");
907			ifm->ifm_media |= IFM_100_TX;
908			ifm->ifm_media &= ~IFM_10_T;
909		} else {
910			if (verbose)
911				printf("10Mb/s)\n");
912			ifm->ifm_media &= ~IFM_100_TX;
913			ifm->ifm_media |= IFM_10_T;
914		}
915
916		/* Turn off autoneg */
917		media &= ~PHY_BMCR_AUTONEGENBL;
918		tl_phy_writereg(sc, PHY_BMCR, media);
919	} else {
920		if (verbose)
921			printf("no carrier\n");
922	}
923
924	return;
925}
926
927/*
928 * Set speed and duplex mode. Also program autoneg advertisements
929 * accordingly.
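 *
 * A typical call, presumably made from the ifmedia handlers (e.g.
 * tl_ifmedia_upd()), would look something like:
 *
 *	tl_setmode(sc, IFM_ETHER|IFM_100_TX|IFM_FDX);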
930 */
931static void tl_setmode(sc, media)
932	struct tl_softc		*sc;
933	int			media;
934{
935	u_int16_t		bmcr, anar, ctl;
936	struct tl_csr		*csr;
937
938	csr = sc->csr;
939	bmcr = tl_phy_readreg(sc, PHY_BMCR);
940	anar = tl_phy_readreg(sc, PHY_ANAR);
941	ctl = tl_phy_readreg(sc, TL_PHY_CTL);
942	DIO_SEL(TL_NETCMD);
943
944	bmcr &= ~(PHY_BMCR_SPEEDSEL|PHY_BMCR_DUPLEX|PHY_BMCR_AUTONEGENBL|
945		  PHY_BMCR_LOOPBK);
946	anar &= ~(PHY_ANAR_100BT4|PHY_ANAR_100BTXFULL|PHY_ANAR_100BTXHALF|
947		  PHY_ANAR_10BTFULL|PHY_ANAR_10BTHALF);
948
949	ctl &= ~PHY_CTL_AUISEL;
950
951	if (IFM_SUBTYPE(media) == IFM_LOOP)
952		bmcr |= PHY_BMCR_LOOPBK;
953
954	if (IFM_SUBTYPE(media) == IFM_AUTO)
955		bmcr |= PHY_BMCR_AUTONEGENBL;
956
957	if (IFM_SUBTYPE(media) == IFM_10_5)
958		ctl |= PHY_CTL_AUISEL;
959
960	if (IFM_SUBTYPE(media) == IFM_100_TX) {
961		bmcr |= PHY_BMCR_SPEEDSEL;
962		if ((media & IFM_GMASK) == IFM_FDX) {
963			bmcr |= PHY_BMCR_DUPLEX;
964			anar |= PHY_ANAR_100BTXFULL;
965			DIO_BYTE0_SET(TL_CMD_DUPLEX);
966		} else if ((media & IFM_GMASK) == IFM_HDX) {
967			bmcr &= ~PHY_BMCR_DUPLEX;
968			anar |= PHY_ANAR_100BTXHALF;
969			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
970		} else {
971			bmcr &= ~PHY_BMCR_DUPLEX;
972			anar |= PHY_ANAR_100BTXHALF;
973			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
974		}
975	}
976
977	if (IFM_SUBTYPE(media) == IFM_10_T) {
978		bmcr &= ~PHY_BMCR_SPEEDSEL;
979		if ((media & IFM_GMASK) == IFM_FDX) {
980			bmcr |= PHY_BMCR_DUPLEX;
981			anar |= PHY_ANAR_10BTFULL;
982			DIO_BYTE0_SET(TL_CMD_DUPLEX);
983		} else if ((media & IFM_GMASK) == IFM_HDX) {
984			bmcr &= ~PHY_BMCR_DUPLEX;
985			anar |= PHY_ANAR_10BTHALF;
986			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
987		} else {
988			bmcr &= ~PHY_BMCR_DUPLEX;
989			anar |= PHY_ANAR_10BTHALF;
990			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
991		}
992	}
993
994	tl_phy_writereg(sc, PHY_BMCR, bmcr);
995	tl_phy_writereg(sc, PHY_ANAR, anar);
996	tl_phy_writereg(sc, TL_PHY_CTL, ctl);
997
998	return;
999}
1000
1001#define XOR(a, b)		((a && !b) || (!a && b))
1002#define DA(addr, offset)	(addr[offset / 8] & (1 << (offset % 8)))
1003
1004static int tl_calchash(addr)
1005	char			*addr;
1006{
1007	int			h;
1008
1009	h = XOR(DA(addr, 0), XOR(DA(addr, 6), XOR(DA(addr, 12),
1010	    XOR(DA(addr, 18), XOR(DA(addr, 24), XOR(DA(addr, 30),
1011	    XOR(DA(addr, 36), DA(addr, 42))))))));
1012
1013	h |= XOR(DA(addr, 1), XOR(DA(addr, 7), XOR(DA(addr, 13),
1014	     XOR(DA(addr, 19), XOR(DA(addr, 25), XOR(DA(addr, 31),
1015	     XOR(DA(addr, 37), DA(addr, 43)))))))) << 1;
1016
1017	h |= XOR(DA(addr, 2), XOR(DA(addr, 8), XOR(DA(addr, 14),
1018	     XOR(DA(addr, 20), XOR(DA(addr, 26), XOR(DA(addr, 32),
1019	     XOR(DA(addr, 38), DA(addr, 44)))))))) << 2;
1020
1021	h |= XOR(DA(addr, 3), XOR(DA(addr, 9), XOR(DA(addr, 15),
1022	     XOR(DA(addr, 21), XOR(DA(addr, 27), XOR(DA(addr, 33),
1023	     XOR(DA(addr, 39), DA(addr, 45)))))))) << 3;
1024
1025	h |= XOR(DA(addr, 4), XOR(DA(addr, 10), XOR(DA(addr, 16),
1026	     XOR(DA(addr, 22), XOR(DA(addr, 28), XOR(DA(addr, 34),
1027	     XOR(DA(addr, 40), DA(addr, 46)))))))) << 4;
1028
1029	h |= XOR(DA(addr, 5), XOR(DA(addr, 11), XOR(DA(addr, 17),
1030	     XOR(DA(addr, 23), XOR(DA(addr, 29), XOR(DA(addr, 35),
1031	     XOR(DA(addr, 41), DA(addr, 47)))))))) << 5;
1032
1033	return(h);
1034}
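
/*
 * For reference, the 6-bit value computed above indexes a 64-bit hash
 * table that is split across the TL_HASH1 and TL_HASH2 registers, as
 * tl_setmulti() below shows. For example, an address hashing to 37
 * sets bit 5 (37 - 32) of the second register:
 *
 *	h = tl_calchash(addr);
 *	if (h < 32)
 *		hashes[0] |= (1 << h);
 *	else
 *		hashes[1] |= (1 << (h - 32));
 */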
1035
1036static void tl_setmulti(sc)
1037	struct tl_softc		*sc;
1038{
1039	struct ifnet		*ifp;
1040	struct tl_csr		*csr;
1041	u_int32_t		hashes[2] = { 0, 0 };
1042	int			h;
1043	struct ifmultiaddr	*ifma;
1044
1045	csr = sc->csr;
1046	ifp = &sc->arpcom.ac_if;
1047
1048	if (sc->arpcom.ac_multicnt > 64 || ifp->if_flags & IFF_ALLMULTI) {
1049		hashes[0] = 0xFFFFFFFF;
1050		hashes[1] = 0xFFFFFFFF;
1051	} else {
1052		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
1053					ifma = ifma->ifma_link.le_next) {
1054			if (ifma->ifma_addr->sa_family != AF_LINK)
1055				continue;
1056			h = tl_calchash(
1057				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1058			if (h < 32)
1059				hashes[0] |= (1 << h);
1060			else
1061				hashes[1] |= (1 << (h - 32));
1062		}
1063	}
1064
1065	DIO_SEL(TL_HASH1);
1066	DIO_LONG_PUT(hashes[0]);
1067	DIO_SEL(TL_HASH2);
1068	DIO_LONG_PUT(hashes[1]);
1069
1070	return;
1071}
1072
1073static void tl_softreset(csr, internal)
1074        struct tl_csr           *csr;
1075	int			internal;
1076{
1077        u_int32_t               cmd, dummy;
1078
1079        /* Assert the adapter reset bit. */
1080        csr->tl_host_cmd |= TL_CMD_ADRST;
1081        /* Turn off interrupts */
1082        csr->tl_host_cmd |= TL_CMD_INTSOFF;
1083
1084	/* First, clear the stats registers. */
1085	DIO_SEL(TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1086	DIO_LONG_GET(dummy);
1087	DIO_LONG_GET(dummy);
1088	DIO_LONG_GET(dummy);
1089	DIO_LONG_GET(dummy);
1090	DIO_LONG_GET(dummy);
1091
1092        /* Clear Areg and Hash registers */
1093	DIO_SEL(TL_AREG0_B5|TL_DIO_ADDR_INC);
1094	DIO_LONG_PUT(0x00000000);
1095	DIO_LONG_PUT(0x00000000);
1096	DIO_LONG_PUT(0x00000000);
1097	DIO_LONG_PUT(0x00000000);
1098	DIO_LONG_PUT(0x00000000);
1099	DIO_LONG_PUT(0x00000000);
1100	DIO_LONG_PUT(0x00000000);
1101	DIO_LONG_PUT(0x00000000);
1102
1103        /*
1104	 * Set up Netconfig register. Enable one channel and
1105	 * one fragment mode.
1106	 */
1107	DIO_SEL(TL_NETCONFIG);
1108	DIO_WORD0_SET(TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1109	if (internal) {
1110		DIO_SEL(TL_NETCONFIG);
1111		DIO_WORD0_SET(TL_CFG_PHYEN);
1112	} else {
1113		DIO_SEL(TL_NETCONFIG);
1114		DIO_WORD0_CLR(TL_CFG_PHYEN);
1115	}
1116
1117        /* Set PCI burst size */
1118        DIO_SEL(TL_BSIZEREG);
1119        DIO_BYTE1_SET(0x33);
1120
1121	/*
1122	 * Load adapter irq pacing timer and tx threshold.
1123	 * We make the transmit threshold 1 initially but we may
1124	 * change that later.
1125	 */
1126	cmd = csr->tl_host_cmd;
1127	cmd |= TL_CMD_NES;
1128	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1129	csr->tl_host_cmd = cmd | (TL_CMD_LDTHR | TX_THR);
1130	csr->tl_host_cmd = cmd | (TL_CMD_LDTMR | 0x00000003);
1131
1132        /* Unreset the MII */
1133        DIO_SEL(TL_NETSIO);
1134        DIO_BYTE1_SET(TL_SIO_NMRST);
1135
1136	/* Clear status register */
1137        DIO_SEL(TL_NETSTS);
1138        DIO_BYTE2_SET(TL_STS_MIRQ);
1139        DIO_BYTE2_SET(TL_STS_HBEAT);
1140        DIO_BYTE2_SET(TL_STS_TXSTOP);
1141        DIO_BYTE2_SET(TL_STS_RXSTOP);
1142
1143	/* Enable network status interrupts for everything. */
1144	DIO_SEL(TL_NETMASK);
1145	DIO_BYTE3_SET(TL_MASK_MASK7|TL_MASK_MASK6|
1146			TL_MASK_MASK5|TL_MASK_MASK4);
1147
1148	/* Take the adapter out of reset */
1149	DIO_SEL(TL_NETCMD);
1150	DIO_BYTE0_SET(TL_CMD_NRESET|TL_CMD_NWRAP);
1151
1152	/* Wait for things to settle down a little. */
1153	DELAY(500);
1154
1155        return;
1156}
1157
1158/*
1159 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1160 * against our list and return its name if we find a match. Note that
1161 * we also save a pointer to the tl_type struct for this card since we
1162 * will need it for the softc struct and attach routine later.
1163 */
1164static char *
1165tl_probe(config_id, device_id)
1166	pcici_t			config_id;
1167	pcidi_t			device_id;
1168{
1169	struct tl_type		*t;
1170	struct tl_iflist	*new;
1171
1172	t = tl_devs;
1173
1174	while(t->tl_name != NULL) {
1175		if ((device_id & 0xFFFF) == t->tl_vid &&
1176		    ((device_id >> 16) & 0xFFFF) == t->tl_did) {
1177			new = malloc(sizeof(struct tl_iflist),
1178					M_DEVBUF, M_NOWAIT);
1179			if (new == NULL) {
1180				printf("no memory for controller struct!\n");
1181				break;
1182			}
1183			bzero(new, sizeof(struct tl_iflist));
1184			new->tl_config_id = config_id;
1185			new->tl_dinfo = t;
1186			new->tl_next = tl_iflist;
1187			tl_iflist = new;
1188			return(t->tl_name);
1189		}
1190		t++;
1191	}
1192
1193	return(NULL);
1194}
1195
1196/*
1197 * The ThunderLAN controller can support multiple PHYs. Logically,
1198 * this means we have to be able to deal with each PHY as a separate
1199 * interface. We therefore consider ThunderLAN devices as follows:
1200 *
1201 * o Each ThunderLAN controller device is assigned the name tlcX where
1202 *   X is the controller's unit number. Each ThunderLAN device found
1203 *   is assigned a different number.
1204 *
1205 * o Each PHY on each controller is assigned the name tlX. X starts at
1206 *   0 and is incremented each time an additional PHY is found.
1207 *
1208 * So, if you had two dual-channel ThunderLAN cards, you'd have
1209 * tlc0 and tlc1 (the controllers) and tl0, tl1, tl2, tl3 (the logical
1210 * interfaces). I think. I'm still not sure how dual channel controllers
1211 * work as I've yet to see one.
1212 */
1213
1214/*
1215 * Do the interface setup and attach for a PHY on a particular
1216 * ThunderLAN chip. Also set up interrupt vectors.
1217 */
1218static int tl_attach_phy(csr, tl_unit, eaddr, tl_phy, ilist)
1219	struct tl_csr		*csr;
1220	int			tl_unit;
1221	char			*eaddr;
1222	int			tl_phy;
1223	struct tl_iflist	*ilist;
1224{
1225	struct tl_softc		*sc;
1226	struct ifnet		*ifp;
1227	int			phy_ctl;
1228	struct tl_type		*p = tl_phys;
1229	struct tl_mii_frame	frame;
1230	int			i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1231	unsigned int		round;
1232	caddr_t			roundptr;
1233
1234	if (tl_phy != TL_PHYADDR_MAX)
1235		tl_softreset(csr, 0);
1236
1237	/* Reset the PHY again, just in case. */
1238	bzero((char *)&frame, sizeof(frame));
1239	frame.mii_phyaddr = tl_phy;
1240	frame.mii_regaddr = TL_PHY_GENCTL;
1241	frame.mii_data = PHY_BMCR_RESET;
1242	tl_mii_writereg(csr, &frame);
1243	DELAY(500);
1244	frame.mii_data = 0;
1245
1246	/* First, allocate memory for the softc struct. */
1247	sc = malloc(sizeof(struct tl_softc), M_DEVBUF, M_NOWAIT);
1248	if (sc == NULL) {
1249		printf("tlc%d: no memory for softc struct!\n", ilist->tlc_unit);
1250		return(1);
1251	}
1252
1253	bzero(sc, sizeof(struct tl_softc));
1254
1255	/*
1256	 * Now allocate memory for the TX and RX lists. Note that
1257	 * we actually allocate 8 bytes more than we really need:
1258	 * this is because we need to adjust the final address to
1259	 * be aligned on a quadword (64-bit) boundary in order to
1260	 * make the chip happy. If the list structures aren't properly
1261	 * aligned, DMA fails and the chip generates an adapter check
1262	 * interrupt and has to be reset. If you set up the softc struct
1263	 * just right you can sort of obtain proper alignment 'by chance.'
1264	 * But I don't want to depend on this, so instead the alignment
1265	 * is forced here.
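	 * For example, if malloc() happens to return an address ending in
	 * 0xC, the loop below walks the pointer forward 4 bytes so that
	 * the list data begins on an 8-byte boundary.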
1266	 */
1267	sc->tl_ldata_ptr = malloc(sizeof(struct tl_list_data) + 8,
1268				M_DEVBUF, M_NOWAIT);
1269
1270	if (sc->tl_ldata_ptr == NULL) {
1271		free(sc, M_DEVBUF);
1272		printf("tlc%d: no memory for list buffers!\n", ilist->tlc_unit);
1273		return(1);
1274	}
1275
1276	/*
1277	 * Convoluted but satisfies my ANSI sensibilities. GCC lets
1278	 * you do casts on the LHS of an assignment, but ANSI doesn't
1279	 * allow that.
1280	 */
1281	sc->tl_ldata = (struct tl_list_data *)sc->tl_ldata_ptr;
1282	round = (unsigned int)sc->tl_ldata_ptr & 0xF;
1283	roundptr = sc->tl_ldata_ptr;
1284	for (i = 0; i < 8; i++) {
1285		if (round % 8) {
1286			round++;
1287			roundptr++;
1288		} else
1289			break;
1290	}
1291	sc->tl_ldata = (struct tl_list_data *)roundptr;
1292
1293	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1294
1295	sc->csr = csr;
1296	sc->tl_dinfo = ilist->tl_dinfo;
1297	sc->tl_ctlr = ilist->tlc_unit;
1298	sc->tl_unit = tl_unit;
1299	sc->tl_phy_addr = tl_phy;
1300	sc->tl_iflist = ilist;
1301	callout_handle_init(&sc->tl_stat_ch);
1302
1303	frame.mii_regaddr = TL_PHY_VENID;
1304	tl_mii_readreg(csr, &frame);
1305	sc->tl_phy_vid = frame.mii_data;
1306
1307	frame.mii_regaddr = TL_PHY_DEVID;
1308	tl_mii_readreg(csr, &frame);
1309	sc->tl_phy_did = frame.mii_data;
1310
1311	frame.mii_regaddr = TL_PHY_GENSTS;
1312	tl_mii_readreg(csr, &frame);
1313	sc->tl_phy_sts = frame.mii_data;
1314
1315	frame.mii_regaddr = TL_PHY_GENCTL;
1316	tl_mii_readreg(csr, &frame);
1317	phy_ctl = frame.mii_data;
1318
1319	/*
1320	 * PHY revision numbers tend to vary a bit. Our algorithm here
1321	 * is to check everything but the 4 least significant bits.
1322	 */
1323	while(p->tl_vid) {
1324		if (sc->tl_phy_vid  == p->tl_vid &&
1325			(sc->tl_phy_did | 0x000F) == p->tl_did) {
1326			sc->tl_pinfo = p;
1327			break;
1328		}
1329		p++;
1330	}
1331	if (sc->tl_pinfo == NULL) {
1332		sc->tl_pinfo = &tl_phys[PHY_UNKNOWN];
1333	}
1334
1335	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1336	ifp = &sc->arpcom.ac_if;
1337	ifp->if_softc = sc;
1338	ifp->if_unit = tl_unit;
1339	ifp->if_name = "tl";
1340	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1341	ifp->if_ioctl = tl_ioctl;
1342	ifp->if_output = ether_output;
1343	ifp->if_start = tl_start;
1344	ifp->if_watchdog = tl_watchdog;
1345	ifp->if_init = tl_init;
1346
1347	if (sc->tl_phy_sts & PHY_BMSR_100BT4 ||
1348		sc->tl_phy_sts & PHY_BMSR_100BTXFULL ||
1349		sc->tl_phy_sts & PHY_BMSR_100BTXHALF)
1350		ifp->if_baudrate = 100000000;
1351	else
1352		ifp->if_baudrate = 10000000;
1353
1354	ilist->tl_sc[tl_phy] = sc;
1355
1356	printf("tl%d at tlc%d physical interface %d\n", ifp->if_unit,
1357						sc->tl_ctlr,
1358						sc->tl_phy_addr);
1359
1360	printf("tl%d: %s ", ifp->if_unit, sc->tl_pinfo->tl_name);
1361
1362	if (sc->tl_phy_sts & PHY_BMSR_100BT4 ||
1363		sc->tl_phy_sts & PHY_BMSR_100BTXFULL ||
1364		sc->tl_phy_sts & PHY_BMSR_100BTXHALF)
1365		printf("10/100Mbps ");
1366	else {
1367		media &= ~IFM_100_TX;
1368		media |= IFM_10_T;
1369		printf("10Mbps ");
1370	}
1371
1372	if (sc->tl_phy_sts & PHY_BMSR_100BTXFULL ||
1373		sc->tl_phy_sts & PHY_BMSR_10BTFULL)
1374		printf("full duplex ");
1375	else {
1376		printf("half duplex ");
1377		media &= ~IFM_FDX;
1378	}
1379
1380	if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG) {
1381		media = IFM_ETHER|IFM_AUTO;
1382		printf("autonegotiating\n");
1383	} else
1384		printf("\n");
1385
1386	/* If this isn't a known PHY, print the PHY identifier info. */
1387	if (sc->tl_pinfo->tl_vid == 0)
1388		printf("tl%d: vendor id: %04x product id: %04x\n",
1389			sc->tl_unit, sc->tl_phy_vid, sc->tl_phy_did);
1390
1391	/* Set up ifmedia data and callbacks. */
1392	ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1393
1394	/*
1395	 * All ThunderLANs support at least 10baseT half duplex.
1396	 * They also support AUI selection if used in 10Mb/s modes.
1397	 * They all also support a loopback mode.
1398	 */
1399	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1400	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1401	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1402	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_LOOP, 0, NULL);
1403
1404	/* Some ThunderLAN PHYs support autonegotiation. */
1405	if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG)
1406		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1407
1408	/* Some support 10baseT full duplex. */
1409	if (sc->tl_phy_sts & PHY_BMSR_10BTFULL)
1410		ifmedia_add(&sc->ifmedia,
1411			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1412
1413	/* Some support 100BaseTX half duplex. */
1414	if (sc->tl_phy_sts & PHY_BMSR_100BTXHALF)
1415		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1416	if (sc->tl_phy_sts & PHY_BMSR_100BTXHALF)
1417		ifmedia_add(&sc->ifmedia,
1418			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1419
1420	/* Some support 100BaseTX full duplex. */
1421	if (sc->tl_phy_sts & PHY_BMSR_100BTXFULL)
1422		ifmedia_add(&sc->ifmedia,
1423			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1424
1425	/* Some also support 100BaseT4. */
1426	if (sc->tl_phy_sts & PHY_BMSR_100BT4)
1427		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
1428
1429	/* Set default media. */
1430	ifmedia_set(&sc->ifmedia, media);
1431
1432	/*
1433	 * Kick off an autonegotiation session if this PHY supports it.
1434	 * This is necessary to make sure the chip's duplex mode matches
1435	 * the PHY's duplex mode. It may not: once enabled, the PHY may
1436	 * autonegotiate full-duplex mode with its link partner, but the
1437	 * ThunderLAN chip defaults to half-duplex and stays there unless
1438	 * told otherwise.
1439	 */
1440	if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG)
1441		tl_autoneg(sc, TL_FLAG_FORCEDELAY, 0);
1442
1443	/*
1444	 * Call MI attach routines.
1445	 */
1446	if_attach(ifp);
1447	ether_ifattach(ifp);
1448
1449#if NBPFILTER > 0
1450	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1451#endif
1452
1453	return(0);
1454}
1455
1456static void
1457tl_attach_ctlr(config_id, unit)
1458	pcici_t			config_id;
1459	int			unit;
1460{
1461	int			s, i, phys = 0;
1462	vm_offset_t		pbase, vbase;
1463	struct tl_csr		*csr;
1464	char			eaddr[ETHER_ADDR_LEN];
1465	struct tl_mii_frame	frame;
1466	u_int32_t		command;
1467	struct tl_iflist	*ilist;
1468
1469	s = splimp();
1470
1471	for (ilist = tl_iflist; ilist != NULL; ilist = ilist->tl_next)
1472		if (ilist->tl_config_id == config_id)
1473			break;
1474
1475	if (ilist == NULL) {
1476		printf("couldn't match config id with controller struct\n");
1477		goto fail;
1478	}
1479
1480	/*
1481	 * Map control/status registers.
1482	 */
1483	pci_conf_write(config_id, PCI_COMMAND_STATUS_REG,
1484			PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1485
1486	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1487
1488	if (!(command & PCIM_CMD_MEMEN)) {
1489		printf("tlc%d: failed to enable memory mapping!\n", unit);
1490		goto fail;
1491	}
1492
1493	if (!pci_map_mem(config_id, TL_PCI_LOMEM, &vbase, &pbase)) {
1494		printf ("tlc%d: couldn't map memory\n", unit);
1495		goto fail;
1496	}
1497
1498	csr = (struct tl_csr *)vbase;
1499
1500	ilist->csr = csr;
1501	ilist->tl_active_phy = TL_PHYS_IDLE;
1502	ilist->tlc_unit = unit;
1503
1504	/* Allocate interrupt */
1505	if (!pci_map_int(config_id, tl_intr, ilist, &net_imask)) {
1506		printf("tlc%d: couldn't map interrupt\n", unit);
1507		goto fail;
1508	}
1509
1510	/* Reset the adapter. */
1511	tl_softreset(csr, 1);
1512
1513	/*
1514	 * Get station address from the EEPROM.
1515	 */
1516	if (tl_read_eeprom(csr, (caddr_t)&eaddr,
1517				TL_EEPROM_EADDR, ETHER_ADDR_LEN)) {
1518		printf("tlc%d: failed to read station address\n", unit);
1519		goto fail;
1520	}
1521
1522	/*
1523	 * A ThunderLAN chip was detected. Inform the world.
1524	 */
1525	printf("tlc%d: Ethernet address: %6D\n", unit, eaddr, ":");
1526
1527	/*
1528	 * Now attach the ThunderLAN's PHYs. There will always
1529	 * be at least one PHY; if the PHY address is 0x1F, then
1530	 * it's the internal one. If we encounter a lower numbered
1531 * PHY, we ignore the internal one since enabling the
1532	 * internal PHY disables the external one.
1533	 */
1534
1535	bzero((char *)&frame, sizeof(frame));
1536
1537	for (i = TL_PHYADDR_MIN; i < TL_PHYADDR_MAX + 1; i++) {
1538		frame.mii_phyaddr = i;
1539		frame.mii_regaddr = TL_PHY_GENCTL;
1540		frame.mii_data = PHY_BMCR_RESET;
1541		tl_mii_writereg(csr, &frame);
1542		DELAY(500);
1543		while(frame.mii_data & PHY_BMCR_RESET)
1544			tl_mii_readreg(csr, &frame);
1545		frame.mii_regaddr = TL_PHY_VENID;
1546		frame.mii_data = 0;
1547		tl_mii_readreg(csr, &frame);
1548		if (!frame.mii_data)
1549			continue;
1550		if (tl_attach_phy(csr, phys, eaddr, i, ilist)) {
1551			printf("tlc%d: failed to attach interface %d\n",
1552						unit, i);
1553			goto fail;
1554		}
1555		phys++;
1556		if (phys && i != TL_PHYADDR_MAX)
1557			break;
1558	}
1559
1560	if (!phys) {
1561		printf("tlc%d: no physical interfaces attached!\n", unit);
1562		goto fail;
1563	}
1564
1565	at_shutdown(tl_shutdown, ilist, SHUTDOWN_POST_SYNC);
1566
1567fail:
1568	splx(s);
1569	return;
1570}
1571
1572/*
1573 * Initialize the transmit lists.
1574 */
1575static int tl_list_tx_init(sc)
1576	struct tl_softc		*sc;
1577{
1578	struct tl_chain_data	*cd;
1579	struct tl_list_data	*ld;
1580	int			i;
1581
1582	cd = &sc->tl_cdata;
1583	ld = sc->tl_ldata;
1584	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1585		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1586		if (i == (TL_TX_LIST_CNT - 1))
1587			cd->tl_tx_chain[i].tl_next = NULL;
1588		else
1589			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1590	}
1591
1592	cd->tl_tx_free = &cd->tl_tx_chain[0];
1593	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1594	sc->tl_txeoc = 1;
1595
1596	return(0);
1597}
1598
1599/*
1600 * Initialize the RX lists and allocate mbufs for them.
1601 */
1602static int tl_list_rx_init(sc)
1603	struct tl_softc		*sc;
1604{
1605	struct tl_chain_data	*cd;
1606	struct tl_list_data	*ld;
1607	int			i;
1608
1609	cd = &sc->tl_cdata;
1610	ld = sc->tl_ldata;
1611
1612	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1613		cd->tl_rx_chain[i].tl_ptr =
1614			(struct tl_list *)&ld->tl_rx_list[i];
1615		tl_newbuf(sc, &cd->tl_rx_chain[i]);
1616		if (i == (TL_RX_LIST_CNT - 1)) {
1617			cd->tl_rx_chain[i].tl_next = NULL;
1618			ld->tl_rx_list[i].tlist_fptr = 0;
1619		} else {
1620			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1621			ld->tl_rx_list[i].tlist_fptr =
1622					vtophys(&ld->tl_rx_list[i + 1]);
1623		}
1624	}
1625
1626	cd->tl_rx_head = &cd->tl_rx_chain[0];
1627	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1628
1629	return(0);
1630}
1631
1632static int tl_newbuf(sc, c)
1633	struct tl_softc		*sc;
1634	struct tl_chain		*c;
1635{
1636	struct mbuf		*m_new = NULL;
1637
1638	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1639	if (m_new == NULL) {
1640		printf("tl%d: no memory for rx list\n",
1641				sc->tl_unit);
1642		return(ENOBUFS);
1643	}
1644
1645	MCLGET(m_new, M_DONTWAIT);
1646	if (!(m_new->m_flags & M_EXT)) {
1647		printf("tl%d: no memory for rx list\n", sc->tl_unit);
1648		m_freem(m_new);
1649		return(ENOBUFS);
1650	}
1651
1652	c->tl_mbuf = m_new;
1653	c->tl_next = NULL;
1654	c->tl_ptr->tlist_frsize = MCLBYTES;
1655	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1656	c->tl_ptr->tlist_fptr = 0;
1657	c->tl_ptr->tl_frag[0].tlist_dadr = vtophys(mtod(m_new, caddr_t));
1658	c->tl_ptr->tl_frag[0].tlist_dcnt = MCLBYTES;
1659
1660	return(0);
1661}
1662/*
1663 * Interrupt handler for RX 'end of frame' condition (EOF). This
1664 * tells us that a full ethernet frame has been captured and we need
1665 * to handle it.
1666 *
1667 * Reception is done using 'lists' which consist of a header and a
1668 * series of 10 data count/data address pairs that point to buffers.
1669 * Initially you're supposed to create a list, populate it with pointers
1670 * to buffers, then load the physical address of the list into the
1671 * ch_parm register. The adapter is then supposed to DMA the received
1672 * frame into the buffers for you.
1673 *
1674 * To make things as fast as possible, we have the chip DMA directly
1675 * into mbufs. This saves us from having to do a buffer copy: we can
1676 * just hand the mbufs directly to ether_input(). Once the frame has
1677 * been sent on its way, the 'list' structure is assigned a new buffer
1678 * and moved to the end of the RX chain. As long as we stay ahead of
1679 * the chip, it will always think it has an endless receive channel.
1680 *
1681 * If we happen to fall behind and the chip manages to fill up all of
1682 * the buffers, it will generate an end of channel interrupt and wait
1683 * for us to empty the chain and restart the receiver.
1684 */
1685static int tl_intvec_rxeof(xsc, type)
1686	void			*xsc;
1687	u_int32_t		type;
1688{
1689	struct tl_softc		*sc;
1690	int			r = 0, total_len = 0;
1691	struct ether_header	*eh;
1692	struct mbuf		*m;
1693	struct ifnet		*ifp;
1694	struct tl_chain		*cur_rx;
1695
1696	sc = xsc;
1697	ifp = &sc->arpcom.ac_if;
1698
1699	while(sc->tl_cdata.tl_rx_head->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP){
1700		r++;
1701		cur_rx = sc->tl_cdata.tl_rx_head;
1702		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1703		m = cur_rx->tl_mbuf;
1704		total_len = cur_rx->tl_ptr->tlist_frsize;
1705
1706		tl_newbuf(sc, cur_rx);
1707
1708		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1709						vtophys(cur_rx->tl_ptr);
1710		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1711		sc->tl_cdata.tl_rx_tail = cur_rx;
1712
1713		eh = mtod(m, struct ether_header *);
1714		m->m_pkthdr.rcvif = ifp;
1715
1716#if NBPFILTER > 0
1717		/*
1718	 	 * Handle BPF listeners. Let the BPF user see the packet, but
1719	 	 * don't pass it up to the ether_input() layer unless it's
1720	 	 * a broadcast packet, multicast packet, matches our ethernet
1721	 	 * address or the interface is in promiscuous mode. If we don't
1722	 	 * want the packet, just free it and move on: tl_newbuf()
1723	 	 * has already given this list a fresh mbuf.
1724	 	 */
1725		if (ifp->if_bpf) {
1726			m->m_pkthdr.len = m->m_len = total_len;
1727			bpf_mtap(ifp, m);
1728			if (ifp->if_flags & IFF_PROMISC &&
1729				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1730		 				ETHER_ADDR_LEN) &&
1731					(eh->ether_dhost[0] & 1) == 0)) {
1732				m_freem(m);
1733				continue;
1734			}
1735		}
1736#endif
1737		/* Remove header from mbuf and pass it on. */
1738		m->m_pkthdr.len = m->m_len =
1739				total_len - sizeof(struct ether_header);
1740		m->m_data += sizeof(struct ether_header);
1741		ether_input(ifp, eh, m);
1742	}
1743
1744	return(r);
1745}
1746
1747/*
1748 * The RX-EOC condition hits when the ch_parm address hasn't been
1749 * initialized or the adapter reached a list with a forward pointer
1750 * of 0 (which indicates the end of the chain). In our case, this means
1751 * the card has hit the end of the receive buffer chain and we need to
1752 * empty out the buffers and shift the pointer back to the beginning again.
1753 */
1754static int tl_intvec_rxeoc(xsc, type)
1755	void			*xsc;
1756	u_int32_t		type;
1757{
1758	struct tl_softc		*sc;
1759	int			r;
1760
1761	sc = xsc;
1762
1763	/* Flush out the receive queue and ack RXEOF interrupts. */
1764	r = tl_intvec_rxeof(xsc, type);
1765	sc->csr->tl_host_cmd = TL_CMD_ACK | r | (type & ~(0x00100000));
1766	r = 1;
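	/*
	 * Reload the channel parameter register with the head of the
	 * RX chain; the GO and RT bits ORed into the return value are
	 * folded into the TL_CMD_ACK write issued by tl_intr(), which
	 * restarts the receive channel.
	 */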
1767	sc->csr->tl_ch_parm = vtophys(sc->tl_cdata.tl_rx_head->tl_ptr);
1768	r |= (TL_CMD_GO|TL_CMD_RT);
1769	return(r);
1770}
1771
1772/*
1773 * Invalid interrupt handler. The manual says invalid interrupts
1774 * are caused by hardware errors elsewhere in the system and that they
1775 * should just be ignored.
1776 */
1777static int tl_intvec_invalid(xsc, type)
1778	void			*xsc;
1779	u_int32_t		type;
1780{
1781	struct tl_softc		*sc;
1782
1783	sc = xsc;
1784
1785#ifdef DIAGNOSTIC
1786	printf("tl%d: got an invalid interrupt!\n", sc->tl_unit);
1787#endif
1788	/* Re-enable interrupts but don't ack this one. */
1789	sc->csr->tl_host_cmd |= type;
1790
1791	return(0);
1792}
1793
1794/*
1795 * Dummy interrupt handler. Dummy interrupts are generated by setting
1796 * the ReqInt bit in the host command register. They should only occur
1797 * if we ask for them, and we never do, so if one magically appears,
1798 * we should make some noise about it.
1799 */
1800static int tl_intvec_dummy(xsc, type)
1801	void			*xsc;
1802	u_int32_t		type;
1803{
1804	struct tl_softc		*sc;
1805
1806	sc = xsc;
1807	printf("tl%d: got a dummy interrupt\n", sc->tl_unit);
1808
1809	return(1);
1810}
1811
1812/*
1813 * Stats counter overflow interrupt. The chip delivers one of these
1814 * if we don't poll the stats counters often enough.
1815 */
1816static int tl_intvec_statoflow(xsc, type)
1817	void			*xsc;
1818	u_int32_t		type;
1819{
1820	struct tl_softc		*sc;
1821
1822	sc = xsc;
1823
1824	tl_stats_update(sc);
1825
1826	return(1);
1827}
1828
1829static int tl_intvec_txeof(xsc, type)
1830	void			*xsc;
1831	u_int32_t		type;
1832{
1833	struct tl_softc		*sc;
1834	int			r = 0;
1835	struct tl_chain		*cur_tx;
1836
1837	sc = xsc;
1838
1839	/*
1840	 * Go through our tx list and free mbufs for those
1841	 * frames that have been sent.
1842	 */
1843	while (sc->tl_cdata.tl_tx_head != NULL) {
1844		cur_tx = sc->tl_cdata.tl_tx_head;
1845		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1846			break;
1847		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1848
1849		r++;
1850		m_freem(cur_tx->tl_mbuf);
1851		cur_tx->tl_mbuf = NULL;
1852
1853		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1854		sc->tl_cdata.tl_tx_free = cur_tx;
1855	}
1856
1857	return(r);
1858}
1859
1860/*
1861 * The transmit end of channel interrupt. The adapter triggers this
1862 * interrupt to tell us it hit the end of the current transmit list.
1863 *
1864 * A note about this: it's possible for a condition to arise where
1865 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1866 * You have to avoid this since the chip expects things to go in a
1867 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1868 * When the TXEOF handler is called, it will free all of the transmitted
1869 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1870 * interrupt should be received and acknowledged before any more frames
1871 * are queued for transmission. If tl_start() is called after TXEOF
1872 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1873 * it could attempt to issue a transmit command prematurely.
1874 *
1875 * To guard against this, tl_start() will only issue transmit commands
1876 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1877 * can set this flag once tl_start() has cleared it.
1878 */
1879static int tl_intvec_txeoc(xsc, type)
1880	void			*xsc;
1881	u_int32_t		type;
1882{
1883	struct tl_softc		*sc;
1884	struct ifnet		*ifp;
1885	u_int32_t		cmd;
1886
1887	sc = xsc;
1888	ifp = &sc->arpcom.ac_if;
1889
1890	/* Clear the timeout timer. */
1891	ifp->if_timer = 0;
1892
1893	if (sc->tl_cdata.tl_tx_head == NULL) {
1894		ifp->if_flags &= ~IFF_OACTIVE;
1895		sc->tl_cdata.tl_tx_tail = NULL;
1896		sc->tl_txeoc = 1;
1897	} else {
1898		sc->tl_txeoc = 0;
1899		/* First we have to ack the EOC interrupt. */
1900		sc->csr->tl_host_cmd = TL_CMD_ACK | 0x00000001 | type;
1901		/* Then load the address of the next TX list. */
1902		sc->csr->tl_ch_parm = vtophys(sc->tl_cdata.tl_tx_head->tl_ptr);
1903		/* Restart TX channel. */
1904		cmd = sc->csr->tl_host_cmd;
1905		cmd &= ~TL_CMD_RT;
1906		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1907		sc->csr->tl_host_cmd = cmd;
1908		return(0);
1909	}
1910
1911	return(1);
1912}
1913
1914static int tl_intvec_adchk(xsc, type)
1915	void			*xsc;
1916	u_int32_t		type;
1917{
1918	struct tl_softc		*sc;
1919
1920	sc = xsc;
1921
1922	printf("tl%d: adapter check: %x\n", sc->tl_unit, sc->csr->tl_ch_parm);
1923
1924	tl_softreset(sc->csr, sc->tl_phy_addr == TL_PHYADDR_MAX ? 1 : 0);
1925	tl_init(sc);
1926	sc->csr->tl_host_cmd |= TL_CMD_INTSON;
1927
1928	return(0);
1929}
1930
1931static int tl_intvec_netsts(xsc, type)
1932	void			*xsc;
1933	u_int32_t		type;
1934{
1935	struct tl_softc		*sc;
1936	u_int16_t		netsts;
1937	struct tl_csr		*csr;
1938
1939	sc = xsc;
1940	csr = sc->csr;
1941
1942	DIO_SEL(TL_NETSTS);
1943	netsts = DIO_BYTE2_GET(0xFF);
1944	DIO_BYTE2_SET(netsts);
1945
1946	printf("tl%d: network status: %x\n", sc->tl_unit, netsts);
1947
1948	return(1);
1949}
1950
1951static void tl_intr(xilist)
1952	void			*xilist;
1953{
1954	struct tl_iflist	*ilist;
1955	struct tl_softc		*sc;
1956	struct tl_csr		*csr;
1957	struct ifnet		*ifp;
1958	int			r = 0;
1959	u_int32_t		type = 0;
1960	u_int16_t		ints = 0;
1961	u_int8_t		ivec = 0;
1962
1963	ilist = xilist;
1964	csr = ilist->csr;
1965
1966	/* Disable interrupts */
1967	ints = csr->tl_host_int;
1968	csr->tl_host_int = ints;
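	/*
	 * Split the latched status word into its pieces: the raw value
	 * is shifted into the upper 16 bits for the eventual TL_CMD_ACK
	 * write, while the vector and type fields are used below to
	 * dispatch to the right handler.
	 */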
1969	type = (ints << 16) & 0xFFFF0000;
1970	ivec = (ints & TL_VEC_MASK) >> 5;
1971	ints = (ints & TL_INT_MASK) >> 2;
1972	/*
1973 	 * An interrupt has been posted by the ThunderLAN, but we
1974	 * have to figure out which PHY generated it before we can
1975	 * do anything with it. If we receive an interrupt when we
1976	 * know none of the PHYs are turned on, then either there's
1977	 * a bug in the driver or we were handed an interrupt that
1978	 * doesn't actually belong to us.
1979	 */
1980	if (ilist->tl_active_phy == TL_PHYS_IDLE) {
1981		printf("tlc%d: interrupt type %x with all phys idle\n",
1982			ilist->tlc_unit, ints);
1983		return;
1984	}
1985
1986	sc = ilist->tl_sc[ilist->tl_active_phy];
1987	csr = sc->csr;
1988	ifp = &sc->arpcom.ac_if;
1989
1990	switch(ints) {
1991	case (TL_INTR_INVALID):
1992		r = tl_intvec_invalid((void *)sc, type);
1993		break;
1994	case (TL_INTR_TXEOF):
1995		r = tl_intvec_txeof((void *)sc, type);
1996		break;
1997	case (TL_INTR_TXEOC):
1998		r = tl_intvec_txeoc((void *)sc, type);
1999		break;
2000	case (TL_INTR_STATOFLOW):
2001		r = tl_intvec_statoflow((void *)sc, type);
2002		break;
2003	case (TL_INTR_RXEOF):
2004		r = tl_intvec_rxeof((void *)sc, type);
2005		break;
2006	case (TL_INTR_DUMMY):
2007		r = tl_intvec_dummy((void *)sc, type);
2008		break;
2009	case (TL_INTR_ADCHK):
2010		if (ivec)
2011			r = tl_intvec_adchk((void *)sc, type);
2012		else
2013			r = tl_intvec_netsts((void *)sc, type);
2014		break;
2015	case (TL_INTR_RXEOC):
2016		r = tl_intvec_rxeoc((void *)sc, type);
2017		break;
2018	default:
2019		printf("tl%d: bogus interrupt type\n", ifp->if_unit);
2020		break;
2021	}
2022
2023	/* Re-enable interrupts */
2024	if (r)
2025		csr->tl_host_cmd = TL_CMD_ACK | r | type;
2026
2027	return;
2028}
2029
2030static void tl_stats_update(xsc)
2031	void			*xsc;
2032{
2033	struct tl_softc		*sc;
2034	struct ifnet		*ifp;
2035	struct tl_csr		*csr;
2036	struct tl_stats		tl_stats;
2037	u_int32_t		*p;
2038
2039	bzero((char *)&tl_stats, sizeof(struct tl_stats));
2040
2041	sc = xsc;
2042	csr = sc->csr;
2043	ifp = &sc->arpcom.ac_if;
2044
2045	p = (u_int32_t *)&tl_stats;
2046
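	/*
	 * Select the first statistics register with auto-increment
	 * enabled, then pull all five 32-bit counters into the local
	 * tl_stats structure in one pass.
	 */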
2047	DIO_SEL(TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
2048	DIO_LONG_GET(*p++);
2049	DIO_LONG_GET(*p++);
2050	DIO_LONG_GET(*p++);
2051	DIO_LONG_GET(*p++);
2052	DIO_LONG_GET(*p++);
2053
2054	ifp->if_opackets += tl_tx_goodframes(tl_stats);
2055	ifp->if_collisions += tl_stats.tl_tx_single_collision +
2056				tl_stats.tl_tx_multi_collision;
2057	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
2058	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
2059			    tl_rx_overrun(tl_stats);
2060	ifp->if_oerrors += tl_tx_underrun(tl_stats);
2061
2062	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
2063
2064	return;
2065}
2066
2067/*
2068 * Encapsulate an mbuf chain in a list by coupling the mbuf data
2069 * pointers to the fragment pointers.
2070 */
2071static int tl_encap(sc, c, m_head)
2072	struct tl_softc		*sc;
2073	struct tl_chain		*c;
2074	struct mbuf		*m_head;
2075{
2076	int			frag = 0;
2077	struct tl_frag		*f = NULL;
2078	int			total_len;
2079	struct mbuf		*m;
2080
2081	/*
2082 	 * Start packing the mbufs in this chain into
2083	 * the fragment pointers. Stop when we run out
2084 	 * of fragments or hit the end of the mbuf chain.
2085	 */
2086	m = m_head;
2087	total_len = 0;
2088
2089	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
2090		if (m->m_len != 0) {
2091			if (frag == TL_MAXFRAGS)
2092				break;
2093			total_len+= m->m_len;
2094			c->tl_ptr->tl_frag[frag].tlist_dadr =
2095				vtophys(mtod(m, vm_offset_t));
2096			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
2097			frag++;
2098		}
2099	}
2100
2101	/*
2102	 * Handle special cases.
2103	 * Special case #1: we used up all 10 fragments, but
2104	 * we have more mbufs left in the chain. Copy the
2105	 * data into an mbuf cluster. Note that we don't
2106	 * bother clearing the values in the other fragment
2107	 * pointers/counters; it wouldn't gain us anything,
2108	 * and would waste cycles.
2109	 */
2110	if (m != NULL) {
2111		struct mbuf		*m_new = NULL;
2112
2113		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2114		if (m_new == NULL) {
2115			printf("tl%d: no memory for tx list\n", sc->tl_unit);
2116			return(1);
2117		}
2118		if (m_head->m_pkthdr.len > MHLEN) {
2119			MCLGET(m_new, M_DONTWAIT);
2120			if (!(m_new->m_flags & M_EXT)) {
2121				m_freem(m_new);
2122				printf("tl%d: no memory for tx list\n",
2123				sc->tl_unit);
2124				return(1);
2125			}
2126		}
2127		m_copydata(m_head, 0, m_head->m_pkthdr.len,
2128					mtod(m_new, caddr_t));
2129		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
2130		m_freem(m_head);
2131		m_head = m_new;
2132		f = &c->tl_ptr->tl_frag[0];
2133		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
2134		f->tlist_dcnt = total_len = m_new->m_len;
2135		frag = 1;
2136	}
2137
2138	/*
2139	 * Special case #2: the frame is smaller than the minimum
2140	 * frame size. We have to pad it to make the chip happy.
2141	 */
2142	if (total_len < TL_MIN_FRAMELEN) {
2143		if (frag == TL_MAXFRAGS)
2144			printf("all frags filled but frame still too small!\n");
2145		f = &c->tl_ptr->tl_frag[frag];
2146		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
2147		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
2148		total_len += f->tlist_dcnt;
2149		frag++;
2150	}
2151
2152	c->tl_mbuf = m_head;
2153	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
2154	c->tl_ptr->tlist_frsize = total_len;
2155	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
2156	c->tl_ptr->tlist_fptr = 0;
2157
2158	return(0);
2159}
2160
2161/*
2162 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2163 * to the mbuf data regions directly in the transmit lists. We also save a
2164 * copy of the pointers since the transmit list fragment pointers are
2165 * physical addresses.
2166 */
2167static void tl_start(ifp)
2168	struct ifnet		*ifp;
2169{
2170	struct tl_softc		*sc;
2171	struct tl_csr		*csr;
2172	struct mbuf		*m_head = NULL;
2173	u_int32_t		cmd;
2174	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
2175
2176	sc = ifp->if_softc;
2177	csr = sc->csr;
2178
2179	/*
2180	 * Check for an available queue slot. If there are none,
2181	 * punt.
2182	 */
2183	if (sc->tl_cdata.tl_tx_free == NULL) {
2184		ifp->if_flags |= IFF_OACTIVE;
2185		return;
2186	}
2187
2188	start_tx = sc->tl_cdata.tl_tx_free;
2189
2190	while(sc->tl_cdata.tl_tx_free != NULL) {
2191		IF_DEQUEUE(&ifp->if_snd, m_head);
2192		if (m_head == NULL)
2193			break;
2194
2195		/* Pick a chain member off the free list. */
2196		cur_tx = sc->tl_cdata.tl_tx_free;
2197		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
2198
2199		cur_tx->tl_next = NULL;
2200
2201		/* Pack the data into the list. */
2202		tl_encap(sc, cur_tx, m_head);
2203
2204		/* Chain it together */
2205		if (prev != NULL) {
2206			prev->tl_next = cur_tx;
2207			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
2208		}
2209		prev = cur_tx;
2210
2211		/*
2212		 * If there's a BPF listener, bounce a copy of this frame
2213		 * to him.
2214		 */
2215#if NBPFILTER > 0
2216		if (ifp->if_bpf)
2217			bpf_mtap(ifp, cur_tx->tl_mbuf);
2218#endif
2219	}
2220
2221	/*
2222	 * That's all we can stands, we can't stands no more.
2223	 * If there are no other transfers pending, then issue the
2224	 * TX GO command to the adapter to start things moving.
2225	 * Otherwise, just leave the data in the queue and let
2226	 * the EOF/EOC interrupt handler send.
2227	 */
2228	if (sc->tl_cdata.tl_tx_head == NULL) {
2229		sc->tl_cdata.tl_tx_head = start_tx;
2230		sc->tl_cdata.tl_tx_tail = cur_tx;
2231		if (sc->tl_txeoc) {
2232			sc->tl_txeoc = 0;
2233			sc->csr->tl_ch_parm = vtophys(start_tx->tl_ptr);
2234			cmd = sc->csr->tl_host_cmd;
2235			cmd &= ~TL_CMD_RT;
2236			cmd |= TL_CMD_GO|TL_CMD_INTSON;
2237			sc->csr->tl_host_cmd = cmd;
2238		}
2239	} else {
2240		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
2241		sc->tl_cdata.tl_tx_tail->tl_ptr->tlist_fptr =
2242					vtophys(start_tx->tl_ptr);
2243		sc->tl_cdata.tl_tx_tail = start_tx;
2244	}
2245
2246	/*
2247	 * Set a timeout in case the chip goes out to lunch.
2248	 */
2249	ifp->if_timer = 5;
2250
2251	return;
2252}
2253
2254static void tl_init(xsc)
2255	void			*xsc;
2256{
2257	struct tl_softc		*sc = xsc;
2258	struct ifnet		*ifp = &sc->arpcom.ac_if;
2259	struct tl_csr		*csr = sc->csr;
2260	int			s;
2261	u_int16_t		phy_sts;
2262
2263	s = splimp();
2264
2265	ifp = &sc->arpcom.ac_if;
2266
2267	/*
2268	 * Cancel pending I/O.
2269	 */
2270	tl_stop(sc);
2271
2272	/*
2273	 * Set 'capture all frames' bit for promiscuous mode.
2274	 */
2275	if (ifp->if_flags & IFF_PROMISC) {
2276		DIO_SEL(TL_NETCMD);
2277		DIO_BYTE0_SET(TL_CMD_CAF);
2278	} else {
2279		DIO_SEL(TL_NETCMD);
2280		DIO_BYTE0_CLR(TL_CMD_CAF);
2281	}
2282
2283	/*
2284	 * Set capture broadcast bit to capture broadcast frames.
2285	 */
2286	if (ifp->if_flags & IFF_BROADCAST) {
2287		DIO_SEL(TL_NETCMD);
2288		DIO_BYTE0_CLR(TL_CMD_NOBRX);
2289	} else {
2290		DIO_SEL(TL_NETCMD);
2291		DIO_BYTE0_SET(TL_CMD_NOBRX);
2292	}
2293
2294	/* Init our MAC address */
2295	DIO_SEL(TL_AREG0_B5);
2296	csr->u.tl_dio_bytes.byte0 = sc->arpcom.ac_enaddr[0];
2297	csr->u.tl_dio_bytes.byte1 = sc->arpcom.ac_enaddr[1];
2298	csr->u.tl_dio_bytes.byte2 = sc->arpcom.ac_enaddr[2];
2299	csr->u.tl_dio_bytes.byte3 = sc->arpcom.ac_enaddr[3];
2300	DIO_SEL(TL_AREG0_B1);
2301	csr->u.tl_dio_bytes.byte0 = sc->arpcom.ac_enaddr[4];
2302	csr->u.tl_dio_bytes.byte1 = sc->arpcom.ac_enaddr[5];
2303
2304	/* Init circular RX list. */
2305	if (tl_list_rx_init(sc)) {
2306		printf("tl%d: failed to set up rx lists\n", sc->tl_unit);
2307		return;
2308	}
2309
2310	/* Init TX pointers. */
2311	tl_list_tx_init(sc);
2312
2313	/*
2314	 * Enable PHY interrupts.
2315	 */
2316	phy_sts = tl_phy_readreg(sc, TL_PHY_CTL);
2317	phy_sts |= PHY_CTL_INTEN;
2318	tl_phy_writereg(sc, TL_PHY_CTL, phy_sts);
2319
2320	/* Enable MII interrupts. */
2321	DIO_SEL(TL_NETSIO);
2322	DIO_BYTE1_SET(TL_SIO_MINTEN);
2323
2324	/* Enable PCI interrupts. */
2325	csr->tl_host_cmd |= TL_CMD_INTSON;
2326
2327	/* Load the address of the rx list */
2328	sc->csr->tl_host_cmd |= TL_CMD_RT;
2329	sc->csr->tl_ch_parm = vtophys(&sc->tl_ldata->tl_rx_list[0]);
2330
2331	/* Send the RX go command */
2332	sc->csr->tl_host_cmd |= (TL_CMD_GO|TL_CMD_RT);
2333	sc->tl_iflist->tl_active_phy = sc->tl_phy_addr;
2334
2335	ifp->if_flags |= IFF_RUNNING;
2336	ifp->if_flags &= ~IFF_OACTIVE;
2337
2338	(void)splx(s);
2339
2340	/* Start the stats update counter */
2341	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
2342
2343	return;
2344}
2345
2346/*
2347 * Set media options.
2348 */
2349static int tl_ifmedia_upd(ifp)
2350	struct ifnet		*ifp;
2351{
2352	struct tl_softc		*sc;
2353	struct tl_csr		*csr;
2354	struct ifmedia		*ifm;
2355
2356	sc = ifp->if_softc;
2357	csr = sc->csr;
2358	ifm = &sc->ifmedia;
2359
2360	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2361		return(EINVAL);
2362
2363	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
2364		tl_autoneg(sc, TL_FLAG_SCHEDDELAY, 1);
2365	else
2366		tl_setmode(sc, ifm->ifm_media);
2367
2368	return(0);
2369}
2370
2371/*
2372 * Report current media status.
2373 */
2374static void tl_ifmedia_sts(ifp, ifmr)
2375	struct ifnet		*ifp;
2376	struct ifmediareq	*ifmr;
2377{
2378	u_int16_t		phy_ctl;
2379	u_int16_t		phy_sts;
2380	struct tl_softc		*sc;
2381	struct tl_csr		*csr;
2382
2383	sc = ifp->if_softc;
2384	csr = sc->csr;
2385
2386	ifmr->ifm_active = IFM_ETHER;
2387
2388	phy_ctl = tl_phy_readreg(sc, PHY_BMCR);
2389	phy_sts = tl_phy_readreg(sc, TL_PHY_CTL);
2390
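	/* Translate the PHY control and status bits into ifmedia flags. */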
2391	if (phy_sts & PHY_CTL_AUISEL)
2392		ifmr->ifm_active |= IFM_10_5;
2393
2394	if (phy_ctl & PHY_BMCR_LOOPBK)
2395		ifmr->ifm_active |= IFM_LOOP;
2396
2397	if (phy_ctl & PHY_BMCR_SPEEDSEL)
2398		ifmr->ifm_active |= IFM_100_TX;
2399	else
2400		ifmr->ifm_active |= IFM_10_T;
2401
2402	if (phy_ctl & PHY_BMCR_DUPLEX) {
2403		ifmr->ifm_active |= IFM_FDX;
2404		ifmr->ifm_active &= ~IFM_HDX;
2405	} else {
2406		ifmr->ifm_active &= ~IFM_FDX;
2407		ifmr->ifm_active |= IFM_HDX;
2408	}
2409
2410	if (phy_ctl & PHY_BMCR_AUTONEGENBL)
2411		ifmr->ifm_active |= IFM_AUTO;
2412
2413	return;
2414}
2415
2416static int tl_ioctl(ifp, command, data)
2417	struct ifnet		*ifp;
2418	int			command;
2419	caddr_t			data;
2420{
2421	struct tl_softc		*sc = ifp->if_softc;
2422	struct ifreq		*ifr = (struct ifreq *) data;
2423	int			s, error = 0;
2424
2425	s = splimp();
2426
2427	switch(command) {
2428	case SIOCSIFADDR:
2429	case SIOCGIFADDR:
2430	case SIOCSIFMTU:
2431		error = ether_ioctl(ifp, command, data);
2432		break;
2433	case SIOCSIFFLAGS:
2434		/*
2435		 * Make sure no more than one PHY is active
2436		 * at any one time.
2437		 */
2438		if (ifp->if_flags & IFF_UP) {
2439			if (sc->tl_iflist->tl_active_phy != TL_PHYS_IDLE &&
2440			    sc->tl_iflist->tl_active_phy != sc->tl_phy_addr) {
2441				error = EINVAL;
2442				break;
2443			}
2444			sc->tl_iflist->tl_active_phy = sc->tl_phy_addr;
2445			tl_init(sc);
2446		} else {
2447			if (ifp->if_flags & IFF_RUNNING) {
2448				sc->tl_iflist->tl_active_phy = TL_PHYS_IDLE;
2449				tl_stop(sc);
2450			}
2451		}
2452		error = 0;
2453		break;
2454	case SIOCADDMULTI:
2455	case SIOCDELMULTI:
2456		tl_setmulti(sc);
2457		error = 0;
2458		break;
2459	case SIOCSIFMEDIA:
2460	case SIOCGIFMEDIA:
2461		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2462		break;
2463	default:
2464		error = EINVAL;
2465		break;
2466	}
2467
2468	(void)splx(s);
2469
2470	return(error);
2471}
2472
2473static void tl_watchdog(ifp)
2474	struct ifnet		*ifp;
2475{
2476	struct tl_softc		*sc;
2477	u_int16_t		bmsr;
2478
2479	sc = ifp->if_softc;
2480
2481	if (sc->tl_autoneg) {
2482		tl_autoneg(sc, TL_FLAG_DELAYTIMEO, 1);
2483		return;
2484	}
2485
2486	/* Check that we're still connected. */
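	/*
	 * The link status bit in the MII status register is latched, so
	 * read the register twice and discard the first result to get
	 * the current link state.
	 */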
2487	tl_phy_readreg(sc, PHY_BMSR);
2488	bmsr = tl_phy_readreg(sc, PHY_BMSR);
2489	if (!(bmsr & PHY_BMSR_LINKSTAT)) {
2490		printf("tl%d: no carrier\n", sc->tl_unit);
2491		tl_autoneg(sc, TL_FLAG_SCHEDDELAY, 1);
2492	} else
2493		printf("tl%d: device timeout\n", sc->tl_unit);
2494
2495	ifp->if_oerrors++;
2496
2497	tl_init(sc);
2498
2499	return;
2500}
2501
2502/*
2503 * Stop the adapter and free any mbufs allocated to the
2504 * RX and TX lists.
2505 */
2506static void tl_stop(sc)
2507	struct tl_softc		*sc;
2508{
2509	register int		i;
2510	struct ifnet		*ifp;
2511	struct tl_csr		*csr;
2512	struct tl_mii_frame	frame;
2513
2514	csr = sc->csr;
2515	ifp = &sc->arpcom.ac_if;
2516
2517	/* Stop the stats updater. */
2518	untimeout(tl_stats_update, sc, sc->tl_stat_ch);
2519
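	/*
	 * The TL_CMD_RT bit selects which DMA channel a command applies
	 * to: cleared for the transmit channel, set for the receive
	 * channel.
	 */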
2520	/* Stop the transmitter */
2521	sc->csr->tl_host_cmd &= ~TL_CMD_RT;
2522	sc->csr->tl_host_cmd |= TL_CMD_STOP;
2523
2524	/* Stop the receiver */
2525	sc->csr->tl_host_cmd |= TL_CMD_RT;
2526	sc->csr->tl_host_cmd |= TL_CMD_STOP;
2527
2528	/*
2529	 * Disable host interrupts.
2530	 */
2531	sc->csr->tl_host_cmd |= TL_CMD_INTSOFF;
2532
2533	/*
2534	 * Disable PHY interrupts.
2535	 */
2536	bzero((char *)&frame, sizeof(frame));
2537
2538	frame.mii_phyaddr = sc->tl_phy_addr;
2539	frame.mii_regaddr = TL_PHY_CTL;
2540	tl_mii_readreg(csr, &frame);
2541	frame.mii_data &= ~PHY_CTL_INTEN;
2542	tl_mii_writereg(csr, &frame);
2543
2544	/*
2545	 * Disable MII interrupts.
2546	 */
2547	DIO_SEL(TL_NETSIO);
2548	DIO_BYTE1_CLR(TL_SIO_MINTEN);
2549
2550	/*
2551	 * Clear list pointer.
2552	 */
2553	sc->csr->tl_ch_parm = 0;
2554
2555	/*
2556	 * Free the RX lists.
2557	 */
2558	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2559		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2560			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2561			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2562		}
2563	}
2564	bzero((char *)&sc->tl_ldata->tl_rx_list,
2565		sizeof(sc->tl_ldata->tl_rx_list));
2566
2567	/*
2568	 * Free the TX list buffers.
2569	 */
2570	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2571		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2572			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2573			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2574		}
2575	}
2576	bzero((char *)&sc->tl_ldata->tl_tx_list,
2577		sizeof(sc->tl_ldata->tl_tx_list));
2578
2579	sc->tl_iflist->tl_active_phy = TL_PHYS_IDLE;
2580	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2581
2582	return;
2583}
2584
2585/*
2586 * Stop all chip I/O so that the kernel's probe routines don't
2587 * get confused by errant DMAs when rebooting.
2588 */
2589static void tl_shutdown(howto, xilist)
2590	int			howto;
2591	void			*xilist;
2592{
2593	struct tl_iflist	*ilist = (struct tl_iflist *)xilist;
2594	struct tl_csr		*csr = ilist->csr;
2595	struct tl_mii_frame	frame;
2596	int			i;
2597
2598	/* Stop the transmitter */
2599	csr->tl_host_cmd &= ~TL_CMD_RT;
2600	csr->tl_host_cmd |= TL_CMD_STOP;
2601
2602	/* Stop the receiver */
2603	csr->tl_host_cmd |= TL_CMD_RT;
2604	csr->tl_host_cmd |= TL_CMD_STOP;
2605
2606	/*
2607	 * Disable host interrupts.
2608	 */
2609	csr->tl_host_cmd |= TL_CMD_INTSOFF;
2610
2611	/*
2612	 * Disable PHY interrupts.
2613	 */
2614	bzero((char *)&frame, sizeof(frame));
2615
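	/* Walk every possible PHY address and mask its interrupts. */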
2616	for (i = TL_PHYADDR_MIN; i < TL_PHYADDR_MAX + 1; i++) {
2617		frame.mii_phyaddr = i;
2618		frame.mii_regaddr = TL_PHY_CTL;
2619		tl_mii_readreg(csr, &frame);
2620		frame.mii_data &= ~PHY_CTL_INTEN;
2621		tl_mii_writereg(csr, &frame);
2622	}
2623
2624	/*
2625	 * Disable MII interrupts.
2626	 */
2627	DIO_SEL(TL_NETSIO);
2628	DIO_BYTE1_CLR(TL_SIO_MINTEN);
2629
2630	return;
2631}
2632
2633
2634static struct pci_device tlc_device = {
2635	"tlc",
2636	tl_probe,
2637	tl_attach_ctlr,
2638	&tl_count,
2639	NULL
2640};
2641DATA_SET(pcidevice_set, tlc_device);
2642