if_tl.c (214264) -> if_tl.c (226995)
1/*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/tl/if_tl.c 214264 2010-10-24 12:51:02Z marius $");
34__FBSDID("$FreeBSD: head/sys/dev/tl/if_tl.c 226995 2011-11-01 16:13:59Z marius $");
35
36/*
37 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
38 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
39 * the National Semiconductor DP83840A physical interface and the
40 * Microchip Technology 24Cxx series serial EEPROM.
41 *
42 * Written using the following four documents:
43 *
44 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
45 * National Semiconductor DP83840A data sheet (www.national.com)
46 * Microchip Technology 24C02C data sheet (www.microchip.com)
47 * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
48 *
49 * Written by Bill Paul <wpaul@ctr.columbia.edu>
50 * Electrical Engineering Department
51 * Columbia University, New York City
52 */
53/*
54 * Some notes about the ThunderLAN:
55 *
56 * The ThunderLAN controller is a single chip containing PCI controller
57 * logic, approximately 3K of on-board SRAM, a LAN controller, and media
58 * independent interface (MII) bus. The MII allows the ThunderLAN chip to
59 * control up to 32 different physical interfaces (PHYs). The ThunderLAN
60 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
61 * to act as a complete ethernet interface.
62 *
63 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
64 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
65 * in full or half duplex. Some of the Compaq Deskpro machines use a
66 * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
67 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
68 * concert with the ThunderLAN's internal PHY to provide full 10/100
69 * support. This is cheaper than using a standalone external PHY for both
70 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
71 * A serial EEPROM is also attached to the ThunderLAN chip to provide
72 * power-up default register settings and for storing the adapter's
73 * station address. Although not supported by this driver, the ThunderLAN
74 * chip can also be connected to token ring PHYs.
75 *
76 * The ThunderLAN has a set of registers which can be used to issue
77 * commands, acknowledge interrupts, and to manipulate other internal
78 * registers on its DIO bus. The primary registers can be accessed
79 * using either programmed I/O (inb/outb) or via PCI memory mapping,
80 * depending on how the card is configured during the PCI probing
81 * phase. It is even possible to have both PIO and memory mapped
82 * access turned on at the same time.
83 *
84 * Frame reception and transmission with the ThunderLAN chip is done
85 * using frame 'lists.' A list structure looks more or less like this:
86 *
87 * struct tl_frag {
88 * u_int32_t fragment_address;
89 * u_int32_t fragment_size;
90 * };
91 * struct tl_list {
92 * u_int32_t forward_pointer;
93 * u_int16_t cstat;
94 * u_int16_t frame_size;
95 * struct tl_frag fragments[10];
96 * };
97 *
98 * The forward pointer in the list header can be either a 0 or the address
99 * of another list, which allows several lists to be linked together. Each
100 * list contains up to 10 fragment descriptors. This means the chip allows
101 * ethernet frames to be broken up into up to 10 chunks for transfer to
102 * and from the SRAM. Note that the forward pointer and fragment buffer
103 * addresses are physical memory addresses, not virtual. Note also that
104 * a single ethernet frame can not span lists: if the host wants to
105 * transmit a frame and the frame data is split up over more than 10
 106 * buffers, the frame has to be collapsed before it can be transmitted.
107 *
108 * To receive frames, the driver sets up a number of lists and populates
109 * the fragment descriptors, then it sends an RX GO command to the chip.
110 * When a frame is received, the chip will DMA it into the memory regions
111 * specified by the fragment descriptors and then trigger an RX 'end of
112 * frame interrupt' when done. The driver may choose to use only one
 113 * fragment per list; this may result in slightly less efficient use
114 * of memory in exchange for improving performance.
115 *
116 * To transmit frames, the driver again sets up lists and fragment
117 * descriptors, only this time the buffers contain frame data that
118 * is to be DMA'ed into the chip instead of out of it. Once the chip
 119 * has transferred the data into its on-board SRAM, it will trigger a
120 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
121 * interrupt when it reaches the end of the list.
122 */
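/*
 * Illustrative sketch (not part of the driver or this diff): linking two
 * of the list structures described above and terminating the chain with
 * a 0 forward pointer.  Field names follow the example struct in the
 * comment; the real declarations in if_tlreg.h use different names, and
 * the helper below is hypothetical.
 */
#if 0	/* example only */
static void
example_link_two_lists(struct tl_list *a, struct tl_list *b,
    u_int32_t a_buf_paddr, u_int32_t b_buf_paddr, u_int32_t buflen)
{
	/* The forward pointer and buffer addresses must be physical. */
	a->forward_pointer = vtophys(b);
	a->cstat = 0;
	a->frame_size = 0;
	a->fragments[0].fragment_address = a_buf_paddr;
	a->fragments[0].fragment_size = buflen;

	/* A 0 forward pointer marks the end of the chain. */
	b->forward_pointer = 0;
	b->cstat = 0;
	b->frame_size = 0;
	b->fragments[0].fragment_address = b_buf_paddr;
	b->fragments[0].fragment_size = buflen;
}
#endif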
123/*
124 * Some notes about this driver:
125 *
126 * The ThunderLAN chip provides a couple of different ways to organize
127 * reception, transmission and interrupt handling. The simplest approach
128 * is to use one list each for transmission and reception. In this mode,
129 * the ThunderLAN will generate two interrupts for every received frame
130 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
131 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
132 * performance to have to handle so many interrupts.
133 *
134 * Initially I wanted to create a circular list of receive buffers so
135 * that the ThunderLAN chip would think there was an infinitely long
136 * receive channel and never deliver an RXEOC interrupt. However this
137 * doesn't work correctly under heavy load: while the manual says the
138 * chip will trigger an RXEOF interrupt each time a frame is copied into
139 * memory, you can't count on the chip waiting around for you to acknowledge
140 * the interrupt before it starts trying to DMA the next frame. The result
141 * is that the chip might traverse the entire circular list and then wrap
142 * around before you have a chance to do anything about it. Consequently,
143 * the receive list is terminated (with a 0 in the forward pointer in the
144 * last element). Each time an RXEOF interrupt arrives, the used list
145 * is shifted to the end of the list. This gives the appearance of an
146 * infinitely large RX chain so long as the driver doesn't fall behind
147 * the chip and allow all of the lists to be filled up.
148 *
149 * If all the lists are filled, the adapter will deliver an RX 'end of
150 * channel' interrupt when it hits the 0 forward pointer at the end of
151 * the chain. The RXEOC handler then cleans out the RX chain and resets
152 * the list head pointer in the ch_parm register and restarts the receiver.
153 *
154 * For frame transmission, it is possible to program the ThunderLAN's
155 * transmit interrupt threshold so that the chip can acknowledge multiple
156 * lists with only a single TX EOF interrupt. This allows the driver to
 157 * queue several frames in one shot, and only have to handle a total of
158 * two interrupts (one TX EOF and one TX EOC) no matter how many frames
159 * are transmitted. Frame transmission is done directly out of the
160 * mbufs passed to the tl_start() routine via the interface send queue.
161 * The driver simply sets up the fragment descriptors in the transmit
162 * lists to point to the mbuf data regions and sends a TX GO command.
163 *
164 * Note that since the RX and TX lists themselves are always used
 165 * only by the driver, they are malloc()ed once at driver initialization
166 * time and never free()ed.
167 *
168 * Also, in order to remain as platform independent as possible, this
169 * driver uses memory mapped register access to manipulate the card
170 * as opposed to programmed I/O. This avoids the use of the inb/outb
171 * (and related) instructions which are specific to the i386 platform.
172 *
173 * Using these techniques, this driver achieves very high performance
 174 * by minimizing the number of interrupts generated during large
175 * transfers and by completely avoiding buffer copies. Frame transfer
176 * to and from the ThunderLAN chip is performed entirely by the chip
177 * itself thereby reducing the load on the host CPU.
178 */
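/*
 * Illustrative sketch (not part of the driver or this diff): recycling a
 * consumed RX list onto the tail of the chain, as described above.  The
 * helper and its head/tail arguments are hypothetical; field names follow
 * the example struct in the comment further up.
 */
#if 0	/* example only */
static void
example_recycle_rx_list(struct tl_list *consumed, struct tl_list *old_tail)
{
	/* The recycled list becomes the new, 0-terminated end of the chain. */
	consumed->forward_pointer = 0;
	consumed->cstat = 0;	/* the real driver re-arms it with its own flags */

	/* The previous tail now points at it (physical address). */
	old_tail->forward_pointer = vtophys(consumed);
}
#endif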
179
180#include <sys/param.h>
181#include <sys/systm.h>
182#include <sys/sockio.h>
183#include <sys/mbuf.h>
184#include <sys/malloc.h>
185#include <sys/kernel.h>
186#include <sys/module.h>
187#include <sys/socket.h>
188
189#include <net/if.h>
190#include <net/if_arp.h>
191#include <net/ethernet.h>
192#include <net/if_dl.h>
193#include <net/if_media.h>
194#include <net/if_types.h>
195
196#include <net/bpf.h>
197
198#include <vm/vm.h> /* for vtophys */
199#include <vm/pmap.h> /* for vtophys */
200#include <machine/bus.h>
201#include <machine/resource.h>
202#include <sys/bus.h>
203#include <sys/rman.h>
204
205#include <dev/mii/mii.h>
206#include <dev/mii/mii_bitbang.h>
206#include <dev/mii/miivar.h>
207
208#include <dev/pci/pcireg.h>
209#include <dev/pci/pcivar.h>
210
211/*
212 * Default to using PIO register access mode to pacify certain
213 * laptop docking stations with built-in ThunderLAN chips that
214 * don't seem to handle memory mapped mode properly.
215 */
216#define TL_USEIOSPACE
217
218#include <dev/tl/if_tlreg.h>
219
220MODULE_DEPEND(tl, pci, 1, 1, 1);
221MODULE_DEPEND(tl, ether, 1, 1, 1);
222MODULE_DEPEND(tl, miibus, 1, 1, 1);
223
224/* "device miibus" required. See GENERIC if you get errors here. */
225#include "miibus_if.h"
226
227/*
228 * Various supported device vendors/types and their names.
229 */
230
231static struct tl_type tl_devs[] = {
232static const struct tl_type const tl_devs[] = {
232 { TI_VENDORID, TI_DEVICEID_THUNDERLAN,
233 "Texas Instruments ThunderLAN" },
234 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
235 "Compaq Netelligent 10" },
236 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
237 "Compaq Netelligent 10/100" },
238 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
239 "Compaq Netelligent 10/100 Proliant" },
240 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
241 "Compaq Netelligent 10/100 Dual Port" },
242 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
243 "Compaq NetFlex-3/P Integrated" },
244 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
245 "Compaq NetFlex-3/P" },
246 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
247 "Compaq NetFlex 3/P w/ BNC" },
248 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
249 "Compaq Netelligent 10/100 TX Embedded UTP" },
250 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
251 "Compaq Netelligent 10 T/2 PCI UTP/Coax" },
252 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
253 "Compaq Netelligent 10/100 TX UTP" },
254 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
255 "Olicom OC-2183/2185" },
256 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
257 "Olicom OC-2325" },
258 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
259 "Olicom OC-2326 10/100 TX UTP" },
260 { 0, 0, NULL }
261};
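/*
 * Illustrative sketch (not the driver's actual tl_probe()): matching a PCI
 * device against the tl_devs[] table above.  The tl_vid/tl_did/tl_name
 * field names are assumed from the initializer layout (vendor ID, device
 * ID, description string); the PCI accessors are the standard newbus ones.
 */
#if 0	/* example only */
static int
example_probe(device_t dev)
{
	const struct tl_type *t;

	for (t = tl_devs; t->tl_name != NULL; t++) {
		if (pci_get_vendor(dev) == t->tl_vid &&
		    pci_get_device(dev) == t->tl_did) {
			device_set_desc(dev, t->tl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
#endif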
262
263static int tl_probe(device_t);
264static int tl_attach(device_t);
265static int tl_detach(device_t);
266static int tl_intvec_rxeoc(void *, u_int32_t);
267static int tl_intvec_txeoc(void *, u_int32_t);
268static int tl_intvec_txeof(void *, u_int32_t);
269static int tl_intvec_rxeof(void *, u_int32_t);
270static int tl_intvec_adchk(void *, u_int32_t);
271static int tl_intvec_netsts(void *, u_int32_t);
272
273static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
274static void tl_stats_update(void *);
275static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);
276
277static void tl_intr(void *);
278static void tl_start(struct ifnet *);
279static void tl_start_locked(struct ifnet *);
280static int tl_ioctl(struct ifnet *, u_long, caddr_t);
281static void tl_init(void *);
282static void tl_init_locked(struct tl_softc *);
283static void tl_stop(struct tl_softc *);
284static void tl_watchdog(struct tl_softc *);
285static int tl_shutdown(device_t);
286static int tl_ifmedia_upd(struct ifnet *);
287static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
288
289static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
290static u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
291static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);
292
293static void tl_mii_sync(struct tl_softc *);
294static void tl_mii_send(struct tl_softc *, u_int32_t, int);
295static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
296static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
297static int tl_miibus_readreg(device_t, int, int);
298static int tl_miibus_writereg(device_t, int, int, int);
299static void tl_miibus_statchg(device_t);
300
301static void tl_setmode(struct tl_softc *, int);
302static uint32_t tl_mchash(const uint8_t *);
303static void tl_setmulti(struct tl_softc *);
304static void tl_setfilt(struct tl_softc *, caddr_t, int);
305static void tl_softreset(struct tl_softc *, int);
306static void tl_hardreset(device_t);
307static int tl_list_rx_init(struct tl_softc *);
308static int tl_list_tx_init(struct tl_softc *);
309
310static u_int8_t tl_dio_read8(struct tl_softc *, int);
311static u_int16_t tl_dio_read16(struct tl_softc *, int);
312static u_int32_t tl_dio_read32(struct tl_softc *, int);
313static void tl_dio_write8(struct tl_softc *, int, int);
314static void tl_dio_write16(struct tl_softc *, int, int);
315static void tl_dio_write32(struct tl_softc *, int, int);
316static void tl_dio_setbit(struct tl_softc *, int, int);
317static void tl_dio_clrbit(struct tl_softc *, int, int);
318static void tl_dio_setbit16(struct tl_softc *, int, int);
319static void tl_dio_clrbit16(struct tl_softc *, int, int);
320
318/*
319 * MII bit-bang glue
320 */
321static uint32_t tl_mii_bitbang_read(device_t);
322static void tl_mii_bitbang_write(device_t, uint32_t);
323
324static const struct mii_bitbang_ops tl_mii_bitbang_ops = {
325 tl_mii_bitbang_read,
326 tl_mii_bitbang_write,
327 {
328 TL_SIO_MDATA, /* MII_BIT_MDO */
329 TL_SIO_MDATA, /* MII_BIT_MDI */
330 TL_SIO_MCLK, /* MII_BIT_MDC */
331 TL_SIO_MTXEN, /* MII_BIT_DIR_HOST_PHY */
332 0, /* MII_BIT_DIR_PHY_HOST */
333 }
334};
335
321#ifdef TL_USEIOSPACE
322#define TL_RES SYS_RES_IOPORT
323#define TL_RID TL_PCI_LOIO
324#else
325#define TL_RES SYS_RES_MEMORY
326#define TL_RID TL_PCI_LOMEM
327#endif
328
329static device_method_t tl_methods[] = {
330 /* Device interface */
331 DEVMETHOD(device_probe, tl_probe),
332 DEVMETHOD(device_attach, tl_attach),
333 DEVMETHOD(device_detach, tl_detach),
334 DEVMETHOD(device_shutdown, tl_shutdown),
335
336 /* bus interface */
337 DEVMETHOD(bus_print_child, bus_generic_print_child),
338 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
339
340 /* MII interface */
341 DEVMETHOD(miibus_readreg, tl_miibus_readreg),
342 DEVMETHOD(miibus_writereg, tl_miibus_writereg),
343 DEVMETHOD(miibus_statchg, tl_miibus_statchg),
344
345 { 0, 0 }
346};
347
348static driver_t tl_driver = {
349 "tl",
350 tl_methods,
351 sizeof(struct tl_softc)
352};
353
354static devclass_t tl_devclass;
355
356DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0);
357DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
358
359static u_int8_t tl_dio_read8(sc, reg)
360 struct tl_softc *sc;
361 int reg;
362{
378
379 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
380 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
363 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
382 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
383 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
364 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
365}
366
367static u_int16_t tl_dio_read16(sc, reg)
368 struct tl_softc *sc;
369 int reg;
370{
391
392 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
393 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
371 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
395 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
396 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
372 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
373}
374
375static u_int32_t tl_dio_read32(sc, reg)
376 struct tl_softc *sc;
377 int reg;
378{
404
405 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
406 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
379 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
408 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
409 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
380 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
381}
382
383static void tl_dio_write8(sc, reg, val)
384 struct tl_softc *sc;
385 int reg;
386 int val;
387{
418
419 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
420 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
388 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
422 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
423 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
389 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
390 return;
391}
392
393static void tl_dio_write16(sc, reg, val)
394 struct tl_softc *sc;
395 int reg;
396 int val;
397{
432
433 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
434 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
398 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
436 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
437 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
399 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
400 return;
401}
402
403static void tl_dio_write32(sc, reg, val)
404 struct tl_softc *sc;
405 int reg;
406 int val;
407{
446
447 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
448 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
408 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
450 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
451 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
409 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
410 return;
411}
412
413static void
414tl_dio_setbit(sc, reg, bit)
415 struct tl_softc *sc;
416 int reg;
417 int bit;
418{
419 u_int8_t f;
420
463 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
464 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
421 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
466 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
467 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
422 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
423 f |= bit;
470 CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
471 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
424 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
425
426 return;
427}
428
429static void
430tl_dio_clrbit(sc, reg, bit)
431 struct tl_softc *sc;
432 int reg;
433 int bit;
434{
435 u_int8_t f;
436
483 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
484 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
437 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
486 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
487 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
438 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
439 f &= ~bit;
490 CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 1,
491 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
440 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
441
442 return;
443}
444
445static void tl_dio_setbit16(sc, reg, bit)
446 struct tl_softc *sc;
447 int reg;
448 int bit;
449{
450 u_int16_t f;
451
502 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
503 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
452 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
505 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
506 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
453 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
454 f |= bit;
509 CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
510 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
455 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
456
457 return;
458}
459
460static void tl_dio_clrbit16(sc, reg, bit)
461 struct tl_softc *sc;
462 int reg;
463 int bit;
464{
465 u_int16_t f;
466
521 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
522 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
467 CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
524 CSR_BARRIER(sc, TL_DIO_ADDR, 2,
525 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
468 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
469 f &= ~bit;
528 CSR_BARRIER(sc, TL_DIO_DATA + (reg & 3), 2,
529 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
470 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
471
472 return;
473}
474
475/*
476 * Send an instruction or address to the EEPROM, check for ACK.
477 */
478static u_int8_t tl_eeprom_putbyte(sc, byte)
479 struct tl_softc *sc;
480 int byte;
481{
482 register int i, ack = 0;
483
484 /*
485 * Make sure we're in TX mode.
486 */
487 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
488
489 /*
 490 * Feed in each bit and strobe the clock.
491 */
492 for (i = 0x80; i; i >>= 1) {
493 if (byte & i) {
494 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
495 } else {
496 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
497 }
498 DELAY(1);
499 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
500 DELAY(1);
501 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
502 }
503
504 /*
505 * Turn off TX mode.
506 */
507 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
508
509 /*
510 * Check for ack.
511 */
512 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
513 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
514 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
515
516 return(ack);
517}
518
519/*
520 * Read a byte of data stored in the EEPROM at address 'addr.'
521 */
522static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
523 struct tl_softc *sc;
524 int addr;
525 u_int8_t *dest;
526{
527 register int i;
528 u_int8_t byte = 0;
529 device_t tl_dev = sc->tl_dev;
530
531 tl_dio_write8(sc, TL_NETSIO, 0);
532
533 EEPROM_START;
534
535 /*
536 * Send write control code to EEPROM.
537 */
538 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
539 device_printf(tl_dev, "failed to send write command, status: %x\n",
540 tl_dio_read8(sc, TL_NETSIO));
541 return(1);
542 }
543
544 /*
545 * Send address of byte we want to read.
546 */
547 if (tl_eeprom_putbyte(sc, addr)) {
548 device_printf(tl_dev, "failed to send address, status: %x\n",
549 tl_dio_read8(sc, TL_NETSIO));
550 return(1);
551 }
552
553 EEPROM_STOP;
554 EEPROM_START;
555 /*
556 * Send read control code to EEPROM.
557 */
558 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
559 device_printf(tl_dev, "failed to send write command, status: %x\n",
560 tl_dio_read8(sc, TL_NETSIO));
561 return(1);
562 }
563
564 /*
565 * Start reading bits from EEPROM.
566 */
567 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
568 for (i = 0x80; i; i >>= 1) {
569 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
570 DELAY(1);
571 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
572 byte |= i;
573 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
574 DELAY(1);
575 }
576
577 EEPROM_STOP;
578
579 /*
580 * No ACK generated for read, so just return byte.
581 */
582
583 *dest = byte;
584
585 return(0);
586}
587
588/*
589 * Read a sequence of bytes from the EEPROM.
590 */
591static int
592tl_read_eeprom(sc, dest, off, cnt)
593 struct tl_softc *sc;
594 caddr_t dest;
595 int off;
596 int cnt;
597{
598 int err = 0, i;
599 u_int8_t byte = 0;
600
601 for (i = 0; i < cnt; i++) {
602 err = tl_eeprom_getbyte(sc, off + i, &byte);
603 if (err)
604 break;
605 *(dest + i) = byte;
606 }
607
608 return(err ? 1 : 0);
609}
610
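/*
 * Illustrative usage sketch (not part of this diff): fetching the 6-byte
 * station address through tl_read_eeprom().  The EEPROM offset below is a
 * hypothetical placeholder; the driver derives the real offset per adapter
 * during attach.
 */
#if 0	/* example only */
static void
example_fetch_eaddr(struct tl_softc *sc)
{
	u_char eaddr[ETHER_ADDR_LEN];
	int ee_off = 0;		/* hypothetical offset of the station address */

	if (tl_read_eeprom(sc, (caddr_t)eaddr, ee_off, ETHER_ADDR_LEN) != 0)
		device_printf(sc->tl_dev, "failed to read station address\n");
}
#endif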
611static void
612tl_mii_sync(sc)
613 struct tl_softc *sc;
669#define TL_SIO_MII (TL_SIO_MCLK | TL_SIO_MDATA | TL_SIO_MTXEN)
670
671/*
672 * Read the MII serial port for the MII bit-bang module.
673 */
674static uint32_t
675tl_mii_bitbang_read(device_t dev)
614{
676{
615 register int i;
677 struct tl_softc *sc;
678 uint32_t val;
616
679
617 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
680 sc = device_get_softc(dev);
618
681
619 for (i = 0; i < 32; i++) {
620 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
621 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
622 }
682 val = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MII;
683 CSR_BARRIER(sc, TL_NETSIO, 1,
684 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
623
685
624 return;
686 return (val);
625}
626
687}
688
689/*
690 * Write the MII serial port for the MII bit-bang module.
691 */
627static void
692static void
628tl_mii_send(sc, bits, cnt)
629 struct tl_softc *sc;
630 u_int32_t bits;
631 int cnt;
693tl_mii_bitbang_write(device_t dev, uint32_t val)
632{
694{
633 int i;
695 struct tl_softc *sc;
634
696
635 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
636 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
637 if (bits & i) {
638 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
639 } else {
640 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
641 }
642 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
643 }
697 sc = device_get_softc(dev);
698
699 val = (tl_dio_read8(sc, TL_NETSIO) & ~TL_SIO_MII) | val;
700 CSR_BARRIER(sc, TL_NETSIO, 1,
701 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
702 tl_dio_write8(sc, TL_NETSIO, val);
703 CSR_BARRIER(sc, TL_NETSIO, 1,
704 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
644}
645
646static int
705}
706
707static int
647tl_mii_readreg(sc, frame)
648 struct tl_softc *sc;
649 struct tl_mii_frame *frame;
650
708tl_miibus_readreg(dev, phy, reg)
709 device_t dev;
710 int phy, reg;
651{
711{
652 int i, ack;
653 int minten = 0;
712 struct tl_softc *sc;
713 int minten, val;
654
714
655 tl_mii_sync(sc);
715 sc = device_get_softc(dev);
656
657 /*
716
717 /*
658 * Set up frame for RX.
659 */
660 frame->mii_stdelim = TL_MII_STARTDELIM;
661 frame->mii_opcode = TL_MII_READOP;
662 frame->mii_turnaround = 0;
663 frame->mii_data = 0;
664
665 /*
666 * Turn off MII interrupt by forcing MINTEN low.
667 */
668 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
669 if (minten) {
670 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
671 }
672
718 * Turn off MII interrupt by forcing MINTEN low.
719 */
720 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
721 if (minten) {
722 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
723 }
724
673 /*
674 * Turn on data xmit.
675 */
676 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
725 val = mii_bitbang_readreg(dev, &tl_mii_bitbang_ops, phy, reg);
677
726
678 /*
679 * Send command/address info.
680 */
681 tl_mii_send(sc, frame->mii_stdelim, 2);
682 tl_mii_send(sc, frame->mii_opcode, 2);
683 tl_mii_send(sc, frame->mii_phyaddr, 5);
684 tl_mii_send(sc, frame->mii_regaddr, 5);
685
686 /*
687 * Turn off xmit.
688 */
689 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
690
691 /* Idle bit */
692 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
693 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
694
695 /* Check for ack */
696 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
697 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
698
699 /* Complete the cycle */
700 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
701
702 /*
703 * Now try reading data bits. If the ack failed, we still
704 * need to clock through 16 cycles to keep the PHYs in sync.
705 */
706 if (ack) {
707 for(i = 0; i < 16; i++) {
708 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
709 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
710 }
711 goto fail;
712 }
713
714 for (i = 0x8000; i; i >>= 1) {
715 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
716 if (!ack) {
717 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
718 frame->mii_data |= i;
719 }
720 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
721 }
722
723fail:
724
725 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
726 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
727
728 /* Reenable interrupts */
727 /* Reenable interrupts. */
729 if (minten) {
730 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
731 }
732
728 if (minten) {
729 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
730 }
731
733 if (ack)
734 return(1);
735 return(0);
732 return (val);
736}
737
738static int
733}
734
735static int
739tl_mii_writereg(sc, frame)
740 struct tl_softc *sc;
741 struct tl_mii_frame *frame;
742
736tl_miibus_writereg(dev, phy, reg, data)
737 device_t dev;
738 int phy, reg, data;
743{
739{
740 struct tl_softc *sc;
744 int minten;
745
741 int minten;
742
746 tl_mii_sync(sc);
743 sc = device_get_softc(dev);
747
748 /*
744
745 /*
749 * Set up frame for TX.
750 */
751
752 frame->mii_stdelim = TL_MII_STARTDELIM;
753 frame->mii_opcode = TL_MII_WRITEOP;
754 frame->mii_turnaround = TL_MII_TURNAROUND;
755
756 /*
757 * Turn off MII interrupt by forcing MINTEN low.
758 */
759 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
760 if (minten) {
761 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
762 }
763
746 * Turn off MII interrupt by forcing MINTEN low.
747 */
748 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
749 if (minten) {
750 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
751 }
752
764 /*
765 * Turn on data output.
766 */
767 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
753 mii_bitbang_writereg(dev, &tl_mii_bitbang_ops, phy, reg, data);
768
754
769 tl_mii_send(sc, frame->mii_stdelim, 2);
770 tl_mii_send(sc, frame->mii_opcode, 2);
771 tl_mii_send(sc, frame->mii_phyaddr, 5);
772 tl_mii_send(sc, frame->mii_regaddr, 5);
773 tl_mii_send(sc, frame->mii_turnaround, 2);
774 tl_mii_send(sc, frame->mii_data, 16);
775
776 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
777 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
778
779 /*
780 * Turn off xmit.
781 */
782 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
783
784 /* Reenable interrupts */
785 if (minten)
755 /* Reenable interrupts. */
756 if (minten) {
786 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
757 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
758 }
787
788 return(0);
789}
790
759
760 return(0);
761}
762
791static int
792tl_miibus_readreg(dev, phy, reg)
793 device_t dev;
794 int phy, reg;
795{
796 struct tl_softc *sc;
797 struct tl_mii_frame frame;
798
799 sc = device_get_softc(dev);
800 bzero((char *)&frame, sizeof(frame));
801
802 frame.mii_phyaddr = phy;
803 frame.mii_regaddr = reg;
804 tl_mii_readreg(sc, &frame);
805
806 return(frame.mii_data);
807}
808
809static int
810tl_miibus_writereg(dev, phy, reg, data)
811 device_t dev;
812 int phy, reg, data;
813{
814 struct tl_softc *sc;
815 struct tl_mii_frame frame;
816
817 sc = device_get_softc(dev);
818 bzero((char *)&frame, sizeof(frame));
819
820 frame.mii_phyaddr = phy;
821 frame.mii_regaddr = reg;
822 frame.mii_data = data;
823
824 tl_mii_writereg(sc, &frame);
825
826 return(0);
827}
828
829static void
830tl_miibus_statchg(dev)
831 device_t dev;
832{
833 struct tl_softc *sc;
834 struct mii_data *mii;
835
836 sc = device_get_softc(dev);
837 mii = device_get_softc(sc->tl_miibus);
838
839 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
840 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
841 } else {
842 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
843 }
844
845 return;
846}
847
848/*
849 * Set modes for bitrate devices.
850 */
851static void
852tl_setmode(sc, media)
853 struct tl_softc *sc;
854 int media;
855{
856 if (IFM_SUBTYPE(media) == IFM_10_5)
857 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
858 if (IFM_SUBTYPE(media) == IFM_10_T) {
859 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
860 if ((media & IFM_GMASK) == IFM_FDX) {
861 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
862 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
863 } else {
864 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
865 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
866 }
867 }
778}
868
869 return;
870}
871
872/*
873 * Calculate the hash of a MAC address for programming the multicast hash
874 * table. This hash is simply the address split into 6-bit chunks
875 * XOR'd, e.g.
876 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
877 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
878 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then
879 * the folded 24-bit value is split into 6-bit portions and XOR'd.
880 */
881static uint32_t
882tl_mchash(addr)
883 const uint8_t *addr;
884{
885 int t;
886
887 t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
888 (addr[2] ^ addr[5]);
889 return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
890}
891
892/*
893 * The ThunderLAN has a perfect MAC address filter in addition to
894 * the multicast hash filter. The perfect filter can be programmed
895 * with up to four MAC addresses. The first one is always used to
896 * hold the station address, which leaves us free to use the other
897 * three for multicast addresses.
898 */
899static void
900tl_setfilt(sc, addr, slot)
901 struct tl_softc *sc;
902 caddr_t addr;
903 int slot;
904{
905 int i;
906 u_int16_t regaddr;
907
908 regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
909
910 for (i = 0; i < ETHER_ADDR_LEN; i++)
911 tl_dio_write8(sc, regaddr + i, *(addr + i));
800}
912
913 return;
914}
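/*
 * Illustrative sketch (not part of the driver): each perfect-filter slot
 * occupies ETHER_ADDR_LEN (6) consecutive DIO registers starting at
 * TL_AREG0_B5, so slot 0 holds the station address and slots 1-3 are left
 * for multicast groups. The base value below is a stand-in used only for
 * the printout; the real offset comes from if_tlreg.h.
 */
#include <stdio.h>

#define EX_ETHER_ADDR_LEN	6
#define EX_TL_AREG0_B5		0x68	/* placeholder base, not the real one */

int
main(void)
{
	int slot;

	for (slot = 0; slot < 4; slot++)
		printf("slot %d -> DIO registers 0x%02x..0x%02x\n", slot,
		    EX_TL_AREG0_B5 + slot * EX_ETHER_ADDR_LEN,
		    EX_TL_AREG0_B5 + (slot + 1) * EX_ETHER_ADDR_LEN - 1);
	return (0);
}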
915
916/*
917 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
918 * linked list. This is fine, except addresses are added from the head
919 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
920 * group to always be in the perfect filter, but as more groups are added,
921 * the 224.0.0.1 entry (which is always added first) gets pushed down
922 * the list and ends up at the tail. So after 3 or 4 multicast groups
923 * are added, the all-hosts entry gets pushed out of the perfect filter
924 * and into the hash table.
925 *
926 * Because the multicast list is a doubly-linked list as opposed to a
927 * circular queue, we don't have the ability to just grab the tail of
928 * the list and traverse it backwards. Instead, we have to traverse
929 * the list once to find the tail, then traverse it again backwards to
930 * update the multicast filter.
931 */
932static void
933tl_setmulti(sc)
934 struct tl_softc *sc;
935{
936 struct ifnet *ifp;
937 u_int32_t hashes[2] = { 0, 0 };
938 int h, i;
939 struct ifmultiaddr *ifma;
940 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
941 ifp = sc->tl_ifp;
942
943 /* First, zot all the existing filters. */
944 for (i = 1; i < 4; i++)
945 tl_setfilt(sc, (caddr_t)&dummy, i);
946 tl_dio_write32(sc, TL_HASH1, 0);
947 tl_dio_write32(sc, TL_HASH2, 0);
948
949 /* Now program new ones. */
950 if (ifp->if_flags & IFF_ALLMULTI) {
951 hashes[0] = 0xFFFFFFFF;
952 hashes[1] = 0xFFFFFFFF;
953 } else {
954 i = 1;
955 if_maddr_rlock(ifp);
956 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
957 if (ifma->ifma_addr->sa_family != AF_LINK)
958 continue;
959 /*
960 * Program the first three multicast groups
961 * into the perfect filter. For all others,
962 * use the hash table.
963 */
964 if (i < 4) {
965 tl_setfilt(sc,
966 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
967 i++;
968 continue;
969 }
970
971 h = tl_mchash(
972 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
973 if (h < 32)
974 hashes[0] |= (1 << h);
975 else
976 hashes[1] |= (1 << (h - 32));
977 }
978 if_maddr_runlock(ifp);
979 }
980
981 tl_dio_write32(sc, TL_HASH1, hashes[0]);
982 tl_dio_write32(sc, TL_HASH2, hashes[1]);
842}
843
844/*
845 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
846 * linked list. This is fine, except addresses are added from the head
847 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
848 * group to always be in the perfect filter, but as more groups are added,
849 * the 224.0.0.1 entry (which is always added first) gets pushed down
850 * the list and ends up at the tail. So after 3 or 4 multicast groups
851 * are added, the all-hosts entry gets pushed out of the perfect filter
852 * and into the hash table.
853 *
854 * Because the multicast list is a doubly-linked list as opposed to a
855 * circular queue, we don't have the ability to just grab the tail of
856 * the list and traverse it backwards. Instead, we have to traverse
857 * the list once to find the tail, then traverse it again backwards to
858 * update the multicast filter.
859 */
860static void
861tl_setmulti(sc)
862 struct tl_softc *sc;
863{
864 struct ifnet *ifp;
865 u_int32_t hashes[2] = { 0, 0 };
866 int h, i;
867 struct ifmultiaddr *ifma;
868 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
869 ifp = sc->tl_ifp;
870
871 /* First, zot all the existing filters. */
872 for (i = 1; i < 4; i++)
873 tl_setfilt(sc, (caddr_t)&dummy, i);
874 tl_dio_write32(sc, TL_HASH1, 0);
875 tl_dio_write32(sc, TL_HASH2, 0);
876
877 /* Now program new ones. */
878 if (ifp->if_flags & IFF_ALLMULTI) {
879 hashes[0] = 0xFFFFFFFF;
880 hashes[1] = 0xFFFFFFFF;
881 } else {
882 i = 1;
883 if_maddr_rlock(ifp);
884 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
885 if (ifma->ifma_addr->sa_family != AF_LINK)
886 continue;
887 /*
888 * Program the first three multicast groups
889 * into the perfect filter. For all others,
890 * use the hash table.
891 */
892 if (i < 4) {
893 tl_setfilt(sc,
894 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
895 i++;
896 continue;
897 }
898
899 h = tl_mchash(
900 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
901 if (h < 32)
902 hashes[0] |= (1 << h);
903 else
904 hashes[1] |= (1 << (h - 32));
905 }
906 if_maddr_runlock(ifp);
907 }
908
909 tl_dio_write32(sc, TL_HASH1, hashes[0]);
910 tl_dio_write32(sc, TL_HASH2, hashes[1]);
983
984 return;
985}
986
987/*
988 * This routine is recommended by the ThunderLAN manual to ensure that
989 * the internal PHY is powered up correctly. It also recommends a one
990 * second pause at the end to 'wait for the clocks to start' but in my
991 * experience this isn't necessary.
992 */
993static void
994tl_hardreset(dev)
995 device_t dev;
996{
997 struct tl_softc *sc;
998 int i;
999 u_int16_t flags;
1000
1001 sc = device_get_softc(dev);
1002
911}
912
913/*
914 * This routine is recommended by the ThunderLAN manual to ensure that
915 * the internal PHY is powered up correctly. It also recommends a one
916 * second pause at the end to 'wait for the clocks to start' but in my
917 * experience this isn't necessary.
918 */
919static void
920tl_hardreset(dev)
921 device_t dev;
922{
923 struct tl_softc *sc;
924 int i;
925 u_int16_t flags;
926
927 sc = device_get_softc(dev);
928
1003 tl_mii_sync(sc);
929 mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
1004
1005 flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1006
1007 for (i = 0; i < MII_NPHY; i++)
1008 tl_miibus_writereg(dev, i, MII_BMCR, flags);
1009
1010 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1011 DELAY(50000);
1012 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
930
931 flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
932
933 for (i = 0; i < MII_NPHY; i++)
934 tl_miibus_writereg(dev, i, MII_BMCR, flags);
935
936 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
937 DELAY(50000);
938 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1013 tl_mii_sync(sc);
939 mii_bitbang_sync(dev, &tl_mii_bitbang_ops);
1014 while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1015
1016 DELAY(50000);
940 while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
941
942 DELAY(50000);
1017 return;
1018}
1019
1020static void
1021tl_softreset(sc, internal)
1022 struct tl_softc *sc;
1023 int internal;
1024{
1025 u_int32_t cmd, dummy, i;
1026
1027 /* Assert the adapter reset bit. */
1028 CMD_SET(sc, TL_CMD_ADRST);
1029
1030 /* Turn off interrupts */
1031 CMD_SET(sc, TL_CMD_INTSOFF);
1032
1033 /* First, clear the stats registers. */
1034 for (i = 0; i < 5; i++)
1035 dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1036
1037 /* Clear Areg and Hash registers */
1038 for (i = 0; i < 8; i++)
1039 tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1040
1041 /*
1042 * Set up Netconfig register. Enable one channel and
1043 * one fragment mode.
1044 */
1045 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1046 if (internal && !sc->tl_bitrate) {
1047 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1048 } else {
1049 tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1050 }
1051
1052 /* Handle cards with bitrate devices. */
1053 if (sc->tl_bitrate)
1054 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1055
1056 /*
1057 * Load adapter irq pacing timer and tx threshold.
1058 * We make the transmit threshold 1 initially but we may
1059 * change that later.
1060 */
1061 cmd = CSR_READ_4(sc, TL_HOSTCMD);
1062 cmd |= TL_CMD_NES;
1063 cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1064 CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1065 CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1066
1067 /* Unreset the MII */
1068 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1069
1070 /* Take the adapter out of reset */
1071 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1072
1073 /* Wait for things to settle down a little. */
1074 DELAY(500);
943}
944
945static void
946tl_softreset(sc, internal)
947 struct tl_softc *sc;
948 int internal;
949{
950 u_int32_t cmd, dummy, i;
951
952 /* Assert the adapter reset bit. */
953 CMD_SET(sc, TL_CMD_ADRST);
954
955 /* Turn off interrupts */
956 CMD_SET(sc, TL_CMD_INTSOFF);
957
958 /* First, clear the stats registers. */
959 for (i = 0; i < 5; i++)
960 dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
961
962 /* Clear Areg and Hash registers */
963 for (i = 0; i < 8; i++)
964 tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
965
966 /*
967 * Set up Netconfig register. Enable one channel and
968 * one fragment mode.
969 */
970 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
971 if (internal && !sc->tl_bitrate) {
972 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
973 } else {
974 tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
975 }
976
977 /* Handle cards with bitrate devices. */
978 if (sc->tl_bitrate)
979 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
980
981 /*
982 * Load adapter irq pacing timer and tx threshold.
983 * We make the transmit threshold 1 initially but we may
984 * change that later.
985 */
986 cmd = CSR_READ_4(sc, TL_HOSTCMD);
987 cmd |= TL_CMD_NES;
988 cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
989 CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
990 CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
991
992 /* Unreset the MII */
993 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
994
995 /* Take the adapter out of reset */
996 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
997
998 /* Wait for things to settle down a little. */
999 DELAY(500);
1075
1076 return;
1077}
1078
1079/*
1080 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1081 * against our list and return its name if we find a match.
1082 */
1083static int
1084tl_probe(dev)
1085 device_t dev;
1086{
1000}
1001
1002/*
1003 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1004 * against our list and return its name if we find a match.
1005 */
1006static int
1007tl_probe(dev)
1008 device_t dev;
1009{
1087 struct tl_type *t;
1010 const struct tl_type *t;
1088
1089 t = tl_devs;
1090
1091 while(t->tl_name != NULL) {
1092 if ((pci_get_vendor(dev) == t->tl_vid) &&
1093 (pci_get_device(dev) == t->tl_did)) {
1094 device_set_desc(dev, t->tl_name);
1095 return (BUS_PROBE_DEFAULT);
1096 }
1097 t++;
1098 }
1099
1100 return(ENXIO);
1101}
1102
1103static int
1104tl_attach(dev)
1105 device_t dev;
1106{
1107 u_int16_t did, vid;
1011
1012 t = tl_devs;
1013
1014 while(t->tl_name != NULL) {
1015 if ((pci_get_vendor(dev) == t->tl_vid) &&
1016 (pci_get_device(dev) == t->tl_did)) {
1017 device_set_desc(dev, t->tl_name);
1018 return (BUS_PROBE_DEFAULT);
1019 }
1020 t++;
1021 }
1022
1023 return(ENXIO);
1024}
1025
1026static int
1027tl_attach(dev)
1028 device_t dev;
1029{
1030 u_int16_t did, vid;
1108 struct tl_type *t;
1031 const struct tl_type *t;
1109 struct ifnet *ifp;
1110 struct tl_softc *sc;
1111 int error, flags, i, rid, unit;
1112 u_char eaddr[6];
1113
1114 vid = pci_get_vendor(dev);
1115 did = pci_get_device(dev);
1116 sc = device_get_softc(dev);
1117 sc->tl_dev = dev;
1118 unit = device_get_unit(dev);
1119
1120 t = tl_devs;
1121 while(t->tl_name != NULL) {
1122 if (vid == t->tl_vid && did == t->tl_did)
1123 break;
1124 t++;
1125 }
1126
1127 if (t->tl_name == NULL) {
1128 device_printf(dev, "unknown device!?\n");
1129 return (ENXIO);
1130 }
1131
1132 mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1133 MTX_DEF);
1134
1135 /*
1136 * Map control/status registers.
1137 */
1138 pci_enable_busmaster(dev);
1139
1140#ifdef TL_USEIOSPACE
1141
1142 rid = TL_PCI_LOIO;
1143 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1144 RF_ACTIVE);
1145
1146 /*
1147 * Some cards have the I/O and memory mapped address registers
1148 * reversed. Try both combinations before giving up.
1149 */
1150 if (sc->tl_res == NULL) {
1151 rid = TL_PCI_LOMEM;
1152 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1153 RF_ACTIVE);
1154 }
1155#else
1156 rid = TL_PCI_LOMEM;
1157 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1158 RF_ACTIVE);
1159 if (sc->tl_res == NULL) {
1160 rid = TL_PCI_LOIO;
1161 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1162 RF_ACTIVE);
1163 }
1164#endif
1165
1166 if (sc->tl_res == NULL) {
1167 device_printf(dev, "couldn't map ports/memory\n");
1168 error = ENXIO;
1169 goto fail;
1170 }
1171
1172#ifdef notdef
1173 /*
1174 * The ThunderLAN manual suggests jacking the PCI latency
1175 * timer all the way up to its maximum value. I'm not sure
1176 * if this is really necessary, but what the manual wants,
1177 * the manual gets.
1178 */
1179 command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1180 command |= 0x0000FF00;
1181 pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1182#endif
1183
1184 /* Allocate interrupt */
1185 rid = 0;
1186 sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1187 RF_SHAREABLE | RF_ACTIVE);
1188
1189 if (sc->tl_irq == NULL) {
1190 device_printf(dev, "couldn't map interrupt\n");
1191 error = ENXIO;
1192 goto fail;
1193 }
1194
1195 /*
1196 * Now allocate memory for the TX and RX lists.
1197 */
1198 sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1199 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1200
1201 if (sc->tl_ldata == NULL) {
1202 device_printf(dev, "no memory for list buffers!\n");
1203 error = ENXIO;
1204 goto fail;
1205 }
1206
1207 bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1208
1209 if (vid == COMPAQ_VENDORID || vid == TI_VENDORID)
1210 sc->tl_eeaddr = TL_EEPROM_EADDR;
1211 if (vid == OLICOM_VENDORID)
1212 sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1213
1214 /* Reset the adapter. */
1215 tl_softreset(sc, 1);
1216 tl_hardreset(dev);
1217 tl_softreset(sc, 1);
1218
1219 /*
1220 * Get station address from the EEPROM.
1221 */
1222 if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1223 device_printf(dev, "failed to read station address\n");
1224 error = ENXIO;
1225 goto fail;
1226 }
1227
1228 /*
1229 * XXX Olicom, in its desire to be different from the
1230 * rest of the world, has done strange things with the
1231 * encoding of the station address in the EEPROM. First
1232 * of all, they store the address at offset 0xF8 rather
1233 * than at 0x83 like the ThunderLAN manual suggests.
1234 * Second, they store the address in three 16-bit words in
1235 * network byte order, as opposed to storing it sequentially
1236 * like all the other ThunderLAN cards. In order to get
1237 * the station address in a form that matches what the Olicom
1238 * diagnostic utility specifies, we have to byte-swap each
1239 * word. To make things even more confusing, neither 00:00:28
1240 * nor 00:00:24 appear in the IEEE OUI database.
1241 */
1242 if (vid == OLICOM_VENDORID) {
1243 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1244 u_int16_t *p;
1245 p = (u_int16_t *)&eaddr[i];
1246 *p = ntohs(*p);
1247 }
1248 }
1249
1250 ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1251 if (ifp == NULL) {
1252 device_printf(dev, "can not if_alloc()\n");
1253 error = ENOSPC;
1254 goto fail;
1255 }
1256 ifp->if_softc = sc;
1257 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1258 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1259 ifp->if_ioctl = tl_ioctl;
1260 ifp->if_start = tl_start;
1261 ifp->if_init = tl_init;
1262 ifp->if_mtu = ETHERMTU;
1263 ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1264 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1265 ifp->if_capenable |= IFCAP_VLAN_MTU;
1266 callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);
1267
1268 /* Reset the adapter again. */
1269 tl_softreset(sc, 1);
1270 tl_hardreset(dev);
1271 tl_softreset(sc, 1);
1272
1273 /*
1274 * Do MII setup. If no PHYs are found, then this is a
1275 * bitrate ThunderLAN chip that only supports 10baseT
1276 * and AUI/BNC.
1277 * XXX mii_attach() can fail for reasons other than
1278 * no PHYs being found!
1279 */
1280 flags = 0;
1281 if (vid == COMPAQ_VENDORID) {
1282 if (did == COMPAQ_DEVICEID_NETEL_10_100_PROLIANT ||
1283 did == COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED ||
1284 did == COMPAQ_DEVICEID_NETFLEX_3P_BNC ||
1285 did == COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX)
1286 flags |= MIIF_MACPRIV0;
1287 if (did == COMPAQ_DEVICEID_NETEL_10 ||
1288 did == COMPAQ_DEVICEID_NETEL_10_100_DUAL ||
1289 did == COMPAQ_DEVICEID_NETFLEX_3P ||
1290 did == COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED)
1291 flags |= MIIF_MACPRIV1;
1292 } else if (vid == OLICOM_VENDORID && did == OLICOM_DEVICEID_OC2183)
1293 flags |= MIIF_MACPRIV0 | MIIF_MACPRIV1;
1294 if (mii_attach(dev, &sc->tl_miibus, ifp, tl_ifmedia_upd,
1295 tl_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0)) {
1296 struct ifmedia *ifm;
1297 sc->tl_bitrate = 1;
1298 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1299 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1300 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1301 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1302 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1303 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1304 /* Reset again, this time setting bitrate mode. */
1305 tl_softreset(sc, 1);
1306 ifm = &sc->ifmedia;
1307 ifm->ifm_media = ifm->ifm_cur->ifm_media;
1308 tl_ifmedia_upd(ifp);
1309 }
1310
1311 /*
1312 * Call MI attach routine.
1313 */
1314 ether_ifattach(ifp, eaddr);
1315
1316 /* Hook interrupt last to avoid having to lock softc */
1317 error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1318 NULL, tl_intr, sc, &sc->tl_intrhand);
1319
1320 if (error) {
1321 device_printf(dev, "couldn't set up irq\n");
1322 ether_ifdetach(ifp);
1323 goto fail;
1324 }
1325
1326fail:
1327 if (error)
1328 tl_detach(dev);
1329
1330 return(error);
1331}
1332
1333/*
1334 * Shutdown hardware and free up resources. This can be called any
1335 * time after the mutex has been initialized. It is called in both
1336 * the error case in attach and the normal detach case so it needs
1337 * to be careful about only freeing resources that have actually been
1338 * allocated.
1339 */
1340static int
1341tl_detach(dev)
1342 device_t dev;
1343{
1344 struct tl_softc *sc;
1345 struct ifnet *ifp;
1346
1347 sc = device_get_softc(dev);
1348 KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1349 ifp = sc->tl_ifp;
1350
1351 /* These should only be active if attach succeeded */
1352 if (device_is_attached(dev)) {
1353 ether_ifdetach(ifp);
1354 TL_LOCK(sc);
1355 tl_stop(sc);
1356 TL_UNLOCK(sc);
1357 callout_drain(&sc->tl_stat_callout);
1358 }
1359 if (sc->tl_miibus)
1360 device_delete_child(dev, sc->tl_miibus);
1361 bus_generic_detach(dev);
1362
1363 if (sc->tl_ldata)
1364 contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1365 if (sc->tl_bitrate)
1366 ifmedia_removeall(&sc->ifmedia);
1367
1368 if (sc->tl_intrhand)
1369 bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1370 if (sc->tl_irq)
1371 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1372 if (sc->tl_res)
1373 bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1374
1375 if (ifp)
1376 if_free(ifp);
1377
1378 mtx_destroy(&sc->tl_mtx);
1379
1380 return(0);
1381}
1382
1383/*
1384 * Initialize the transmit lists.
1385 */
1386static int
1387tl_list_tx_init(sc)
1388 struct tl_softc *sc;
1389{
1390 struct tl_chain_data *cd;
1391 struct tl_list_data *ld;
1392 int i;
1393
1394 cd = &sc->tl_cdata;
1395 ld = sc->tl_ldata;
1396 for (i = 0; i < TL_TX_LIST_CNT; i++) {
1397 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1398 if (i == (TL_TX_LIST_CNT - 1))
1399 cd->tl_tx_chain[i].tl_next = NULL;
1400 else
1401 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1402 }
1403
1404 cd->tl_tx_free = &cd->tl_tx_chain[0];
1405 cd->tl_tx_tail = cd->tl_tx_head = NULL;
1406 sc->tl_txeoc = 1;
1407
1408 return(0);
1409}
1410
1411/*
1412 * Initialize the RX lists and allocate mbufs for them.
1413 */
1414static int
1415tl_list_rx_init(sc)
1416 struct tl_softc *sc;
1417{
1032 struct ifnet *ifp;
1033 struct tl_softc *sc;
1034 int error, flags, i, rid, unit;
1035 u_char eaddr[6];
1036
1037 vid = pci_get_vendor(dev);
1038 did = pci_get_device(dev);
1039 sc = device_get_softc(dev);
1040 sc->tl_dev = dev;
1041 unit = device_get_unit(dev);
1042
1043 t = tl_devs;
1044 while(t->tl_name != NULL) {
1045 if (vid == t->tl_vid && did == t->tl_did)
1046 break;
1047 t++;
1048 }
1049
1050 if (t->tl_name == NULL) {
1051 device_printf(dev, "unknown device!?\n");
1052 return (ENXIO);
1053 }
1054
1055 mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1056 MTX_DEF);
1057
1058 /*
1059 * Map control/status registers.
1060 */
1061 pci_enable_busmaster(dev);
1062
1063#ifdef TL_USEIOSPACE
1064
1065 rid = TL_PCI_LOIO;
1066 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1067 RF_ACTIVE);
1068
1069 /*
1070 * Some cards have the I/O and memory mapped address registers
1071 * reversed. Try both combinations before giving up.
1072 */
1073 if (sc->tl_res == NULL) {
1074 rid = TL_PCI_LOMEM;
1075 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1076 RF_ACTIVE);
1077 }
1078#else
1079 rid = TL_PCI_LOMEM;
1080 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1081 RF_ACTIVE);
1082 if (sc->tl_res == NULL) {
1083 rid = TL_PCI_LOIO;
1084 sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1085 RF_ACTIVE);
1086 }
1087#endif
1088
1089 if (sc->tl_res == NULL) {
1090 device_printf(dev, "couldn't map ports/memory\n");
1091 error = ENXIO;
1092 goto fail;
1093 }
1094
1095#ifdef notdef
1096 /*
1097 * The ThunderLAN manual suggests jacking the PCI latency
1098 * timer all the way up to its maximum value. I'm not sure
1099 * if this is really necessary, but what the manual wants,
1100 * the manual gets.
1101 */
1102 command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1103 command |= 0x0000FF00;
1104 pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1105#endif
1106
1107 /* Allocate interrupt */
1108 rid = 0;
1109 sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1110 RF_SHAREABLE | RF_ACTIVE);
1111
1112 if (sc->tl_irq == NULL) {
1113 device_printf(dev, "couldn't map interrupt\n");
1114 error = ENXIO;
1115 goto fail;
1116 }
1117
1118 /*
1119 * Now allocate memory for the TX and RX lists.
1120 */
1121 sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1122 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1123
1124 if (sc->tl_ldata == NULL) {
1125 device_printf(dev, "no memory for list buffers!\n");
1126 error = ENXIO;
1127 goto fail;
1128 }
1129
1130 bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1131
1132 if (vid == COMPAQ_VENDORID || vid == TI_VENDORID)
1133 sc->tl_eeaddr = TL_EEPROM_EADDR;
1134 if (vid == OLICOM_VENDORID)
1135 sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1136
1137 /* Reset the adapter. */
1138 tl_softreset(sc, 1);
1139 tl_hardreset(dev);
1140 tl_softreset(sc, 1);
1141
1142 /*
1143 * Get station address from the EEPROM.
1144 */
1145 if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1146 device_printf(dev, "failed to read station address\n");
1147 error = ENXIO;
1148 goto fail;
1149 }
1150
1151 /*
1152 * XXX Olicom, in its desire to be different from the
1153 * rest of the world, has done strange things with the
1154 * encoding of the station address in the EEPROM. First
1155 * of all, they store the address at offset 0xF8 rather
1156 * than at 0x83 like the ThunderLAN manual suggests.
1157 * Second, they store the address in three 16-bit words in
1158 * network byte order, as opposed to storing it sequentially
1159 * like all the other ThunderLAN cards. In order to get
1160 * the station address in a form that matches what the Olicom
1161 * diagnostic utility specifies, we have to byte-swap each
1162 * word. To make things even more confusing, neither 00:00:28
1163 * nor 00:00:24 appear in the IEEE OUI database.
1164 */
1165 if (vid == OLICOM_VENDORID) {
1166 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1167 u_int16_t *p;
1168 p = (u_int16_t *)&eaddr[i];
1169 *p = ntohs(*p);
1170 }
1171 }
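/*
 * Illustrative sketch (not part of the driver): what the Olicom word swap
 * above does. The EEPROM bytes here are made up purely for the example.
 * Like the driver's ntohs() loop, on a little-endian host this swaps the
 * two bytes of each 16-bit word in place; on a big-endian host it is a
 * no-op.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int
main(void)
{
	/* Hypothetical raw EEPROM contents, read as three 16-bit words. */
	uint8_t eaddr[6] = { 0x00, 0x00, 0x24, 0x33, 0x56, 0x78 };
	int i;

	for (i = 0; i < 6; i += 2) {
		uint16_t w;

		memcpy(&w, &eaddr[i], sizeof(w));
		w = ntohs(w);
		memcpy(&eaddr[i], &w, sizeof(w));
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
	return (0);
}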
1172
1173 ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
1174 if (ifp == NULL) {
1175 device_printf(dev, "can not if_alloc()\n");
1176 error = ENOSPC;
1177 goto fail;
1178 }
1179 ifp->if_softc = sc;
1180 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1181 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1182 ifp->if_ioctl = tl_ioctl;
1183 ifp->if_start = tl_start;
1184 ifp->if_init = tl_init;
1185 ifp->if_mtu = ETHERMTU;
1186 ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1187 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1188 ifp->if_capenable |= IFCAP_VLAN_MTU;
1189 callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);
1190
1191 /* Reset the adapter again. */
1192 tl_softreset(sc, 1);
1193 tl_hardreset(dev);
1194 tl_softreset(sc, 1);
1195
1196 /*
1197 * Do MII setup. If no PHYs are found, then this is a
1198 * bitrate ThunderLAN chip that only supports 10baseT
1199 * and AUI/BNC.
1200 * XXX mii_attach() can fail for reasons other than
1201 * no PHYs being found!
1202 */
1203 flags = 0;
1204 if (vid == COMPAQ_VENDORID) {
1205 if (did == COMPAQ_DEVICEID_NETEL_10_100_PROLIANT ||
1206 did == COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED ||
1207 did == COMPAQ_DEVICEID_NETFLEX_3P_BNC ||
1208 did == COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX)
1209 flags |= MIIF_MACPRIV0;
1210 if (did == COMPAQ_DEVICEID_NETEL_10 ||
1211 did == COMPAQ_DEVICEID_NETEL_10_100_DUAL ||
1212 did == COMPAQ_DEVICEID_NETFLEX_3P ||
1213 did == COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED)
1214 flags |= MIIF_MACPRIV1;
1215 } else if (vid == OLICOM_VENDORID && did == OLICOM_DEVICEID_OC2183)
1216 flags |= MIIF_MACPRIV0 | MIIF_MACPRIV1;
1217 if (mii_attach(dev, &sc->tl_miibus, ifp, tl_ifmedia_upd,
1218 tl_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0)) {
1219 struct ifmedia *ifm;
1220 sc->tl_bitrate = 1;
1221 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1222 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1223 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1224 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1225 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1226 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1227 /* Reset again, this time setting bitrate mode. */
1228 tl_softreset(sc, 1);
1229 ifm = &sc->ifmedia;
1230 ifm->ifm_media = ifm->ifm_cur->ifm_media;
1231 tl_ifmedia_upd(ifp);
1232 }
1233
1234 /*
1235 * Call MI attach routine.
1236 */
1237 ether_ifattach(ifp, eaddr);
1238
1239 /* Hook interrupt last to avoid having to lock softc */
1240 error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
1241 NULL, tl_intr, sc, &sc->tl_intrhand);
1242
1243 if (error) {
1244 device_printf(dev, "couldn't set up irq\n");
1245 ether_ifdetach(ifp);
1246 goto fail;
1247 }
1248
1249fail:
1250 if (error)
1251 tl_detach(dev);
1252
1253 return(error);
1254}
1255
1256/*
1257 * Shutdown hardware and free up resources. This can be called any
1258 * time after the mutex has been initialized. It is called in both
1259 * the error case in attach and the normal detach case so it needs
1260 * to be careful about only freeing resources that have actually been
1261 * allocated.
1262 */
1263static int
1264tl_detach(dev)
1265 device_t dev;
1266{
1267 struct tl_softc *sc;
1268 struct ifnet *ifp;
1269
1270 sc = device_get_softc(dev);
1271 KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
1272 ifp = sc->tl_ifp;
1273
1274 /* These should only be active if attach succeeded */
1275 if (device_is_attached(dev)) {
1276 ether_ifdetach(ifp);
1277 TL_LOCK(sc);
1278 tl_stop(sc);
1279 TL_UNLOCK(sc);
1280 callout_drain(&sc->tl_stat_callout);
1281 }
1282 if (sc->tl_miibus)
1283 device_delete_child(dev, sc->tl_miibus);
1284 bus_generic_detach(dev);
1285
1286 if (sc->tl_ldata)
1287 contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1288 if (sc->tl_bitrate)
1289 ifmedia_removeall(&sc->ifmedia);
1290
1291 if (sc->tl_intrhand)
1292 bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1293 if (sc->tl_irq)
1294 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1295 if (sc->tl_res)
1296 bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1297
1298 if (ifp)
1299 if_free(ifp);
1300
1301 mtx_destroy(&sc->tl_mtx);
1302
1303 return(0);
1304}
1305
1306/*
1307 * Initialize the transmit lists.
1308 */
1309static int
1310tl_list_tx_init(sc)
1311 struct tl_softc *sc;
1312{
1313 struct tl_chain_data *cd;
1314 struct tl_list_data *ld;
1315 int i;
1316
1317 cd = &sc->tl_cdata;
1318 ld = sc->tl_ldata;
1319 for (i = 0; i < TL_TX_LIST_CNT; i++) {
1320 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1321 if (i == (TL_TX_LIST_CNT - 1))
1322 cd->tl_tx_chain[i].tl_next = NULL;
1323 else
1324 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1325 }
1326
1327 cd->tl_tx_free = &cd->tl_tx_chain[0];
1328 cd->tl_tx_tail = cd->tl_tx_head = NULL;
1329 sc->tl_txeoc = 1;
1330
1331 return(0);
1332}
1333
1334/*
1335 * Initialize the RX lists and allocate mbufs for them.
1336 */
1337static int
1338tl_list_rx_init(sc)
1339 struct tl_softc *sc;
1340{
1418 struct tl_chain_data *cd;
1419 struct tl_list_data *ld;
1420 int i;
1341 struct tl_chain_data *cd;
1342 struct tl_list_data *ld;
1343 int i;
1421
1422 cd = &sc->tl_cdata;
1423 ld = sc->tl_ldata;
1424
1425 for (i = 0; i < TL_RX_LIST_CNT; i++) {
1426 cd->tl_rx_chain[i].tl_ptr =
1427 (struct tl_list_onefrag *)&ld->tl_rx_list[i];
1428 if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1429 return(ENOBUFS);
1430 if (i == (TL_RX_LIST_CNT - 1)) {
1431 cd->tl_rx_chain[i].tl_next = NULL;
1432 ld->tl_rx_list[i].tlist_fptr = 0;
1433 } else {
1434 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1435 ld->tl_rx_list[i].tlist_fptr =
1436 vtophys(&ld->tl_rx_list[i + 1]);
1437 }
1438 }
1439
1440 cd->tl_rx_head = &cd->tl_rx_chain[0];
1441 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1442
1443 return(0);
1444}
1445
1446static int
1447tl_newbuf(sc, c)
1448 struct tl_softc *sc;
1449 struct tl_chain_onefrag *c;
1450{
1451 struct mbuf *m_new = NULL;
1452
1453 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1454 if (m_new == NULL)
1455 return(ENOBUFS);
1456
1457 c->tl_mbuf = m_new;
1458 c->tl_next = NULL;
1459 c->tl_ptr->tlist_frsize = MCLBYTES;
1460 c->tl_ptr->tlist_fptr = 0;
1461 c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1462 c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1463 c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1464
1465 return(0);
1466}
1467/*
1468 * Interrupt handler for RX 'end of frame' condition (EOF). This
1469 * tells us that a full ethernet frame has been captured and we need
1470 * to handle it.
1471 *
1472 * Reception is done using 'lists' which consist of a header and a
1473 * series of 10 data count/data address pairs that point to buffers.
1474 * Initially you're supposed to create a list, populate it with pointers
1475 * to buffers, then load the physical address of the list into the
1476 * ch_parm register. The adapter is then supposed to DMA the received
1477 * frame into the buffers for you.
1478 *
1479 * To make things as fast as possible, we have the chip DMA directly
1480 * into mbufs. This saves us from having to do a buffer copy: we can
1481 * just hand the mbufs directly to ether_input(). Once the frame has
1482 * been sent on its way, the 'list' structure is assigned a new buffer
1483 * and moved to the end of the RX chain. As long as we stay ahead of
1484 * the chip, it will always think it has an endless receive channel.
1485 *
1486 * If we happen to fall behind and the chip manages to fill up all of
1487 * the buffers, it will generate an end of channel interrupt and wait
1488 * for us to empty the chain and restart the receiver.
1489 */
1490static int
1491tl_intvec_rxeof(xsc, type)
1492 void *xsc;
1493 u_int32_t type;
1494{
1495 struct tl_softc *sc;
1496 int r = 0, total_len = 0;
1497 struct ether_header *eh;
1498 struct mbuf *m;
1499 struct ifnet *ifp;
1500 struct tl_chain_onefrag *cur_rx;
1501
1502 sc = xsc;
1503 ifp = sc->tl_ifp;
1504
1505 TL_LOCK_ASSERT(sc);
1506
1507 while(sc->tl_cdata.tl_rx_head != NULL) {
1508 cur_rx = sc->tl_cdata.tl_rx_head;
1509 if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1510 break;
1511 r++;
1512 sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1513 m = cur_rx->tl_mbuf;
1514 total_len = cur_rx->tl_ptr->tlist_frsize;
1515
1516 if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1517 ifp->if_ierrors++;
1518 cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1519 cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1520 cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1521 continue;
1522 }
1523
1524 sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1525 vtophys(cur_rx->tl_ptr);
1526 sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1527 sc->tl_cdata.tl_rx_tail = cur_rx;
1528
1529 /*
1530 * Note: when the ThunderLAN chip is in 'capture all
1531 * frames' mode, it will receive its own transmissions.
1532 * We don't need to process our own transmissions,
1533 * so we drop them here and continue.
1534 */
1535 eh = mtod(m, struct ether_header *);
1536 /*if (ifp->if_flags & IFF_PROMISC && */
1537 if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp),
1538 ETHER_ADDR_LEN)) {
1539 m_freem(m);
1540 continue;
1541 }
1542
1543 m->m_pkthdr.rcvif = ifp;
1544 m->m_pkthdr.len = m->m_len = total_len;
1545
1546 TL_UNLOCK(sc);
1547 (*ifp->if_input)(ifp, m);
1548 TL_LOCK(sc);
1549 }
1550
1551 return(r);
1552}
1553
1554/*
1555 * The RX-EOC condition hits when the ch_parm address hasn't been
1556 * initialized or the adapter reached a list with a forward pointer
1557 * of 0 (which indicates the end of the chain). In our case, this means
1558 * the card has hit the end of the receive buffer chain and we need to
1559 * empty out the buffers and shift the pointer back to the beginning again.
1560 */
1561static int
1562tl_intvec_rxeoc(xsc, type)
1563 void *xsc;
1564 u_int32_t type;
1565{
1566 struct tl_softc *sc;
1567 int r;
1568 struct tl_chain_data *cd;
1569
1570
1571 sc = xsc;
1572 cd = &sc->tl_cdata;
1573
1574 /* Flush out the receive queue and ack RXEOF interrupts. */
1575 r = tl_intvec_rxeof(xsc, type);
1576 CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1577 r = 1;
1578 cd->tl_rx_head = &cd->tl_rx_chain[0];
1579 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1580 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1581 r |= (TL_CMD_GO|TL_CMD_RT);
1582 return(r);
1583}
1584
1585static int
1586tl_intvec_txeof(xsc, type)
1587 void *xsc;
1588 u_int32_t type;
1589{
1590 struct tl_softc *sc;
1591 int r = 0;
1592 struct tl_chain *cur_tx;
1593
1594 sc = xsc;
1595
1596 /*
1597 * Go through our tx list and free mbufs for those
1598 * frames that have been sent.
1599 */
1600 while (sc->tl_cdata.tl_tx_head != NULL) {
1601 cur_tx = sc->tl_cdata.tl_tx_head;
1602 if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1603 break;
1604 sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1605
1606 r++;
1607 m_freem(cur_tx->tl_mbuf);
1608 cur_tx->tl_mbuf = NULL;
1609
1610 cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1611 sc->tl_cdata.tl_tx_free = cur_tx;
1612 if (!cur_tx->tl_ptr->tlist_fptr)
1613 break;
1614 }
1615
1616 return(r);
1617}
1618
1619/*
1620 * The transmit end of channel interrupt. The adapter triggers this
1621 * interrupt to tell us it hit the end of the current transmit list.
1622 *
1623 * A note about this: it's possible for a condition to arise where
1624 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1625 * You have to avoid this since the chip expects things to go in a
1626 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1627 * When the TXEOF handler is called, it will free all of the transmitted
1628 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1629 * interrupt should be received and acknowledged before any more frames
1630 * are queued for transmission. If tl_start() is called after TXEOF
1631 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1632 * it could attempt to issue a transmit command prematurely.
1633 *
1634 * To guard against this, tl_start() will only issue transmit commands
1635 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1636 * can set this flag once tl_start() has cleared it.
1637 */
1638static int
1639tl_intvec_txeoc(xsc, type)
1640 void *xsc;
1641 u_int32_t type;
1642{
1643 struct tl_softc *sc;
1644 struct ifnet *ifp;
1645 u_int32_t cmd;
1646
1647 sc = xsc;
1648 ifp = sc->tl_ifp;
1649
1650 /* Clear the timeout timer. */
1651 sc->tl_timer = 0;
1652
1653 if (sc->tl_cdata.tl_tx_head == NULL) {
1654 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1655 sc->tl_cdata.tl_tx_tail = NULL;
1656 sc->tl_txeoc = 1;
1657 } else {
1658 sc->tl_txeoc = 0;
1659 /* First we have to ack the EOC interrupt. */
1660 CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1661 /* Then load the address of the next TX list. */
1662 CSR_WRITE_4(sc, TL_CH_PARM,
1663 vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1664 /* Restart TX channel. */
1665 cmd = CSR_READ_4(sc, TL_HOSTCMD);
1666 cmd &= ~TL_CMD_RT;
1667 cmd |= TL_CMD_GO|TL_CMD_INTSON;
1668 CMD_PUT(sc, cmd);
1669 return(0);
1670 }
1671
1672 return(1);
1673}
1674
1675static int
1676tl_intvec_adchk(xsc, type)
1677 void *xsc;
1678 u_int32_t type;
1679{
1680 struct tl_softc *sc;
1681
1682 sc = xsc;
1683
1684 if (type)
1685 device_printf(sc->tl_dev, "adapter check: %x\n",
1686 (unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1687
1688 tl_softreset(sc, 1);
1689 tl_stop(sc);
1690 tl_init_locked(sc);
1691 CMD_SET(sc, TL_CMD_INTSON);
1692
1693 return(0);
1694}
1695
1696static int
1697tl_intvec_netsts(xsc, type)
1698 void *xsc;
1699 u_int32_t type;
1700{
1701 struct tl_softc *sc;
1702 u_int16_t netsts;
1703
1704 sc = xsc;
1705
1706 netsts = tl_dio_read16(sc, TL_NETSTS);
1707 tl_dio_write16(sc, TL_NETSTS, netsts);
1708
1709 device_printf(sc->tl_dev, "network status: %x\n", netsts);
1710
1711 return(1);
1712}
1713
1714static void
1715tl_intr(xsc)
1716 void *xsc;
1717{
1718 struct tl_softc *sc;
1719 struct ifnet *ifp;
1720 int r = 0;
1721 u_int32_t type = 0;
1722 u_int16_t ints = 0;
1723 u_int8_t ivec = 0;
1724
1725 sc = xsc;
1726 TL_LOCK(sc);
1727
1728 /* Disable interrupts */
1729 ints = CSR_READ_2(sc, TL_HOST_INT);
1730 CSR_WRITE_2(sc, TL_HOST_INT, ints);
1731 type = (ints << 16) & 0xFFFF0000;
1732 ivec = (ints & TL_VEC_MASK) >> 5;
1733 ints = (ints & TL_INT_MASK) >> 2;
1734
1735 ifp = sc->tl_ifp;
1736
1737 switch(ints) {
1738 case (TL_INTR_INVALID):
1739#ifdef DIAGNOSTIC
1740 device_printf(sc->tl_dev, "got an invalid interrupt!\n");
1741#endif
1742 /* Re-enable interrupts but don't ack this one. */
1743 CMD_PUT(sc, type);
1744 r = 0;
1745 break;
1746 case (TL_INTR_TXEOF):
1747 r = tl_intvec_txeof((void *)sc, type);
1748 break;
1749 case (TL_INTR_TXEOC):
1750 r = tl_intvec_txeoc((void *)sc, type);
1751 break;
1752 case (TL_INTR_STATOFLOW):
1753 tl_stats_update(sc);
1754 r = 1;
1755 break;
1756 case (TL_INTR_RXEOF):
1757 r = tl_intvec_rxeof((void *)sc, type);
1758 break;
1759 case (TL_INTR_DUMMY):
1760 device_printf(sc->tl_dev, "got a dummy interrupt\n");
1761 r = 1;
1762 break;
1763 case (TL_INTR_ADCHK):
1764 if (ivec)
1765 r = tl_intvec_adchk((void *)sc, type);
1766 else
1767 r = tl_intvec_netsts((void *)sc, type);
1768 break;
1769 case (TL_INTR_RXEOC):
1770 r = tl_intvec_rxeoc((void *)sc, type);
1771 break;
1772 default:
1773 device_printf(sc->tl_dev, "bogus interrupt type\n");
1774 break;
1775 }
1776
1777 /* Re-enable interrupts */
1778 if (r) {
1779 CMD_PUT(sc, TL_CMD_ACK | r | type);
1780 }
1781
1782 if (ifp->if_snd.ifq_head != NULL)
1783 tl_start_locked(ifp);
1784
1785 TL_UNLOCK(sc);
1344
1345 cd = &sc->tl_cdata;
1346 ld = sc->tl_ldata;
1347
1348 for (i = 0; i < TL_RX_LIST_CNT; i++) {
1349 cd->tl_rx_chain[i].tl_ptr =
1350 (struct tl_list_onefrag *)&ld->tl_rx_list[i];
1351 if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1352 return(ENOBUFS);
1353 if (i == (TL_RX_LIST_CNT - 1)) {
1354 cd->tl_rx_chain[i].tl_next = NULL;
1355 ld->tl_rx_list[i].tlist_fptr = 0;
1356 } else {
1357 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1358 ld->tl_rx_list[i].tlist_fptr =
1359 vtophys(&ld->tl_rx_list[i + 1]);
1360 }
1361 }
1362
1363 cd->tl_rx_head = &cd->tl_rx_chain[0];
1364 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1365
1366 return(0);
1367}
1368
1369static int
1370tl_newbuf(sc, c)
1371 struct tl_softc *sc;
1372 struct tl_chain_onefrag *c;
1373{
1374 struct mbuf *m_new = NULL;
1375
1376 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1377 if (m_new == NULL)
1378 return(ENOBUFS);
1379
1380 c->tl_mbuf = m_new;
1381 c->tl_next = NULL;
1382 c->tl_ptr->tlist_frsize = MCLBYTES;
1383 c->tl_ptr->tlist_fptr = 0;
1384 c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1385 c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1386 c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1387
1388 return(0);
1389}
1390/*
1391 * Interrupt handler for RX 'end of frame' condition (EOF). This
1392 * tells us that a full ethernet frame has been captured and we need
1393 * to handle it.
1394 *
1395 * Reception is done using 'lists' which consist of a header and a
1396 * series of 10 data count/data address pairs that point to buffers.
1397 * Initially you're supposed to create a list, populate it with pointers
1398 * to buffers, then load the physical address of the list into the
1399 * ch_parm register. The adapter is then supposed to DMA the received
1400 * frame into the buffers for you.
1401 *
1402 * To make things as fast as possible, we have the chip DMA directly
1403 * into mbufs. This saves us from having to do a buffer copy: we can
1404 * just hand the mbufs directly to ether_input(). Once the frame has
1405 * been sent on its way, the 'list' structure is assigned a new buffer
1406 * and moved to the end of the RX chain. As long as we stay ahead of
1407 * the chip, it will always think it has an endless receive channel.
1408 *
1409 * If we happen to fall behind and the chip manages to fill up all of
1410 * the buffers, it will generate an end of channel interrupt and wait
1411 * for us to empty the chain and restart the receiver.
1412 */
1413static int
1414tl_intvec_rxeof(xsc, type)
1415 void *xsc;
1416 u_int32_t type;
1417{
1418 struct tl_softc *sc;
1419 int r = 0, total_len = 0;
1420 struct ether_header *eh;
1421 struct mbuf *m;
1422 struct ifnet *ifp;
1423 struct tl_chain_onefrag *cur_rx;
1424
1425 sc = xsc;
1426 ifp = sc->tl_ifp;
1427
1428 TL_LOCK_ASSERT(sc);
1429
1430 while(sc->tl_cdata.tl_rx_head != NULL) {
1431 cur_rx = sc->tl_cdata.tl_rx_head;
1432 if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1433 break;
1434 r++;
1435 sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1436 m = cur_rx->tl_mbuf;
1437 total_len = cur_rx->tl_ptr->tlist_frsize;
1438
1439 if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1440 ifp->if_ierrors++;
1441 cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1442 cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1443 cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1444 continue;
1445 }
1446
1447 sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1448 vtophys(cur_rx->tl_ptr);
1449 sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1450 sc->tl_cdata.tl_rx_tail = cur_rx;
1451
1452 /*
1453 * Note: when the ThunderLAN chip is in 'capture all
1454 * frames' mode, it will receive its own transmissions.
1455 * We don't need to process our own transmissions,
1456 * so we drop them here and continue.
1457 */
1458 eh = mtod(m, struct ether_header *);
1459 /*if (ifp->if_flags & IFF_PROMISC && */
1460 if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp),
1461 ETHER_ADDR_LEN)) {
1462 m_freem(m);
1463 continue;
1464 }
1465
1466 m->m_pkthdr.rcvif = ifp;
1467 m->m_pkthdr.len = m->m_len = total_len;
1468
1469 TL_UNLOCK(sc);
1470 (*ifp->if_input)(ifp, m);
1471 TL_LOCK(sc);
1472 }
1473
1474 return(r);
1475}
1476
1477/*
1478 * The RX-EOC condition hits when the ch_parm address hasn't been
1479 * initialized or the adapter reached a list with a forward pointer
1480 * of 0 (which indicates the end of the chain). In our case, this means
1481 * the card has hit the end of the receive buffer chain and we need to
1482 * empty out the buffers and shift the pointer back to the beginning again.
1483 */
1484static int
1485tl_intvec_rxeoc(xsc, type)
1486 void *xsc;
1487 u_int32_t type;
1488{
1489 struct tl_softc *sc;
1490 int r;
1491 struct tl_chain_data *cd;
1492
1493
1494 sc = xsc;
1495 cd = &sc->tl_cdata;
1496
1497 /* Flush out the receive queue and ack RXEOF interrupts. */
1498 r = tl_intvec_rxeof(xsc, type);
1499 CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1500 r = 1;
1501 cd->tl_rx_head = &cd->tl_rx_chain[0];
1502 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1503 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1504 r |= (TL_CMD_GO|TL_CMD_RT);
1505 return(r);
1506}
1507
1508static int
1509tl_intvec_txeof(xsc, type)
1510 void *xsc;
1511 u_int32_t type;
1512{
1513 struct tl_softc *sc;
1514 int r = 0;
1515 struct tl_chain *cur_tx;
1516
1517 sc = xsc;
1518
1519 /*
1520 * Go through our tx list and free mbufs for those
1521 * frames that have been sent.
1522 */
1523 while (sc->tl_cdata.tl_tx_head != NULL) {
1524 cur_tx = sc->tl_cdata.tl_tx_head;
1525 if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1526 break;
1527 sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1528
1529 r++;
1530 m_freem(cur_tx->tl_mbuf);
1531 cur_tx->tl_mbuf = NULL;
1532
1533 cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1534 sc->tl_cdata.tl_tx_free = cur_tx;
1535 if (!cur_tx->tl_ptr->tlist_fptr)
1536 break;
1537 }
1538
1539 return(r);
1540}
1541
1542/*
1543 * The transmit end of channel interrupt. The adapter triggers this
1544 * interrupt to tell us it hit the end of the current transmit list.
1545 *
1546 * A note about this: it's possible for a condition to arise where
1547 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1548 * You have to avoid this since the chip expects things to go in a
1549 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1550 * When the TXEOF handler is called, it will free all of the transmitted
1551 * frames and reset the tx_head pointer to NULL. However, a TXEOC
1552 * interrupt should be received and acknowledged before any more frames
1553 * are queued for transmission. If tl_start() is called after TXEOF
1554 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1555 * it could attempt to issue a transmit command prematurely.
1556 *
1557 * To guard against this, tl_start() will only issue transmit commands
1558 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1559 * can set this flag once tl_start() has cleared it.
1560 */
1561static int
1562tl_intvec_txeoc(xsc, type)
1563 void *xsc;
1564 u_int32_t type;
1565{
1566 struct tl_softc *sc;
1567 struct ifnet *ifp;
1568 u_int32_t cmd;
1569
1570 sc = xsc;
1571 ifp = sc->tl_ifp;
1572
1573 /* Clear the timeout timer. */
1574 sc->tl_timer = 0;
1575
1576 if (sc->tl_cdata.tl_tx_head == NULL) {
1577 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1578 sc->tl_cdata.tl_tx_tail = NULL;
1579 sc->tl_txeoc = 1;
1580 } else {
1581 sc->tl_txeoc = 0;
1582 /* First we have to ack the EOC interrupt. */
1583 CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1584 /* Then load the address of the next TX list. */
1585 CSR_WRITE_4(sc, TL_CH_PARM,
1586 vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1587 /* Restart TX channel. */
1588 cmd = CSR_READ_4(sc, TL_HOSTCMD);
1589 cmd &= ~TL_CMD_RT;
1590 cmd |= TL_CMD_GO|TL_CMD_INTSON;
1591 CMD_PUT(sc, cmd);
1592 return(0);
1593 }
1594
1595 return(1);
1596}
1597
1598static int
1599tl_intvec_adchk(xsc, type)
1600 void *xsc;
1601 u_int32_t type;
1602{
1603 struct tl_softc *sc;
1604
1605 sc = xsc;
1606
1607 if (type)
1608 device_printf(sc->tl_dev, "adapter check: %x\n",
1609 (unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1610
1611 tl_softreset(sc, 1);
1612 tl_stop(sc);
1613 tl_init_locked(sc);
1614 CMD_SET(sc, TL_CMD_INTSON);
1615
1616 return(0);
1617}
1618
1619static int
1620tl_intvec_netsts(xsc, type)
1621 void *xsc;
1622 u_int32_t type;
1623{
1624 struct tl_softc *sc;
1625 u_int16_t netsts;
1626
1627 sc = xsc;
1628
1629 netsts = tl_dio_read16(sc, TL_NETSTS);
1630 tl_dio_write16(sc, TL_NETSTS, netsts);
1631
1632 device_printf(sc->tl_dev, "network status: %x\n", netsts);
1633
1634 return(1);
1635}
1636
1637static void
1638tl_intr(xsc)
1639 void *xsc;
1640{
1641 struct tl_softc *sc;
1642 struct ifnet *ifp;
1643 int r = 0;
1644 u_int32_t type = 0;
1645 u_int16_t ints = 0;
1646 u_int8_t ivec = 0;
1647
1648 sc = xsc;
1649 TL_LOCK(sc);
1650
1651 /* Disable interrupts */
1652 ints = CSR_READ_2(sc, TL_HOST_INT);
1653 CSR_WRITE_2(sc, TL_HOST_INT, ints);
1654 type = (ints << 16) & 0xFFFF0000;
1655 ivec = (ints & TL_VEC_MASK) >> 5;
1656 ints = (ints & TL_INT_MASK) >> 2;
1657
1658 ifp = sc->tl_ifp;
1659
1660 switch(ints) {
1661 case (TL_INTR_INVALID):
1662#ifdef DIAGNOSTIC
1663 device_printf(sc->tl_dev, "got an invalid interrupt!\n");
1664#endif
1665 /* Re-enable interrupts but don't ack this one. */
1666 CMD_PUT(sc, type);
1667 r = 0;
1668 break;
1669 case (TL_INTR_TXEOF):
1670 r = tl_intvec_txeof((void *)sc, type);
1671 break;
1672 case (TL_INTR_TXEOC):
1673 r = tl_intvec_txeoc((void *)sc, type);
1674 break;
1675 case (TL_INTR_STATOFLOW):
1676 tl_stats_update(sc);
1677 r = 1;
1678 break;
1679 case (TL_INTR_RXEOF):
1680 r = tl_intvec_rxeof((void *)sc, type);
1681 break;
1682 case (TL_INTR_DUMMY):
1683 device_printf(sc->tl_dev, "got a dummy interrupt\n");
1684 r = 1;
1685 break;
1686 case (TL_INTR_ADCHK):
1687 if (ivec)
1688 r = tl_intvec_adchk((void *)sc, type);
1689 else
1690 r = tl_intvec_netsts((void *)sc, type);
1691 break;
1692 case (TL_INTR_RXEOC):
1693 r = tl_intvec_rxeoc((void *)sc, type);
1694 break;
1695 default:
1696 device_printf(sc->tl_dev, "bogus interrupt type\n");
1697 break;
1698 }
1699
1700 /* Re-enable interrupts */
1701 if (r) {
1702 CMD_PUT(sc, TL_CMD_ACK | r | type);
1703 }
1704
1705 if (ifp->if_snd.ifq_head != NULL)
1706 tl_start_locked(ifp);
1707
1708 TL_UNLOCK(sc);
1786
1787 return;
1788}
1789
1790static void
1791tl_stats_update(xsc)
1792 void *xsc;
1793{
1794 struct tl_softc *sc;
1795 struct ifnet *ifp;
1796 struct tl_stats tl_stats;
1797 struct mii_data *mii;
1798 u_int32_t *p;
1799
1800 bzero((char *)&tl_stats, sizeof(struct tl_stats));
1801
1802 sc = xsc;
1803 TL_LOCK_ASSERT(sc);
1804 ifp = sc->tl_ifp;
1805
1806 p = (u_int32_t *)&tl_stats;
1807
1808 CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1809 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1810 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1811 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1812 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1813 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1814
1815 ifp->if_opackets += tl_tx_goodframes(tl_stats);
1816 ifp->if_collisions += tl_stats.tl_tx_single_collision +
1817 tl_stats.tl_tx_multi_collision;
1818 ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1819 ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1820 tl_rx_overrun(tl_stats);
1821 ifp->if_oerrors += tl_tx_underrun(tl_stats);
1822
1823 if (tl_tx_underrun(tl_stats)) {
1824 u_int8_t tx_thresh;
1825 tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1826 if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1827 tx_thresh >>= 4;
1828 tx_thresh++;
1829 device_printf(sc->tl_dev, "tx underrun -- increasing "
1830 "tx threshold to %d bytes\n",
1831 (64 * (tx_thresh * 4)));
1832 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1833 tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1834 }
1835 }
1836
1837 if (sc->tl_timer > 0 && --sc->tl_timer == 0)
1838 tl_watchdog(sc);
1839
1840 callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1841
1842 if (!sc->tl_bitrate) {
1843 mii = device_get_softc(sc->tl_miibus);
1844 mii_tick(mii);
1845 }
1709}
1710
1711static void
1712tl_stats_update(xsc)
1713 void *xsc;
1714{
1715 struct tl_softc *sc;
1716 struct ifnet *ifp;
1717 struct tl_stats tl_stats;
1718 struct mii_data *mii;
1719 u_int32_t *p;
1720
1721 bzero((char *)&tl_stats, sizeof(struct tl_stats));
1722
1723 sc = xsc;
1724 TL_LOCK_ASSERT(sc);
1725 ifp = sc->tl_ifp;
1726
1727 p = (u_int32_t *)&tl_stats;
1728
1729 CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1730 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1731 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1732 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1733 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1734 *p++ = CSR_READ_4(sc, TL_DIO_DATA);
1735
1736 ifp->if_opackets += tl_tx_goodframes(tl_stats);
1737 ifp->if_collisions += tl_stats.tl_tx_single_collision +
1738 tl_stats.tl_tx_multi_collision;
1739 ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1740 ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1741 tl_rx_overrun(tl_stats);
1742 ifp->if_oerrors += tl_tx_underrun(tl_stats);
1743
1744 if (tl_tx_underrun(tl_stats)) {
1745 u_int8_t tx_thresh;
1746 tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1747 if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1748 tx_thresh >>= 4;
1749 tx_thresh++;
1750 device_printf(sc->tl_dev, "tx underrun -- increasing "
1751 "tx threshold to %d bytes\n",
1752 (64 * (tx_thresh * 4)));
1753 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1754 tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1755 }
1756 }
1757
1758 if (sc->tl_timer > 0 && --sc->tl_timer == 0)
1759 tl_watchdog(sc);
1760
1761 callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1762
1763 if (!sc->tl_bitrate) {
1764 mii = device_get_softc(sc->tl_miibus);
1765 mii_tick(mii);
1766 }
1846
1847 return;
1848}
1849
1850/*
1851 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1852 * pointers to the fragment pointers.
1853 */
1854static int
1855tl_encap(sc, c, m_head)
1856 struct tl_softc *sc;
1857 struct tl_chain *c;
1858 struct mbuf *m_head;
1859{
1860 int frag = 0;
1861 struct tl_frag *f = NULL;
1862 int total_len;
1863 struct mbuf *m;
1864 struct ifnet *ifp = sc->tl_ifp;
1865
1866 /*
1867 * Start packing the mbufs in this chain into
1868 * the fragment pointers. Stop when we run out
1869 * of fragments or hit the end of the mbuf chain.
1870 */
1871 m = m_head;
1872 total_len = 0;
1873
1874 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1875 if (m->m_len != 0) {
1876 if (frag == TL_MAXFRAGS)
1877 break;
1878 total_len+= m->m_len;
1879 c->tl_ptr->tl_frag[frag].tlist_dadr =
1880 vtophys(mtod(m, vm_offset_t));
1881 c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1882 frag++;
1883 }
1884 }
1885
1886 /*
1887 * Handle special cases.
1888 * Special case #1: we used up all 10 fragments, but
1889 * we have more mbufs left in the chain. Copy the
1890 * data into an mbuf cluster. Note that we don't
1891 * bother clearing the values in the other fragment
1892 * pointers/counters; it wouldn't gain us anything,
1893 * and would waste cycles.
1894 */
1895 if (m != NULL) {
1896 struct mbuf *m_new = NULL;
1897
1898 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1899 if (m_new == NULL) {
1900 if_printf(ifp, "no memory for tx list\n");
1901 return(1);
1902 }
1903 if (m_head->m_pkthdr.len > MHLEN) {
1904 MCLGET(m_new, M_DONTWAIT);
1905 if (!(m_new->m_flags & M_EXT)) {
1906 m_freem(m_new);
1907 if_printf(ifp, "no memory for tx list\n");
1908 return(1);
1909 }
1910 }
1911 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1912 mtod(m_new, caddr_t));
1913 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1914 m_freem(m_head);
1915 m_head = m_new;
1916 f = &c->tl_ptr->tl_frag[0];
1917 f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1918 f->tlist_dcnt = total_len = m_new->m_len;
1919 frag = 1;
1920 }
1921
1922 /*
1923 * Special case #2: the frame is smaller than the minimum
1924 * frame size. We have to pad it to make the chip happy.
1925 */
1926 if (total_len < TL_MIN_FRAMELEN) {
1927 if (frag == TL_MAXFRAGS)
1928 if_printf(ifp,
1929 "all frags filled but frame still too small!\n");
1930 f = &c->tl_ptr->tl_frag[frag];
1931 f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1932 f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1933 total_len += f->tlist_dcnt;
1934 frag++;
1935 }
1936
1937 c->tl_mbuf = m_head;
1938 c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1939 c->tl_ptr->tlist_frsize = total_len;
1940 c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1941 c->tl_ptr->tlist_fptr = 0;
1942
1943 return(0);
1944}
1945
1946/*
1947 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1948 * to the mbuf data regions directly in the transmit lists. We also save a
1949 * copy of the pointers since the transmit list fragment pointers are
1950 * physical addresses.
1951 */
1952static void
1953tl_start(ifp)
1954 struct ifnet *ifp;
1955{
1956 struct tl_softc *sc;
1957
1958 sc = ifp->if_softc;
1959 TL_LOCK(sc);
1960 tl_start_locked(ifp);
1961 TL_UNLOCK(sc);
1962}
1963
1964static void
1965tl_start_locked(ifp)
1966 struct ifnet *ifp;
1967{
1968 struct tl_softc *sc;
1969 struct mbuf *m_head = NULL;
1970 u_int32_t cmd;
1971 struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
1972
1973 sc = ifp->if_softc;
1974 TL_LOCK_ASSERT(sc);
1975
1976 /*
1977 * Check for an available queue slot. If there are none,
1978 * punt.
1979 */
1980 if (sc->tl_cdata.tl_tx_free == NULL) {
1981 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1982 return;
1983 }
1984
1985 start_tx = sc->tl_cdata.tl_tx_free;
1986
1987 while(sc->tl_cdata.tl_tx_free != NULL) {
1988 IF_DEQUEUE(&ifp->if_snd, m_head);
1989 if (m_head == NULL)
1990 break;
1991
1992 /* Pick a chain member off the free list. */
1993 cur_tx = sc->tl_cdata.tl_tx_free;
1994 sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1995
1996 cur_tx->tl_next = NULL;
1997
1998 /* Pack the data into the list. */
1999 tl_encap(sc, cur_tx, m_head);
2000
2001 /* Chain it together */
2002 if (prev != NULL) {
2003 prev->tl_next = cur_tx;
2004 prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
2005 }
2006 prev = cur_tx;
2007
2008 /*
2009 * If there's a BPF listener, bounce a copy of this frame
2010 * to him.
2011 */
2012 BPF_MTAP(ifp, cur_tx->tl_mbuf);
2013 }
2014
2015 /*
2016 * If there are no packets queued, bail.
2017 */
2018 if (cur_tx == NULL)
2019 return;
2020
2021 /*
2022 * That's all we can stands, we can't stands no more.
2023 * If there are no other transfers pending, then issue the
2024 * TX GO command to the adapter to start things moving.
2025 * Otherwise, just leave the data in the queue and let
2026 * the EOF/EOC interrupt handler send.
2027 */
2028 if (sc->tl_cdata.tl_tx_head == NULL) {
2029 sc->tl_cdata.tl_tx_head = start_tx;
2030 sc->tl_cdata.tl_tx_tail = cur_tx;
2031
2032 if (sc->tl_txeoc) {
2033 sc->tl_txeoc = 0;
2034 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
2035 cmd = CSR_READ_4(sc, TL_HOSTCMD);
2036 cmd &= ~TL_CMD_RT;
2037 cmd |= TL_CMD_GO|TL_CMD_INTSON;
2038 CMD_PUT(sc, cmd);
2039 }
2040 } else {
2041 sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
2042 sc->tl_cdata.tl_tx_tail = cur_tx;
2043 }
2044
2045 /*
2046 * Set a timeout in case the chip goes out to lunch.
2047 */
2048 sc->tl_timer = 5;
1767}
1768
1769/*
1770 * Encapsulate an mbuf chain in a list by coupling the mbuf data
1771 * pointers to the fragment pointers.
1772 */
1773static int
1774tl_encap(sc, c, m_head)
1775 struct tl_softc *sc;
1776 struct tl_chain *c;
1777 struct mbuf *m_head;
1778{
1779 int frag = 0;
1780 struct tl_frag *f = NULL;
1781 int total_len;
1782 struct mbuf *m;
1783 struct ifnet *ifp = sc->tl_ifp;
1784
1785 /*
1786 * Start packing the mbufs in this chain into
1787 * the fragment pointers. Stop when we run out
1788 * of fragments or hit the end of the mbuf chain.
1789 */
1790 m = m_head;
1791 total_len = 0;
1792
1793 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1794 if (m->m_len != 0) {
1795 if (frag == TL_MAXFRAGS)
1796 break;
1797 total_len += m->m_len;
1798 c->tl_ptr->tl_frag[frag].tlist_dadr =
1799 vtophys(mtod(m, vm_offset_t));
1800 c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1801 frag++;
1802 }
1803 }
1804
1805 /*
1806 * Handle special cases.
1807 * Special case #1: we used up all 10 fragments, but
1808 * we have more mbufs left in the chain. Copy the
1809 * data into an mbuf cluster. Note that we don't
1810 * bother clearing the values in the other fragment
1811 * pointers/counters; it wouldn't gain us anything,
1812 * and would waste cycles.
1813 */
1814 if (m != NULL) {
1815 struct mbuf *m_new = NULL;
1816
1817 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1818 if (m_new == NULL) {
1819 if_printf(ifp, "no memory for tx list\n");
1820 return(1);
1821 }
1822 if (m_head->m_pkthdr.len > MHLEN) {
1823 MCLGET(m_new, M_DONTWAIT);
1824 if (!(m_new->m_flags & M_EXT)) {
1825 m_freem(m_new);
1826 if_printf(ifp, "no memory for tx list\n");
1827 return(1);
1828 }
1829 }
1830 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1831 mtod(m_new, caddr_t));
1832 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1833 m_freem(m_head);
1834 m_head = m_new;
1835 f = &c->tl_ptr->tl_frag[0];
1836 f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1837 f->tlist_dcnt = total_len = m_new->m_len;
1838 frag = 1;
1839 }
1840
1841 /*
1842 * Special case #2: the frame is smaller than the minimum
1843 * frame size. We have to pad it to make the chip happy.
1844 */
1845 if (total_len < TL_MIN_FRAMELEN) {
1846 if (frag == TL_MAXFRAGS)
1847 if_printf(ifp,
1848 "all frags filled but frame still to small!\n");
1849 f = &c->tl_ptr->tl_frag[frag];
1850 f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1851 f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1852 total_len += f->tlist_dcnt;
1853 frag++;
1854 }
1855
1856 c->tl_mbuf = m_head;
1857 c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1858 c->tl_ptr->tlist_frsize = total_len;
1859 c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1860 c->tl_ptr->tlist_fptr = 0;
1861
1862 return(0);
1863}
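
/*
 * Illustrative sketch, not from the driver source: a simplified userland
 * model of the scatter/gather fill that tl_encap() above performs.  The
 * names ex_frag, ex_buf, ex_fill_frags, EX_MAXFRAGS and EX_MIN_FRAMELEN
 * are hypothetical stand-ins for the real tl_frag/mbuf machinery, and a
 * plain pointer cast stands in for vtophys().  It shows the same three
 * steps: record one (address, length) fragment per non-empty buffer,
 * give up when the fragment table is full (the driver then coalesces the
 * chain into one cluster), and pad runt frames up to a minimum length
 * using a shared scratch buffer, as tl_encap() does with tl_pad.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_MAXFRAGS	10	/* plays the role of TL_MAXFRAGS */
#define EX_MIN_FRAMELEN	64	/* hypothetical minimum frame length */

struct ex_frag {		/* address/length pair, like tl_frag */
	uintptr_t	dadr;	/* bus address in the real driver */
	uint32_t	dcnt;
};

struct ex_buf {			/* stand-in for one mbuf in a chain */
	const void	*data;
	uint32_t	 len;
	struct ex_buf	*next;
};

static uint8_t ex_pad[EX_MIN_FRAMELEN];	/* shared padding area, like tl_pad */

/*
 * Fill 'frags' from 'chain'.  The array needs EX_MAXFRAGS + 1 slots so a
 * padding fragment always fits.  Returns the number of fragments used,
 * or -1 if the chain has more pieces than fit (caller must coalesce).
 */
static int
ex_fill_frags(struct ex_frag frags[EX_MAXFRAGS + 1], const struct ex_buf *chain)
{
	const struct ex_buf *b;
	uint32_t total = 0;
	int frag = 0;

	for (b = chain; b != NULL; b = b->next) {
		if (b->len == 0)
			continue;
		if (frag == EX_MAXFRAGS)
			return (-1);	/* too many pieces: caller coalesces */
		frags[frag].dadr = (uintptr_t)b->data;	/* vtophys() in the driver */
		frags[frag].dcnt = b->len;
		total += b->len;
		frag++;
	}
	if (total < EX_MIN_FRAMELEN) {
		/* Pad runt frames; the spare array slot is reserved for this. */
		frags[frag].dadr = (uintptr_t)ex_pad;
		frags[frag].dcnt = EX_MIN_FRAMELEN - total;
		frag++;
	}
	return (frag);
}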
1864
1865/*
1866 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1867 * to the mbuf data regions directly in the transmit lists. We also save a
1868 * copy of the pointers since the transmit list fragment pointers are
1869 * physical addresses.
1870 */
1871static void
1872tl_start(ifp)
1873 struct ifnet *ifp;
1874{
1875 struct tl_softc *sc;
1876
1877 sc = ifp->if_softc;
1878 TL_LOCK(sc);
1879 tl_start_locked(ifp);
1880 TL_UNLOCK(sc);
1881}
1882
1883static void
1884tl_start_locked(ifp)
1885 struct ifnet *ifp;
1886{
1887 struct tl_softc *sc;
1888 struct mbuf *m_head = NULL;
1889 u_int32_t cmd;
1890 struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
1891
1892 sc = ifp->if_softc;
1893 TL_LOCK_ASSERT(sc);
1894
1895 /*
1896 * Check for an available queue slot. If there are none,
1897 * punt.
1898 */
1899 if (sc->tl_cdata.tl_tx_free == NULL) {
1900 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1901 return;
1902 }
1903
1904 start_tx = sc->tl_cdata.tl_tx_free;
1905
1906 while(sc->tl_cdata.tl_tx_free != NULL) {
1907 IF_DEQUEUE(&ifp->if_snd, m_head);
1908 if (m_head == NULL)
1909 break;
1910
1911 /* Pick a chain member off the free list. */
1912 cur_tx = sc->tl_cdata.tl_tx_free;
1913 sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1914
1915 cur_tx->tl_next = NULL;
1916
1917 /* Pack the data into the list. */
1918 tl_encap(sc, cur_tx, m_head);
1919
1920 /* Chain it together */
1921 if (prev != NULL) {
1922 prev->tl_next = cur_tx;
1923 prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1924 }
1925 prev = cur_tx;
1926
1927 /*
1928 * If there's a BPF listener, bounce a copy of this frame
1929 * to him.
1930 */
1931 BPF_MTAP(ifp, cur_tx->tl_mbuf);
1932 }
1933
1934 /*
1935 * If there are no packets queued, bail.
1936 */
1937 if (cur_tx == NULL)
1938 return;
1939
1940 /*
1941 * That's all we can stands, we can't stands no more.
1942 * If there are no other transfers pending, then issue the
1943 * TX GO command to the adapter to start things moving.
1944 * Otherwise, just leave the data in the queue and let
1945 * the EOF/EOC interrupt handler send.
1946 */
1947 if (sc->tl_cdata.tl_tx_head == NULL) {
1948 sc->tl_cdata.tl_tx_head = start_tx;
1949 sc->tl_cdata.tl_tx_tail = cur_tx;
1950
1951 if (sc->tl_txeoc) {
1952 sc->tl_txeoc = 0;
1953 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
1954 cmd = CSR_READ_4(sc, TL_HOSTCMD);
1955 cmd &= ~TL_CMD_RT;
1956 cmd |= TL_CMD_GO|TL_CMD_INTSON;
1957 CMD_PUT(sc, cmd);
1958 }
1959 } else {
1960 sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
1961 sc->tl_cdata.tl_tx_tail = cur_tx;
1962 }
1963
1964 /*
1965 * Set a timeout in case the chip goes out to lunch.
1966 */
1967 sc->tl_timer = 5;
2049
2050 return;
2051}
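
/*
 * Illustrative sketch, not from the driver source: the queueing policy
 * that tl_start_locked() above implements, reduced to its core.  The
 * names ex_desc, ex_txq, ex_hw_kick and ex_tx_enqueue are hypothetical;
 * ex_hw_kick() stands in for writing TL_CH_PARM and issuing TL_CMD_GO,
 * and 'idle' plays the role of sc->tl_txeoc.  New work is always linked
 * onto the software list; the hardware is only kicked when the TX
 * channel is idle, otherwise the EOF/EOC interrupt handler picks up the
 * newly chained descriptors on its own.
 */
#include <stddef.h>

struct ex_desc {
	struct ex_desc *next;
};

struct ex_txq {
	struct ex_desc *head;	/* oldest descriptor given to hardware */
	struct ex_desc *tail;	/* newest descriptor */
	int		idle;	/* nonzero once the channel reports EOC */
};

static void
ex_hw_kick(struct ex_desc *first)
{
	/* In the driver: write the list address to TL_CH_PARM, set GO. */
	(void)first;
}

static void
ex_tx_enqueue(struct ex_txq *q, struct ex_desc *first, struct ex_desc *last)
{
	if (q->head == NULL) {
		/* Nothing pending: adopt the new list and kick if idle. */
		q->head = first;
		q->tail = last;
		if (q->idle) {
			q->idle = 0;
			ex_hw_kick(first);
		}
	} else {
		/* Channel already has work: just chain onto the tail. */
		q->tail->next = first;
		q->tail = last;
	}
}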
2052
2053static void
2054tl_init(xsc)
2055 void *xsc;
2056{
2057 struct tl_softc *sc = xsc;
2058
2059 TL_LOCK(sc);
2060 tl_init_locked(sc);
2061 TL_UNLOCK(sc);
2062}
2063
2064static void
2065tl_init_locked(sc)
2066 struct tl_softc *sc;
2067{
2068 struct ifnet *ifp = sc->tl_ifp;
2069 struct mii_data *mii;
2070
2071 TL_LOCK_ASSERT(sc);
2072
2073 ifp = sc->tl_ifp;
2074
2075 /*
2076 * Cancel pending I/O.
2077 */
2078 tl_stop(sc);
2079
2080 /* Initialize TX FIFO threshold */
2081 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
2082 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2083
2084 /* Set PCI burst size */
2085 tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2086
2087 /*
2088 * Set 'capture all frames' bit for promiscuous mode.
2089 */
2090 if (ifp->if_flags & IFF_PROMISC)
2091 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2092 else
2093 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2094
2095 /*
2096 * Set capture broadcast bit to capture broadcast frames.
2097 */
2098 if (ifp->if_flags & IFF_BROADCAST)
2099 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2100 else
2101 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2102
2103 tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2104
2105 /* Init our MAC address */
2106 tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0);
2107
2108 /* Init multicast filter, if needed. */
2109 tl_setmulti(sc);
2110
2111 /* Init circular RX list. */
2112 if (tl_list_rx_init(sc) == ENOBUFS) {
2113 device_printf(sc->tl_dev,
2114 "initialization failed: no memory for rx buffers\n");
2115 tl_stop(sc);
2116 return;
2117 }
2118
2119 /* Init TX pointers. */
2120 tl_list_tx_init(sc);
2121
2122 /* Enable PCI interrupts. */
2123 CMD_SET(sc, TL_CMD_INTSON);
2124
2125 /* Load the address of the rx list */
2126 CMD_SET(sc, TL_CMD_RT);
2127 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2128
2129 if (!sc->tl_bitrate) {
2130 if (sc->tl_miibus != NULL) {
2131 mii = device_get_softc(sc->tl_miibus);
2132 mii_mediachg(mii);
2133 }
2134 } else {
2135 tl_ifmedia_upd(ifp);
2136 }
2137
2138 /* Send the RX go command */
2139 CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2140
2141 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2142 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2143
2144 /* Start the stats update counter */
2145 callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
1968}
1969
1970static void
1971tl_init(xsc)
1972 void *xsc;
1973{
1974 struct tl_softc *sc = xsc;
1975
1976 TL_LOCK(sc);
1977 tl_init_locked(sc);
1978 TL_UNLOCK(sc);
1979}
1980
1981static void
1982tl_init_locked(sc)
1983 struct tl_softc *sc;
1984{
1985 struct ifnet *ifp = sc->tl_ifp;
1986 struct mii_data *mii;
1987
1988 TL_LOCK_ASSERT(sc);
1989
1990 ifp = sc->tl_ifp;
1991
1992 /*
1993 * Cancel pending I/O.
1994 */
1995 tl_stop(sc);
1996
1997 /* Initialize TX FIFO threshold */
1998 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1999 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2000
2001 /* Set PCI burst size */
2002 tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2003
2004 /*
2005 * Set 'capture all frames' bit for promiscuous mode.
2006 */
2007 if (ifp->if_flags & IFF_PROMISC)
2008 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2009 else
2010 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2011
2012 /*
2013 * Set capture broadcast bit to capture broadcast frames.
2014 */
2015 if (ifp->if_flags & IFF_BROADCAST)
2016 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2017 else
2018 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2019
2020 tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2021
2022 /* Init our MAC address */
2023 tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0);
2024
2025 /* Init multicast filter, if needed. */
2026 tl_setmulti(sc);
2027
2028 /* Init circular RX list. */
2029 if (tl_list_rx_init(sc) == ENOBUFS) {
2030 device_printf(sc->tl_dev,
2031 "initialization failed: no memory for rx buffers\n");
2032 tl_stop(sc);
2033 return;
2034 }
2035
2036 /* Init TX pointers. */
2037 tl_list_tx_init(sc);
2038
2039 /* Enable PCI interrupts. */
2040 CMD_SET(sc, TL_CMD_INTSON);
2041
2042 /* Load the address of the rx list */
2043 CMD_SET(sc, TL_CMD_RT);
2044 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2045
2046 if (!sc->tl_bitrate) {
2047 if (sc->tl_miibus != NULL) {
2048 mii = device_get_softc(sc->tl_miibus);
2049 mii_mediachg(mii);
2050 }
2051 } else {
2052 tl_ifmedia_upd(ifp);
2053 }
2054
2055 /* Send the RX go command */
2056 CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2057
2058 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2059 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2060
2061 /* Start the stats update counter */
2062 callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);
2146
2147 return;
2148}
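
/*
 * Illustrative sketch, not from the driver source: the flag-to-register
 * mapping that tl_init_locked() above applies with tl_dio_setbit() and
 * tl_dio_clrbit().  IFF_PROMISC maps onto setting the "capture all
 * frames" (CAF) bit, and IFF_BROADCAST maps onto clearing the "no
 * broadcast RX" (NOBRX) bit.  The EX_* constants and ex_netcmd_bits()
 * below are hypothetical stand-ins, not the real IFF_*/TL_CMD_* values.
 */
#include <stdint.h>

#define EX_IFF_PROMISC		0x0100	/* hypothetical flag values */
#define EX_IFF_BROADCAST	0x0002

#define EX_NETCMD_CAF		0x01	/* hypothetical NETCMD bits */
#define EX_NETCMD_NOBRX		0x02

/* Compute which NETCMD bits to set and clear for the given flags. */
static void
ex_netcmd_bits(uint32_t if_flags, uint8_t *set, uint8_t *clr)
{
	*set = 0;
	*clr = 0;

	if (if_flags & EX_IFF_PROMISC)
		*set |= EX_NETCMD_CAF;		/* capture all frames */
	else
		*clr |= EX_NETCMD_CAF;

	if (if_flags & EX_IFF_BROADCAST)
		*clr |= EX_NETCMD_NOBRX;	/* allow broadcast RX */
	else
		*set |= EX_NETCMD_NOBRX;
}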
2149
2150/*
2151 * Set media options.
2152 */
2153static int
2154tl_ifmedia_upd(ifp)
2155 struct ifnet *ifp;
2156{
2157 struct tl_softc *sc;
2158 struct mii_data *mii = NULL;
2159
2160 sc = ifp->if_softc;
2161
2162 TL_LOCK(sc);
2163 if (sc->tl_bitrate)
2164 tl_setmode(sc, sc->ifmedia.ifm_media);
2165 else {
2166 mii = device_get_softc(sc->tl_miibus);
2167 mii_mediachg(mii);
2168 }
2169 TL_UNLOCK(sc);
2170
2171 return(0);
2172}
2173
2174/*
2175 * Report current media status.
2176 */
2177static void
2178tl_ifmedia_sts(ifp, ifmr)
2179 struct ifnet *ifp;
2180 struct ifmediareq *ifmr;
2181{
2182 struct tl_softc *sc;
2183 struct mii_data *mii;
2184
2185 sc = ifp->if_softc;
2186
2187 TL_LOCK(sc);
2188 ifmr->ifm_active = IFM_ETHER;
2189
2190 if (sc->tl_bitrate) {
2191 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2192 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2193 else
2194 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2195 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2196 ifmr->ifm_active |= IFM_HDX;
2197 else
2198 ifmr->ifm_active |= IFM_FDX;
2200 } else {
2201 mii = device_get_softc(sc->tl_miibus);
2202 mii_pollstat(mii);
2203 ifmr->ifm_active = mii->mii_media_active;
2204 ifmr->ifm_status = mii->mii_media_status;
2205 }
2206 TL_UNLOCK(sc);
2063}
2064
2065/*
2066 * Set media options.
2067 */
2068static int
2069tl_ifmedia_upd(ifp)
2070 struct ifnet *ifp;
2071{
2072 struct tl_softc *sc;
2073 struct mii_data *mii = NULL;
2074
2075 sc = ifp->if_softc;
2076
2077 TL_LOCK(sc);
2078 if (sc->tl_bitrate)
2079 tl_setmode(sc, sc->ifmedia.ifm_media);
2080 else {
2081 mii = device_get_softc(sc->tl_miibus);
2082 mii_mediachg(mii);
2083 }
2084 TL_UNLOCK(sc);
2085
2086 return(0);
2087}
2088
2089/*
2090 * Report current media status.
2091 */
2092static void
2093tl_ifmedia_sts(ifp, ifmr)
2094 struct ifnet *ifp;
2095 struct ifmediareq *ifmr;
2096{
2097 struct tl_softc *sc;
2098 struct mii_data *mii;
2099
2100 sc = ifp->if_softc;
2101
2102 TL_LOCK(sc);
2103 ifmr->ifm_active = IFM_ETHER;
2104
2105 if (sc->tl_bitrate) {
2106 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2107 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2108 else
2109 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2110 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2111 ifmr->ifm_active |= IFM_HDX;
2112 else
2113 ifmr->ifm_active |= IFM_FDX;
2115 } else {
2116 mii = device_get_softc(sc->tl_miibus);
2117 mii_pollstat(mii);
2118 ifmr->ifm_active = mii->mii_media_active;
2119 ifmr->ifm_status = mii->mii_media_status;
2120 }
2121 TL_UNLOCK(sc);
2207
2208 return;
2209}
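
/*
 * Illustrative sketch, not from the driver source: how the non-MII
 * ("bitrate") branch of tl_ifmedia_sts() above decodes the ACOMMIT MTXD
 * pins into a media report: MTXD1 selects AUI (10base5) versus 10baseT,
 * and MTXD3 selects half versus full duplex.  The EX_MTXD* values and
 * ex_decode_acommit() are hypothetical stand-ins for tl_dio_read8(sc,
 * TL_ACOMMIT) and the real TL_AC_MTXD* bit definitions.
 */
#include <stdint.h>

#define EX_MTXD1	0x02	/* hypothetical bit positions */
#define EX_MTXD3	0x08

struct ex_media {
	int aui;		/* 10base5 (AUI) if nonzero, else 10baseT */
	int half_duplex;	/* half duplex if nonzero, else full */
};

static struct ex_media
ex_decode_acommit(uint8_t acommit)
{
	struct ex_media m;

	m.aui = (acommit & EX_MTXD1) != 0;
	m.half_duplex = (acommit & EX_MTXD3) != 0;
	return (m);
}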
2210
2211static int
2212tl_ioctl(ifp, command, data)
2213 struct ifnet *ifp;
2214 u_long command;
2215 caddr_t data;
2216{
2217 struct tl_softc *sc = ifp->if_softc;
2218 struct ifreq *ifr = (struct ifreq *) data;
2219 int error = 0;
2220
2221 switch(command) {
2222 case SIOCSIFFLAGS:
2223 TL_LOCK(sc);
2224 if (ifp->if_flags & IFF_UP) {
2225 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2226 ifp->if_flags & IFF_PROMISC &&
2227 !(sc->tl_if_flags & IFF_PROMISC)) {
2228 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2229 tl_setmulti(sc);
2230 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2231 !(ifp->if_flags & IFF_PROMISC) &&
2232 sc->tl_if_flags & IFF_PROMISC) {
2233 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2234 tl_setmulti(sc);
2235 } else
2236 tl_init_locked(sc);
2237 } else {
2238 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2239 tl_stop(sc);
2240 }
2241 }
2242 sc->tl_if_flags = ifp->if_flags;
2243 TL_UNLOCK(sc);
2244 error = 0;
2245 break;
2246 case SIOCADDMULTI:
2247 case SIOCDELMULTI:
2248 TL_LOCK(sc);
2249 tl_setmulti(sc);
2250 TL_UNLOCK(sc);
2251 error = 0;
2252 break;
2253 case SIOCSIFMEDIA:
2254 case SIOCGIFMEDIA:
2255 if (sc->tl_bitrate)
2256 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2257 else {
2258 struct mii_data *mii;
2259 mii = device_get_softc(sc->tl_miibus);
2260 error = ifmedia_ioctl(ifp, ifr,
2261 &mii->mii_media, command);
2262 }
2263 break;
2264 default:
2265 error = ether_ioctl(ifp, command, data);
2266 break;
2267 }
2268
2269 return(error);
2270}
2271
2272static void
2273tl_watchdog(sc)
2274 struct tl_softc *sc;
2275{
2276 struct ifnet *ifp;
2277
2278 TL_LOCK_ASSERT(sc);
2279 ifp = sc->tl_ifp;
2280
2281 if_printf(ifp, "device timeout\n");
2282
2283 ifp->if_oerrors++;
2284
2285 tl_softreset(sc, 1);
2286 tl_init_locked(sc);
2122}
2123
2124static int
2125tl_ioctl(ifp, command, data)
2126 struct ifnet *ifp;
2127 u_long command;
2128 caddr_t data;
2129{
2130 struct tl_softc *sc = ifp->if_softc;
2131 struct ifreq *ifr = (struct ifreq *) data;
2132 int error = 0;
2133
2134 switch(command) {
2135 case SIOCSIFFLAGS:
2136 TL_LOCK(sc);
2137 if (ifp->if_flags & IFF_UP) {
2138 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2139 ifp->if_flags & IFF_PROMISC &&
2140 !(sc->tl_if_flags & IFF_PROMISC)) {
2141 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2142 tl_setmulti(sc);
2143 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2144 !(ifp->if_flags & IFF_PROMISC) &&
2145 sc->tl_if_flags & IFF_PROMISC) {
2146 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2147 tl_setmulti(sc);
2148 } else
2149 tl_init_locked(sc);
2150 } else {
2151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2152 tl_stop(sc);
2153 }
2154 }
2155 sc->tl_if_flags = ifp->if_flags;
2156 TL_UNLOCK(sc);
2157 error = 0;
2158 break;
2159 case SIOCADDMULTI:
2160 case SIOCDELMULTI:
2161 TL_LOCK(sc);
2162 tl_setmulti(sc);
2163 TL_UNLOCK(sc);
2164 error = 0;
2165 break;
2166 case SIOCSIFMEDIA:
2167 case SIOCGIFMEDIA:
2168 if (sc->tl_bitrate)
2169 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2170 else {
2171 struct mii_data *mii;
2172 mii = device_get_softc(sc->tl_miibus);
2173 error = ifmedia_ioctl(ifp, ifr,
2174 &mii->mii_media, command);
2175 }
2176 break;
2177 default:
2178 error = ether_ioctl(ifp, command, data);
2179 break;
2180 }
2181
2182 return(error);
2183}
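
/*
 * Illustrative sketch, not from the driver source: the decision table
 * behind the SIOCSIFFLAGS case in tl_ioctl() above.  Only a change in
 * IFF_PROMISC while the interface is already running is handled by
 * toggling the CAF bit and reloading the multicast filter; anything
 * else while the interface is up falls back to a full tl_init_locked(),
 * and taking the interface down while running stops it.  The enum and
 * ex_ifflags_action() are hypothetical names used only for this sketch.
 */
#include <stdbool.h>

enum ex_flags_action {
	EX_SET_PROMISC,		/* set CAF, reload multicast filter */
	EX_CLEAR_PROMISC,	/* clear CAF, reload multicast filter */
	EX_REINIT,		/* full reinitialization */
	EX_STOP,		/* stop the interface */
	EX_NOTHING
};

static enum ex_flags_action
ex_ifflags_action(bool up, bool running, bool promisc_now, bool promisc_before)
{
	if (!up)
		return (running ? EX_STOP : EX_NOTHING);
	if (running && promisc_now && !promisc_before)
		return (EX_SET_PROMISC);
	if (running && !promisc_now && promisc_before)
		return (EX_CLEAR_PROMISC);
	return (EX_REINIT);
}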
2184
2185static void
2186tl_watchdog(sc)
2187 struct tl_softc *sc;
2188{
2189 struct ifnet *ifp;
2190
2191 TL_LOCK_ASSERT(sc);
2192 ifp = sc->tl_ifp;
2193
2194 if_printf(ifp, "device timeout\n");
2195
2196 ifp->if_oerrors++;
2197
2198 tl_softreset(sc, 1);
2199 tl_init_locked(sc);
2287
2288 return;
2289}
2290
2291/*
2292 * Stop the adapter and free any mbufs allocated to the
2293 * RX and TX lists.
2294 */
2295static void
2296tl_stop(sc)
2297 struct tl_softc *sc;
2298{
2299 register int i;
2300 struct ifnet *ifp;
2301
2302 TL_LOCK_ASSERT(sc);
2303
2304 ifp = sc->tl_ifp;
2305
2306 /* Stop the stats updater. */
2307 callout_stop(&sc->tl_stat_callout);
2308
2309 /* Stop the transmitter */
2310 CMD_CLR(sc, TL_CMD_RT);
2311 CMD_SET(sc, TL_CMD_STOP);
2312 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2313
2314 /* Stop the receiver */
2315 CMD_SET(sc, TL_CMD_RT);
2316 CMD_SET(sc, TL_CMD_STOP);
2317 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2318
2319 /*
2320 * Disable host interrupts.
2321 */
2322 CMD_SET(sc, TL_CMD_INTSOFF);
2323
2324 /*
2325 * Clear list pointer.
2326 */
2327 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2328
2329 /*
2330 * Free the RX lists.
2331 */
2332 for (i = 0; i < TL_RX_LIST_CNT; i++) {
2333 if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2334 m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2335 sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2336 }
2337 }
2338 bzero((char *)&sc->tl_ldata->tl_rx_list,
2339 sizeof(sc->tl_ldata->tl_rx_list));
2340
2341 /*
2342 * Free the TX list buffers.
2343 */
2344 for (i = 0; i < TL_TX_LIST_CNT; i++) {
2345 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2346 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2347 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2348 }
2349 }
2350 bzero((char *)&sc->tl_ldata->tl_tx_list,
2351 sizeof(sc->tl_ldata->tl_tx_list));
2352
2353 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2200}
2201
2202/*
2203 * Stop the adapter and free any mbufs allocated to the
2204 * RX and TX lists.
2205 */
2206static void
2207tl_stop(sc)
2208 struct tl_softc *sc;
2209{
2210 register int i;
2211 struct ifnet *ifp;
2212
2213 TL_LOCK_ASSERT(sc);
2214
2215 ifp = sc->tl_ifp;
2216
2217 /* Stop the stats updater. */
2218 callout_stop(&sc->tl_stat_callout);
2219
2220 /* Stop the transmitter */
2221 CMD_CLR(sc, TL_CMD_RT);
2222 CMD_SET(sc, TL_CMD_STOP);
2223 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2224
2225 /* Stop the receiver */
2226 CMD_SET(sc, TL_CMD_RT);
2227 CMD_SET(sc, TL_CMD_STOP);
2228 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2229
2230 /*
2231 * Disable host interrupts.
2232 */
2233 CMD_SET(sc, TL_CMD_INTSOFF);
2234
2235 /*
2236 * Clear list pointer.
2237 */
2238 CSR_WRITE_4(sc, TL_CH_PARM, 0);
2239
2240 /*
2241 * Free the RX lists.
2242 */
2243 for (i = 0; i < TL_RX_LIST_CNT; i++) {
2244 if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2245 m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2246 sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2247 }
2248 }
2249 bzero((char *)&sc->tl_ldata->tl_rx_list,
2250 sizeof(sc->tl_ldata->tl_rx_list));
2251
2252 /*
2253 * Free the TX list buffers.
2254 */
2255 for (i = 0; i < TL_TX_LIST_CNT; i++) {
2256 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2257 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2258 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2259 }
2260 }
2261 bzero((char *)&sc->tl_ldata->tl_tx_list,
2262 sizeof(sc->tl_ldata->tl_tx_list));
2263
2264 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2354
2355 return;
2356}
2357
2358/*
2359 * Stop all chip I/O so that the kernel's probe routines don't
2360 * get confused by errant DMAs when rebooting.
2361 */
2362static int
2363tl_shutdown(dev)
2364 device_t dev;
2365{
2366 struct tl_softc *sc;
2367
2368 sc = device_get_softc(dev);
2369
2370 TL_LOCK(sc);
2371 tl_stop(sc);
2372 TL_UNLOCK(sc);
2373
2374 return (0);
2375}
2265}
2266
2267/*
2268 * Stop all chip I/O so that the kernel's probe routines don't
2269 * get confused by errant DMAs when rebooting.
2270 */
2271static int
2272tl_shutdown(dev)
2273 device_t dev;
2274{
2275 struct tl_softc *sc;
2276
2277 sc = device_get_softc(dev);
2278
2279 TL_LOCK(sc);
2280 tl_stop(sc);
2281 TL_UNLOCK(sc);
2282
2283 return (0);
2284}