Deleted Added
full compact
if_xl.c (122689) if_xl.c (123019)
1/*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
1/*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/pci/if_xl.c 123019 2003-11-28 05:28:29Z imp $");
35
36/*
37 * 3Com 3c90x Etherlink XL PCI NIC driver
38 *
39 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
40 * bus-master chips (3c90x cards and embedded controllers) including
41 * the following:
42 *
43 * 3Com 3c900-TPO 10Mbps/RJ-45
44 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
45 * 3Com 3c905-TX 10/100Mbps/RJ-45
46 * 3Com 3c905-T4 10/100Mbps/RJ-45
47 * 3Com 3c900B-TPO 10Mbps/RJ-45
48 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
49 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
50 * 3Com 3c900B-FL 10Mbps/Fiber-optic
51 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
52 * 3Com 3c905B-TX 10/100Mbps/RJ-45
53 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
54 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
55 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
56 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
57 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
58 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
59 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69 * Dell on-board 3c920 10/100Mbps/RJ-45
70 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71 * Dell Latitude laptop docking station embedded 3c905-TX
72 *
73 * Written by Bill Paul <wpaul@ctr.columbia.edu>
74 * Electrical Engineering Department
75 * Columbia University, New York City
76 */
77/*
78 * The 3c90x series chips use a bus-master DMA interface for transfering
79 * packets to and from the controller chip. Some of the "vortex" cards
80 * (3c59x) also supported a bus master mode, however for those chips
81 * you could only DMA packets to/from a contiguous memory buffer. For
82 * transmission this would mean copying the contents of the queued mbuf
83 * chain into an mbuf cluster and then DMAing the cluster. This extra
84 * copy would sort of defeat the purpose of the bus master support for
85 * any packet that doesn't fit into a single mbuf.
86 *
87 * By contrast, the 3c90x cards support a fragment-based bus master
88 * mode where mbuf chains can be encapsulated using TX descriptors.
89 * This is similar to other PCI chips such as the Texas Instruments
90 * ThunderLAN and the Intel 82557/82558.
91 *
92 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
93 * bus master chips because they maintain the old PIO interface for
94 * backwards compatibility, but starting with the 3c905B and the
95 * "cyclone" chips, the compatibility interface has been dropped.
96 * Since using bus master DMA is a big win, we use this driver to
97 * support the PCI "boomerang" chips even though they work with the
98 * "vortex" driver in order to obtain better performance.
99 *
100 * This driver is in the /sys/pci directory because it only supports
101 * PCI-based NICs.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/sockio.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/kernel.h>
110#include <sys/socket.h>
111
112#include <net/if.h>
113#include <net/if_arp.h>
114#include <net/ethernet.h>
115#include <net/if_dl.h>
116#include <net/if_media.h>
117
118#include <net/bpf.h>
119
120#include <machine/bus_memio.h>
121#include <machine/bus_pio.h>
122#include <machine/bus.h>
123#include <machine/resource.h>
124#include <sys/bus.h>
125#include <sys/rman.h>
126
127#include <dev/mii/mii.h>
128#include <dev/mii/miivar.h>
129
130#include <dev/pci/pcireg.h>
131#include <dev/pci/pcivar.h>
132
133MODULE_DEPEND(xl, pci, 1, 1, 1);
134MODULE_DEPEND(xl, ether, 1, 1, 1);
135MODULE_DEPEND(xl, miibus, 1, 1, 1);
136
137/* "device miibus" required. See GENERIC if you get errors here. */
138#include "miibus_if.h"
139
140#include <pci/if_xlreg.h>
141
142#define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
143
144/*
145 * Various supported device vendors/types and their names.
146 */
147static struct xl_type xl_devs[] = {
148 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
149 "3Com 3c900-TPO Etherlink XL" },
150 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
151 "3Com 3c900-COMBO Etherlink XL" },
152 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
153 "3Com 3c905-TX Fast Etherlink XL" },
154 { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
155 "3Com 3c905-T4 Fast Etherlink XL" },
156 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
157 "3Com 3c900B-TPO Etherlink XL" },
158 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
159 "3Com 3c900B-COMBO Etherlink XL" },
160 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
161 "3Com 3c900B-TPC Etherlink XL" },
162 { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
163 "3Com 3c900B-FL Etherlink XL" },
164 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
165 "3Com 3c905B-TX Fast Etherlink XL" },
166 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
167 "3Com 3c905B-T4 Fast Etherlink XL" },
168 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
169 "3Com 3c905B-FX/SC Fast Etherlink XL" },
170 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
171 "3Com 3c905B-COMBO Fast Etherlink XL" },
172 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
173 "3Com 3c905C-TX Fast Etherlink XL" },
174 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
175 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
176 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
177 "3Com 3c980 Fast Etherlink XL" },
178 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
179 "3Com 3c980C Fast Etherlink XL" },
180 { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
181 "3Com 3cSOHO100-TX OfficeConnect" },
182 { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
183 "3Com 3c450-TX HomeConnect" },
184 { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
185 "3Com 3c555 Fast Etherlink XL" },
186 { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
187 "3Com 3c556 Fast Etherlink XL" },
188 { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
189 "3Com 3c556B Fast Etherlink XL" },
190 { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
191 "3Com 3c575TX Fast Etherlink XL" },
192 { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
193 "3Com 3c575B Fast Etherlink XL" },
194 { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
195 "3Com 3c575C Fast Etherlink XL" },
196 { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
197 "3Com 3c656 Fast Etherlink XL" },
198 { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
199 "3Com 3c656B Fast Etherlink XL" },
200 { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
201 "3Com 3c656C Fast Etherlink XL" },
202 { 0, 0, NULL }
203};
204
205static int xl_probe (device_t);
206static int xl_attach (device_t);
207static int xl_detach (device_t);
208
209static int xl_newbuf (struct xl_softc *, struct xl_chain_onefrag *);
210static void xl_stats_update (void *);
211static int xl_encap (struct xl_softc *, struct xl_chain *,
212 struct mbuf *);
213static void xl_rxeof (struct xl_softc *);
214static int xl_rx_resync (struct xl_softc *);
215static void xl_txeof (struct xl_softc *);
216static void xl_txeof_90xB (struct xl_softc *);
217static void xl_txeoc (struct xl_softc *);
218static void xl_intr (void *);
219static void xl_start (struct ifnet *);
220static void xl_start_90xB (struct ifnet *);
221static int xl_ioctl (struct ifnet *, u_long, caddr_t);
222static void xl_init (void *);
223static void xl_stop (struct xl_softc *);
224static void xl_watchdog (struct ifnet *);
225static void xl_shutdown (device_t);
226static int xl_suspend (device_t);
227static int xl_resume (device_t);
228
229static int xl_ifmedia_upd (struct ifnet *);
230static void xl_ifmedia_sts (struct ifnet *, struct ifmediareq *);
231
232static int xl_eeprom_wait (struct xl_softc *);
233static int xl_read_eeprom (struct xl_softc *, caddr_t, int, int, int);
234static void xl_mii_sync (struct xl_softc *);
235static void xl_mii_send (struct xl_softc *, u_int32_t, int);
236static int xl_mii_readreg (struct xl_softc *, struct xl_mii_frame *);
237static int xl_mii_writereg (struct xl_softc *, struct xl_mii_frame *);
238
239static void xl_setcfg (struct xl_softc *);
240static void xl_setmode (struct xl_softc *, int);
241static u_int32_t xl_mchash (caddr_t);
242static void xl_setmulti (struct xl_softc *);
243static void xl_setmulti_hash (struct xl_softc *);
244static void xl_reset (struct xl_softc *);
245static int xl_list_rx_init (struct xl_softc *);
246static int xl_list_tx_init (struct xl_softc *);
247static int xl_list_tx_init_90xB (struct xl_softc *);
248static void xl_wait (struct xl_softc *);
249static void xl_mediacheck (struct xl_softc *);
250static void xl_choose_xcvr (struct xl_softc *, int);
251static void xl_dma_map_addr (void *, bus_dma_segment_t *, int, int);
252static void xl_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t,
253 int);
254static void xl_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t,
255 int);
256#ifdef notdef
257static void xl_testpacket (struct xl_softc *);
258#endif
259
260static int xl_miibus_readreg (device_t, int, int);
261static int xl_miibus_writereg (device_t, int, int, int);
262static void xl_miibus_statchg (device_t);
263static void xl_miibus_mediainit (device_t);
264
265static device_method_t xl_methods[] = {
266 /* Device interface */
267 DEVMETHOD(device_probe, xl_probe),
268 DEVMETHOD(device_attach, xl_attach),
269 DEVMETHOD(device_detach, xl_detach),
270 DEVMETHOD(device_shutdown, xl_shutdown),
271 DEVMETHOD(device_suspend, xl_suspend),
272 DEVMETHOD(device_resume, xl_resume),
273
274 /* bus interface */
275 DEVMETHOD(bus_print_child, bus_generic_print_child),
276 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
277
278 /* MII interface */
279 DEVMETHOD(miibus_readreg, xl_miibus_readreg),
280 DEVMETHOD(miibus_writereg, xl_miibus_writereg),
281 DEVMETHOD(miibus_statchg, xl_miibus_statchg),
282 DEVMETHOD(miibus_mediainit, xl_miibus_mediainit),
283
284 { 0, 0 }
285};
286
287static driver_t xl_driver = {
288 "xl",
289 xl_methods,
290 sizeof(struct xl_softc)
291};
292
293static devclass_t xl_devclass;
294
35
36/*
37 * 3Com 3c90x Etherlink XL PCI NIC driver
38 *
39 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
40 * bus-master chips (3c90x cards and embedded controllers) including
41 * the following:
42 *
43 * 3Com 3c900-TPO 10Mbps/RJ-45
44 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
45 * 3Com 3c905-TX 10/100Mbps/RJ-45
46 * 3Com 3c905-T4 10/100Mbps/RJ-45
47 * 3Com 3c900B-TPO 10Mbps/RJ-45
48 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
49 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
50 * 3Com 3c900B-FL 10Mbps/Fiber-optic
51 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
52 * 3Com 3c905B-TX 10/100Mbps/RJ-45
53 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
54 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
55 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
56 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
57 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
58 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
59 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69 * Dell on-board 3c920 10/100Mbps/RJ-45
70 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71 * Dell Latitude laptop docking station embedded 3c905-TX
72 *
73 * Written by Bill Paul <wpaul@ctr.columbia.edu>
74 * Electrical Engineering Department
75 * Columbia University, New York City
76 */
77/*
78 * The 3c90x series chips use a bus-master DMA interface for transfering
79 * packets to and from the controller chip. Some of the "vortex" cards
80 * (3c59x) also supported a bus master mode, however for those chips
81 * you could only DMA packets to/from a contiguous memory buffer. For
82 * transmission this would mean copying the contents of the queued mbuf
83 * chain into an mbuf cluster and then DMAing the cluster. This extra
84 * copy would sort of defeat the purpose of the bus master support for
85 * any packet that doesn't fit into a single mbuf.
86 *
87 * By contrast, the 3c90x cards support a fragment-based bus master
88 * mode where mbuf chains can be encapsulated using TX descriptors.
89 * This is similar to other PCI chips such as the Texas Instruments
90 * ThunderLAN and the Intel 82557/82558.
91 *
92 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
93 * bus master chips because they maintain the old PIO interface for
94 * backwards compatibility, but starting with the 3c905B and the
95 * "cyclone" chips, the compatibility interface has been dropped.
96 * Since using bus master DMA is a big win, we use this driver to
97 * support the PCI "boomerang" chips even though they work with the
98 * "vortex" driver in order to obtain better performance.
99 *
100 * This driver is in the /sys/pci directory because it only supports
101 * PCI-based NICs.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/sockio.h>
107#include <sys/endian.h>
108#include <sys/mbuf.h>
109#include <sys/kernel.h>
110#include <sys/socket.h>
111
112#include <net/if.h>
113#include <net/if_arp.h>
114#include <net/ethernet.h>
115#include <net/if_dl.h>
116#include <net/if_media.h>
117
118#include <net/bpf.h>
119
120#include <machine/bus_memio.h>
121#include <machine/bus_pio.h>
122#include <machine/bus.h>
123#include <machine/resource.h>
124#include <sys/bus.h>
125#include <sys/rman.h>
126
127#include <dev/mii/mii.h>
128#include <dev/mii/miivar.h>
129
130#include <dev/pci/pcireg.h>
131#include <dev/pci/pcivar.h>
132
133MODULE_DEPEND(xl, pci, 1, 1, 1);
134MODULE_DEPEND(xl, ether, 1, 1, 1);
135MODULE_DEPEND(xl, miibus, 1, 1, 1);
136
137/* "device miibus" required. See GENERIC if you get errors here. */
138#include "miibus_if.h"
139
140#include <pci/if_xlreg.h>
141
142#define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
143
144/*
145 * Various supported device vendors/types and their names.
146 */
147static struct xl_type xl_devs[] = {
148 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
149 "3Com 3c900-TPO Etherlink XL" },
150 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
151 "3Com 3c900-COMBO Etherlink XL" },
152 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
153 "3Com 3c905-TX Fast Etherlink XL" },
154 { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
155 "3Com 3c905-T4 Fast Etherlink XL" },
156 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
157 "3Com 3c900B-TPO Etherlink XL" },
158 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
159 "3Com 3c900B-COMBO Etherlink XL" },
160 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
161 "3Com 3c900B-TPC Etherlink XL" },
162 { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
163 "3Com 3c900B-FL Etherlink XL" },
164 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
165 "3Com 3c905B-TX Fast Etherlink XL" },
166 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
167 "3Com 3c905B-T4 Fast Etherlink XL" },
168 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
169 "3Com 3c905B-FX/SC Fast Etherlink XL" },
170 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
171 "3Com 3c905B-COMBO Fast Etherlink XL" },
172 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
173 "3Com 3c905C-TX Fast Etherlink XL" },
174 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
175 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
176 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
177 "3Com 3c980 Fast Etherlink XL" },
178 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
179 "3Com 3c980C Fast Etherlink XL" },
180 { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
181 "3Com 3cSOHO100-TX OfficeConnect" },
182 { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
183 "3Com 3c450-TX HomeConnect" },
184 { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
185 "3Com 3c555 Fast Etherlink XL" },
186 { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
187 "3Com 3c556 Fast Etherlink XL" },
188 { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
189 "3Com 3c556B Fast Etherlink XL" },
190 { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
191 "3Com 3c575TX Fast Etherlink XL" },
192 { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
193 "3Com 3c575B Fast Etherlink XL" },
194 { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
195 "3Com 3c575C Fast Etherlink XL" },
196 { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
197 "3Com 3c656 Fast Etherlink XL" },
198 { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
199 "3Com 3c656B Fast Etherlink XL" },
200 { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
201 "3Com 3c656C Fast Etherlink XL" },
202 { 0, 0, NULL }
203};
204
205static int xl_probe (device_t);
206static int xl_attach (device_t);
207static int xl_detach (device_t);
208
209static int xl_newbuf (struct xl_softc *, struct xl_chain_onefrag *);
210static void xl_stats_update (void *);
211static int xl_encap (struct xl_softc *, struct xl_chain *,
212 struct mbuf *);
213static void xl_rxeof (struct xl_softc *);
214static int xl_rx_resync (struct xl_softc *);
215static void xl_txeof (struct xl_softc *);
216static void xl_txeof_90xB (struct xl_softc *);
217static void xl_txeoc (struct xl_softc *);
218static void xl_intr (void *);
219static void xl_start (struct ifnet *);
220static void xl_start_90xB (struct ifnet *);
221static int xl_ioctl (struct ifnet *, u_long, caddr_t);
222static void xl_init (void *);
223static void xl_stop (struct xl_softc *);
224static void xl_watchdog (struct ifnet *);
225static void xl_shutdown (device_t);
226static int xl_suspend (device_t);
227static int xl_resume (device_t);
228
229static int xl_ifmedia_upd (struct ifnet *);
230static void xl_ifmedia_sts (struct ifnet *, struct ifmediareq *);
231
232static int xl_eeprom_wait (struct xl_softc *);
233static int xl_read_eeprom (struct xl_softc *, caddr_t, int, int, int);
234static void xl_mii_sync (struct xl_softc *);
235static void xl_mii_send (struct xl_softc *, u_int32_t, int);
236static int xl_mii_readreg (struct xl_softc *, struct xl_mii_frame *);
237static int xl_mii_writereg (struct xl_softc *, struct xl_mii_frame *);
238
239static void xl_setcfg (struct xl_softc *);
240static void xl_setmode (struct xl_softc *, int);
241static u_int32_t xl_mchash (caddr_t);
242static void xl_setmulti (struct xl_softc *);
243static void xl_setmulti_hash (struct xl_softc *);
244static void xl_reset (struct xl_softc *);
245static int xl_list_rx_init (struct xl_softc *);
246static int xl_list_tx_init (struct xl_softc *);
247static int xl_list_tx_init_90xB (struct xl_softc *);
248static void xl_wait (struct xl_softc *);
249static void xl_mediacheck (struct xl_softc *);
250static void xl_choose_xcvr (struct xl_softc *, int);
251static void xl_dma_map_addr (void *, bus_dma_segment_t *, int, int);
252static void xl_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t,
253 int);
254static void xl_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t,
255 int);
256#ifdef notdef
257static void xl_testpacket (struct xl_softc *);
258#endif
259
260static int xl_miibus_readreg (device_t, int, int);
261static int xl_miibus_writereg (device_t, int, int, int);
262static void xl_miibus_statchg (device_t);
263static void xl_miibus_mediainit (device_t);
264
265static device_method_t xl_methods[] = {
266 /* Device interface */
267 DEVMETHOD(device_probe, xl_probe),
268 DEVMETHOD(device_attach, xl_attach),
269 DEVMETHOD(device_detach, xl_detach),
270 DEVMETHOD(device_shutdown, xl_shutdown),
271 DEVMETHOD(device_suspend, xl_suspend),
272 DEVMETHOD(device_resume, xl_resume),
273
274 /* bus interface */
275 DEVMETHOD(bus_print_child, bus_generic_print_child),
276 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
277
278 /* MII interface */
279 DEVMETHOD(miibus_readreg, xl_miibus_readreg),
280 DEVMETHOD(miibus_writereg, xl_miibus_writereg),
281 DEVMETHOD(miibus_statchg, xl_miibus_statchg),
282 DEVMETHOD(miibus_mediainit, xl_miibus_mediainit),
283
284 { 0, 0 }
285};
286
287static driver_t xl_driver = {
288 "xl",
289 xl_methods,
290 sizeof(struct xl_softc)
291};
292
293static devclass_t xl_devclass;
294
295DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0);
295DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
296DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
297
298static void
299xl_dma_map_addr(arg, segs, nseg, error)
300 void *arg;
301 bus_dma_segment_t *segs;
302 int nseg, error;
303{
304 u_int32_t *paddr;
305
306 paddr = arg;
307 *paddr = segs->ds_addr;
308}
309
310static void
311xl_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
312 void *arg;
313 bus_dma_segment_t *segs;
314 int nseg;
315 bus_size_t mapsize;
316 int error;
317{
318 u_int32_t *paddr;
319
320 if (error)
321 return;
322 KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
323 paddr = arg;
324 *paddr = segs->ds_addr;
325}
326
327static void
328xl_dma_map_txbuf(arg, segs, nseg, mapsize, error)
329 void *arg;
330 bus_dma_segment_t *segs;
331 int nseg;
332 bus_size_t mapsize;
333 int error;
334{
335 struct xl_list *l;
336 int i, total_len;
337
338 if (error)
339 return;
340
341 KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));
342
343 total_len = 0;
344 l = arg;
345 for (i = 0; i < nseg; i++) {
346 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
347 l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
348 l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
349 total_len += segs[i].ds_len;
350 }
351 l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
352 XL_LAST_FRAG);
353 l->xl_status = htole32(total_len);
354 l->xl_next = 0;
355}
356
357/*
358 * Murphy's law says that it's possible the chip can wedge and
359 * the 'command in progress' bit may never clear. Hence, we wait
360 * only a finite amount of time to avoid getting caught in an
361 * infinite loop. Normally this delay routine would be a macro,
362 * but it isn't called during normal operation so we can afford
363 * to make it a function.
364 */
365static void
366xl_wait(sc)
367 struct xl_softc *sc;
368{
369 register int i;
370
371 for (i = 0; i < XL_TIMEOUT; i++) {
372 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
373 break;
374 }
375
376 if (i == XL_TIMEOUT)
377 printf("xl%d: command never completed!\n", sc->xl_unit);
378
379 return;
380}
381
382/*
383 * MII access routines are provided for adapters with external
384 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
385 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
386 * Note: if you don't perform the MDIO operations just right,
387 * it's possible to end up with code that works correctly with
388 * some chips/CPUs/processor speeds/bus speeds/etc but not
389 * with others.
390 */
391#define MII_SET(x) \
392 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
393 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
394
395#define MII_CLR(x) \
396 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
397 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
398
399/*
400 * Sync the PHYs by setting data bit and strobing the clock 32 times.
401 */
402static void
403xl_mii_sync(sc)
404 struct xl_softc *sc;
405{
406 register int i;
407
408 XL_SEL_WIN(4);
409 MII_SET(XL_MII_DIR|XL_MII_DATA);
410
411 for (i = 0; i < 32; i++) {
412 MII_SET(XL_MII_CLK);
413 MII_SET(XL_MII_DATA);
414 MII_SET(XL_MII_DATA);
415 MII_CLR(XL_MII_CLK);
416 MII_SET(XL_MII_DATA);
417 MII_SET(XL_MII_DATA);
418 }
419
420 return;
421}
422
423/*
424 * Clock a series of bits through the MII.
425 */
426static void
427xl_mii_send(sc, bits, cnt)
428 struct xl_softc *sc;
429 u_int32_t bits;
430 int cnt;
431{
432 int i;
433
434 XL_SEL_WIN(4);
435 MII_CLR(XL_MII_CLK);
436
437 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
438 if (bits & i) {
439 MII_SET(XL_MII_DATA);
440 } else {
441 MII_CLR(XL_MII_DATA);
442 }
443 MII_CLR(XL_MII_CLK);
444 MII_SET(XL_MII_CLK);
445 }
446}
447
448/*
449 * Read an PHY register through the MII.
450 */
451static int
452xl_mii_readreg(sc, frame)
453 struct xl_softc *sc;
454 struct xl_mii_frame *frame;
455
456{
457 int i, ack;
458
459 XL_LOCK(sc);
460
461 /*
462 * Set up frame for RX.
463 */
464 frame->mii_stdelim = XL_MII_STARTDELIM;
465 frame->mii_opcode = XL_MII_READOP;
466 frame->mii_turnaround = 0;
467 frame->mii_data = 0;
468
469 /*
470 * Select register window 4.
471 */
472
473 XL_SEL_WIN(4);
474
475 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
476 /*
477 * Turn on data xmit.
478 */
479 MII_SET(XL_MII_DIR);
480
481 xl_mii_sync(sc);
482
483 /*
484 * Send command/address info.
485 */
486 xl_mii_send(sc, frame->mii_stdelim, 2);
487 xl_mii_send(sc, frame->mii_opcode, 2);
488 xl_mii_send(sc, frame->mii_phyaddr, 5);
489 xl_mii_send(sc, frame->mii_regaddr, 5);
490
491 /* Idle bit */
492 MII_CLR((XL_MII_CLK|XL_MII_DATA));
493 MII_SET(XL_MII_CLK);
494
495 /* Turn off xmit. */
496 MII_CLR(XL_MII_DIR);
497
498 /* Check for ack */
499 MII_CLR(XL_MII_CLK);
500 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
501 MII_SET(XL_MII_CLK);
502
503 /*
504 * Now try reading data bits. If the ack failed, we still
505 * need to clock through 16 cycles to keep the PHY(s) in sync.
506 */
507 if (ack) {
508 for(i = 0; i < 16; i++) {
509 MII_CLR(XL_MII_CLK);
510 MII_SET(XL_MII_CLK);
511 }
512 goto fail;
513 }
514
515 for (i = 0x8000; i; i >>= 1) {
516 MII_CLR(XL_MII_CLK);
517 if (!ack) {
518 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
519 frame->mii_data |= i;
520 }
521 MII_SET(XL_MII_CLK);
522 }
523
524fail:
525
526 MII_CLR(XL_MII_CLK);
527 MII_SET(XL_MII_CLK);
528
529 XL_UNLOCK(sc);
530
531 if (ack)
532 return(1);
533 return(0);
534}
535
536/*
537 * Write to a PHY register through the MII.
538 */
539static int
540xl_mii_writereg(sc, frame)
541 struct xl_softc *sc;
542 struct xl_mii_frame *frame;
543
544{
545 XL_LOCK(sc);
546
547 /*
548 * Set up frame for TX.
549 */
550
551 frame->mii_stdelim = XL_MII_STARTDELIM;
552 frame->mii_opcode = XL_MII_WRITEOP;
553 frame->mii_turnaround = XL_MII_TURNAROUND;
554
555 /*
556 * Select the window 4.
557 */
558 XL_SEL_WIN(4);
559
560 /*
561 * Turn on data output.
562 */
563 MII_SET(XL_MII_DIR);
564
565 xl_mii_sync(sc);
566
567 xl_mii_send(sc, frame->mii_stdelim, 2);
568 xl_mii_send(sc, frame->mii_opcode, 2);
569 xl_mii_send(sc, frame->mii_phyaddr, 5);
570 xl_mii_send(sc, frame->mii_regaddr, 5);
571 xl_mii_send(sc, frame->mii_turnaround, 2);
572 xl_mii_send(sc, frame->mii_data, 16);
573
574 /* Idle bit. */
575 MII_SET(XL_MII_CLK);
576 MII_CLR(XL_MII_CLK);
577
578 /*
579 * Turn off xmit.
580 */
581 MII_CLR(XL_MII_DIR);
582
583 XL_UNLOCK(sc);
584
585 return(0);
586}
587
588static int
589xl_miibus_readreg(dev, phy, reg)
590 device_t dev;
591 int phy, reg;
592{
593 struct xl_softc *sc;
594 struct xl_mii_frame frame;
595
596 sc = device_get_softc(dev);
597
598 /*
599 * Pretend that PHYs are only available at MII address 24.
600 * This is to guard against problems with certain 3Com ASIC
601 * revisions that incorrectly map the internal transceiver
602 * control registers at all MII addresses. This can cause
603 * the miibus code to attach the same PHY several times over.
604 */
605 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
606 return(0);
607
608 bzero((char *)&frame, sizeof(frame));
609
610 frame.mii_phyaddr = phy;
611 frame.mii_regaddr = reg;
612 xl_mii_readreg(sc, &frame);
613
614 return(frame.mii_data);
615}
616
617static int
618xl_miibus_writereg(dev, phy, reg, data)
619 device_t dev;
620 int phy, reg, data;
621{
622 struct xl_softc *sc;
623 struct xl_mii_frame frame;
624
625 sc = device_get_softc(dev);
626
627 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
628 return(0);
629
630 bzero((char *)&frame, sizeof(frame));
631
632 frame.mii_phyaddr = phy;
633 frame.mii_regaddr = reg;
634 frame.mii_data = data;
635
636 xl_mii_writereg(sc, &frame);
637
638 return(0);
639}
640
641static void
642xl_miibus_statchg(dev)
643 device_t dev;
644{
645 struct xl_softc *sc;
646 struct mii_data *mii;
647
648
649 sc = device_get_softc(dev);
650 mii = device_get_softc(sc->xl_miibus);
651
652 XL_LOCK(sc);
653
654 xl_setcfg(sc);
655
656 /* Set ASIC's duplex mode to match the PHY. */
657 XL_SEL_WIN(3);
658 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
659 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
660 else
661 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
662 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
663
664 XL_UNLOCK(sc);
665
666 return;
667}
668
669/*
670 * Special support for the 3c905B-COMBO. This card has 10/100 support
671 * plus BNC and AUI ports. This means we will have both an miibus attached
672 * plus some non-MII media settings. In order to allow this, we have to
673 * add the extra media to the miibus's ifmedia struct, but we can't do
674 * that during xl_attach() because the miibus hasn't been attached yet.
675 * So instead, we wait until the miibus probe/attach is done, at which
676 * point we will get a callback telling is that it's safe to add our
677 * extra media.
678 */
/*
 * miibus media-init callback: runs after the miibus probe/attach has
 * completed, so it is now safe to add the adapter's non-MII media
 * (AUI/BNC/10baseFL) to the miibus ifmedia list.
 */
static void
xl_miibus_mediainit(dev)
	device_t dev;
{
	struct xl_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);
	ifm = &mii->mii_media;

	XL_LOCK(sc);

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				printf("xl%d: found 10baseFL\n", sc->xl_unit);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL);
			/* Full duplex is only offered if the EEPROM says so. */
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				printf("xl%d: found AUI\n", sc->xl_unit);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			printf("xl%d: found BNC\n", sc->xl_unit);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	XL_UNLOCK(sc);

	return;
}
723
724/*
725 * The EEPROM is slow: give it time to come ready after issuing
726 * it a command.
727 */
728static int
729xl_eeprom_wait(sc)
730 struct xl_softc *sc;
731{
732 int i;
733
734 for (i = 0; i < 100; i++) {
735 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
736 DELAY(162);
737 else
738 break;
739 }
740
741 if (i == 100) {
742 printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
743 return(1);
744 }
745
746 return(0);
747}
748
749/*
750 * Read a sequence of words from the EEPROM. Note that ethernet address
751 * data is stored in the EEPROM in network byte order.
752 */
static int
xl_read_eeprom(sc, dest, off, cnt, swap)
	struct xl_softc *sc;
	caddr_t dest;		/* destination buffer; must hold cnt*2 bytes */
	int off;		/* starting word offset within the EEPROM */
	int cnt;		/* number of 16-bit words to read */
	int swap;		/* non-zero: convert each word from network order */
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return(1);

	/* Some adapters keep their data starting at word offset 0x30. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/*
		 * Issue a read command using whichever offset encoding
		 * this adapter expects, then wait for the word to latch.
		 */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}
797
798/*
799 * This routine is taken from the 3Com Etherlink XL manual,
800 * page 10-7. It calculates a CRC of the supplied multicast
801 * group address and returns the lower 8 bits, which are used
802 * as the multicast filter position.
803 * Note: the 3c905B currently only supports a 64-bit hash table,
804 * which means we really only need 6 bits, but the manual indicates
805 * that future chip revisions will have a 256-bit hash table,
806 * hence the routine is set up to calculate 8 bits of position
807 * info in case we need it some day.
808 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
809 * 256 bit hash table. This means we have to use all 8 bits regardless.
810 * On older cards, the upper 2 bits will be ignored. Grrrr....
811 */
812static u_int32_t
813xl_mchash(addr)
814 caddr_t addr;
815{
816 u_int32_t crc, carry;
817 int idx, bit;
818 u_int8_t data;
819
820 /* Compute CRC for the address value. */
821 crc = 0xFFFFFFFF; /* initial value */
822
823 for (idx = 0; idx < 6; idx++) {
824 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
825 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
826 crc <<= 1;
827 if (carry)
828 crc = (crc ^ 0x04c11db6) | carry;
829 }
830 }
831
832 /* return the filter bit position */
833 return(crc & 0x000000FF);
834}
835
836/*
837 * NICs older than the 3c905B have only one multicast option, which
838 * is to enable reception of all multicast frames.
839 */
840static void
841xl_setmulti(sc)
842 struct xl_softc *sc;
843{
844 struct ifnet *ifp;
845 struct ifmultiaddr *ifma;
846 u_int8_t rxfilt;
847 int mcnt = 0;
848
849 ifp = &sc->arpcom.ac_if;
850
851 XL_SEL_WIN(5);
852 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
853
854 if (ifp->if_flags & IFF_ALLMULTI) {
855 rxfilt |= XL_RXFILTER_ALLMULTI;
856 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
857 return;
858 }
859
860 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
861 mcnt++;
862
863 if (mcnt)
864 rxfilt |= XL_RXFILTER_ALLMULTI;
865 else
866 rxfilt &= ~XL_RXFILTER_ALLMULTI;
867
868 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
869
870 return;
871}
872
873/*
874 * 3c905B adapters have a hash filter that we can program.
875 */
/*
 * Program the RX multicast hash filter on 3c905B-class parts: clear
 * all existing hash bits, then set one bit per joined group based on
 * the CRC of its link-layer address.
 */
static void
xl_setmulti_hash(sc)
	struct xl_softc *sc;
{
	struct ifnet *ifp;
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* All-multicast overrides the hash filter entirely. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	} else
		rxfilt &= ~XL_RXFILTER_ALLMULTI;


	/* first, zot all the existing hash bits */
	for (i = 0; i < XL_HASHFILT_SIZE; i++)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = xl_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
		mcnt++;
	}

	/* Enable hash filtering only if at least one group is joined. */
	if (mcnt)
		rxfilt |= XL_RXFILTER_MULTIHASH;
	else
		rxfilt &= ~XL_RXFILTER_MULTIHASH;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);

	return;
}
921
#ifdef notdef
/*
 * Debug helper (compiled out): build a minimal 3-byte test frame
 * addressed to our own station address, queue it, and kick the
 * transmitter.
 */
static void
xl_testpacket(sc)
	struct xl_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own MAC address. */
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	/* 3-byte payload follows the 14-byte Ethernet header. */
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);

	return;
}
#endif
952
953static void
954xl_setcfg(sc)
955 struct xl_softc *sc;
956{
957 u_int32_t icfg;
958
959 XL_SEL_WIN(3);
960 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
961 icfg &= ~XL_ICFG_CONNECTOR_MASK;
962 if (sc->xl_media & XL_MEDIAOPT_MII ||
963 sc->xl_media & XL_MEDIAOPT_BT4)
964 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
965 if (sc->xl_media & XL_MEDIAOPT_BTX)
966 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
967
968 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
969 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
970
971 return;
972}
973
974static void
975xl_setmode(sc, media)
976 struct xl_softc *sc;
977 int media;
978{
979 u_int32_t icfg;
980 u_int16_t mediastat;
981
982 printf("xl%d: selecting ", sc->xl_unit);
983
984 XL_SEL_WIN(4);
985 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
986 XL_SEL_WIN(3);
987 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
988
989 if (sc->xl_media & XL_MEDIAOPT_BT) {
990 if (IFM_SUBTYPE(media) == IFM_10_T) {
991 printf("10baseT transceiver, ");
992 sc->xl_xcvr = XL_XCVR_10BT;
993 icfg &= ~XL_ICFG_CONNECTOR_MASK;
994 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
995 mediastat |= XL_MEDIASTAT_LINKBEAT|
996 XL_MEDIASTAT_JABGUARD;
997 mediastat &= ~XL_MEDIASTAT_SQEENB;
998 }
999 }
1000
1001 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1002 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1003 printf("100baseFX port, ");
1004 sc->xl_xcvr = XL_XCVR_100BFX;
1005 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1006 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1007 mediastat |= XL_MEDIASTAT_LINKBEAT;
1008 mediastat &= ~XL_MEDIASTAT_SQEENB;
1009 }
1010 }
1011
1012 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1013 if (IFM_SUBTYPE(media) == IFM_10_5) {
1014 printf("AUI port, ");
1015 sc->xl_xcvr = XL_XCVR_AUI;
1016 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1017 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1018 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1019 XL_MEDIASTAT_JABGUARD);
1020 mediastat |= ~XL_MEDIASTAT_SQEENB;
1021 }
1022 if (IFM_SUBTYPE(media) == IFM_10_FL) {
1023 printf("10baseFL transceiver, ");
1024 sc->xl_xcvr = XL_XCVR_AUI;
1025 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1026 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1027 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1028 XL_MEDIASTAT_JABGUARD);
1029 mediastat |= ~XL_MEDIASTAT_SQEENB;
1030 }
1031 }
1032
1033 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1034 if (IFM_SUBTYPE(media) == IFM_10_2) {
1035 printf("BNC port, ");
1036 sc->xl_xcvr = XL_XCVR_COAX;
1037 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1038 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1039 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1040 XL_MEDIASTAT_JABGUARD|
1041 XL_MEDIASTAT_SQEENB);
1042 }
1043 }
1044
1045 if ((media & IFM_GMASK) == IFM_FDX ||
1046 IFM_SUBTYPE(media) == IFM_100_FX) {
1047 printf("full duplex\n");
1048 XL_SEL_WIN(3);
1049 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1050 } else {
1051 printf("half duplex\n");
1052 XL_SEL_WIN(3);
1053 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1054 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1055 }
1056
1057 if (IFM_SUBTYPE(media) == IFM_10_2)
1058 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1059 else
1060 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1061 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1062 XL_SEL_WIN(4);
1063 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1064 DELAY(800);
1065 XL_SEL_WIN(7);
1066
1067 return;
1068}
1069
/*
 * Issue a global reset to the chip and wait for it to complete, then
 * reset the TX and RX engines.  Adapters that need the LED or MII
 * power polarity inverted (per sc->xl_flags) get their reset options
 * reprogrammed afterwards.
 */
static void
xl_reset(sc)
	struct xl_softc *sc;
{
	register int i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C cardbus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Poll until the command-in-progress bit clears. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("xl%d: reset didn't complete\n", sc->xl_unit);

	/* Reset TX and RX. */
	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Re-invert the LED/MII power bits on adapters that need it. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
	return;
}
1127
1128/*
1129 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1130 * IDs against our list and return a device name if we find a match.
1131 */
1132static int
1133xl_probe(dev)
1134 device_t dev;
1135{
1136 struct xl_type *t;
1137
1138 t = xl_devs;
1139
1140 while(t->xl_name != NULL) {
1141 if ((pci_get_vendor(dev) == t->xl_vid) &&
1142 (pci_get_device(dev) == t->xl_did)) {
1143 device_set_desc(dev, t->xl_name);
1144 return(0);
1145 }
1146 t++;
1147 }
1148
1149 return(ENXIO);
1150}
1151
1152/*
1153 * This routine is a kludge to work around possible hardware faults
1154 * or manufacturing defects that can cause the media options register
1155 * (or reset options register, as it's called for the first generation
1156 * 3c90x adapters) to return an incorrect result. I have encountered
1157 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1158 * which doesn't have any of the 'mediaopt' bits set. This screws up
1159 * the attach routine pretty badly because it doesn't know what media
1160 * to look for. If we find ourselves in this predicament, this routine
1161 * will try to guess the media options values and warn the user of a
1162 * possible manufacturing defect with his adapter/system/whatever.
1163 */
1164static void
1165xl_mediacheck(sc)
1166 struct xl_softc *sc;
1167{
1168
1169 /*
1170 * If some of the media options bits are set, assume they are
1171 * correct. If not, try to figure it out down below.
1172 * XXX I should check for 10baseFL, but I don't have an adapter
1173 * to test with.
1174 */
1175 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1176 /*
1177 * Check the XCVR value. If it's not in the normal range
1178 * of values, we need to fake it up here.
1179 */
1180 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1181 return;
1182 else {
1183 printf("xl%d: bogus xcvr value "
1184 "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1185 printf("xl%d: choosing new default based "
1186 "on card type\n", sc->xl_unit);
1187 }
1188 } else {
1189 if (sc->xl_type == XL_TYPE_905B &&
1190 sc->xl_media & XL_MEDIAOPT_10FL)
1191 return;
1192 printf("xl%d: WARNING: no media options bits set in "
1193 "the media options register!!\n", sc->xl_unit);
1194 printf("xl%d: this could be a manufacturing defect in "
1195 "your adapter or system\n", sc->xl_unit);
1196 printf("xl%d: attempting to guess media type; you "
1197 "should probably consult your vendor\n", sc->xl_unit);
1198 }
1199
1200 xl_choose_xcvr(sc, 1);
1201
1202 return;
1203}
1204
/*
 * Derive a sane media-options mask and default transceiver type from
 * the PCI device ID stored in the EEPROM.  Used when the EEPROM's own
 * media/xcvr data is missing or bogus (see xl_mediacheck()).  When
 * 'verbose' is non-zero the guess is announced on the console.
 */
static void
xl_choose_xcvr(sc, verbose)
	struct xl_softc *sc;
	int verbose;
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing 10BaseT "
			    "transceiver\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing COMBO "
			    "(AUI/BNC/TP)\n", sc->xl_unit);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 "
			    "plus BNC/AUI\n", sc->xl_unit);
		break;
	default:
		/* Unrecognized ID: 10baseT is the least harmful default. */
		printf("xl%d: unknown device ID: %x -- "
			"defaulting to 10baseT\n", sc->xl_unit, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}

	return;
}
1298
1299/*
1300 * Attach the interface. Allocate softc structures, do ifmedia
1301 * setup and ethernet/BPF attach.
1302 */
1303static int
1304xl_attach(dev)
1305 device_t dev;
1306{
1307 u_char eaddr[ETHER_ADDR_LEN];
1308 u_int16_t xcvr[2];
1309 struct xl_softc *sc;
1310 struct ifnet *ifp;
1311 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1312 int unit, error = 0, rid, res;
1313 uint16_t did;
1314
1315 sc = device_get_softc(dev);
1316 unit = device_get_unit(dev);
1317
1318 mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1319 MTX_DEF | MTX_RECURSE);
1320 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1321
1322 did = pci_get_device(dev);
1323
1324 sc->xl_flags = 0;
1325 if (did == TC_DEVICEID_HURRICANE_555)
1326 sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
1327 if (did == TC_DEVICEID_HURRICANE_556 ||
1328 did == TC_DEVICEID_HURRICANE_556B)
1329 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1330 XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
1331 XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
1332 if (did == TC_DEVICEID_HURRICANE_555 ||
1333 did == TC_DEVICEID_HURRICANE_556)
1334 sc->xl_flags |= XL_FLAG_8BITROM;
1335 if (did == TC_DEVICEID_HURRICANE_556B)
1336 sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
1337
1338 if (did == TC_DEVICEID_HURRICANE_575A ||
1339 did == TC_DEVICEID_HURRICANE_575B ||
1340 did == TC_DEVICEID_HURRICANE_575C ||
1341 did == TC_DEVICEID_HURRICANE_656B ||
1342 did == TC_DEVICEID_TORNADO_656C)
1343 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
1344 XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
1345 if (did == TC_DEVICEID_HURRICANE_656)
1346 sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
1347 if (did == TC_DEVICEID_HURRICANE_575B)
1348 sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
1349 if (did == TC_DEVICEID_HURRICANE_575C)
1350 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1351 if (did == TC_DEVICEID_TORNADO_656C)
1352 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
1353 if (did == TC_DEVICEID_HURRICANE_656 ||
1354 did == TC_DEVICEID_HURRICANE_656B)
1355 sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
1356 XL_FLAG_INVERT_LED_PWR;
1357 if (did == TC_DEVICEID_TORNADO_10_100BT_920B)
1358 sc->xl_flags |= XL_FLAG_PHYOK;
1359
1360 switch (did) {
1361 case TC_DEVICEID_HURRICANE_575A:
1362 case TC_DEVICEID_HURRICANE_575B:
1363 case TC_DEVICEID_HURRICANE_575C:
1364 sc->xl_flags |= XL_FLAG_NO_MMIO;
1365 break;
1366 default:
1367 break;
1368 }
1369
1370#ifndef BURN_BRIDGES
1371 /*
1372 * If this is a 3c905B, we have to check one extra thing.
1373 * The 905B supports power management and may be placed in
1374 * a low-power mode (D3 mode), typically by certain operating
1375 * systems which shall not be named. The PCI BIOS is supposed
1376 * to reset the NIC and bring it out of low-power mode, but
1377 * some do not. Consequently, we have to see if this chip
1378 * supports power management, and if so, make sure it's not
1379 * in low-power mode. If power management is available, the
1380 * capid byte will be 0x01.
1381 *
1382 * I _think_ that what actually happens is that the chip
1383 * loses its PCI configuration during the transition from
1384 * D3 back to D0; this means that it should be possible for
1385 * us to save the PCI iobase, membase and IRQ, put the chip
1386 * back in the D0 state, then restore the PCI config ourselves.
1387 */
1388
1389 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1390 u_int32_t iobase, membase, irq;
1391
1392 /* Save important PCI config data. */
1393 iobase = pci_read_config(dev, XL_PCI_LOIO, 4);
1394 membase = pci_read_config(dev, XL_PCI_LOMEM, 4);
1395 irq = pci_read_config(dev, XL_PCI_INTLINE, 4);
1396
1397 /* Reset the power state. */
1398 printf("xl%d: chip is in D%d power mode "
1399 "-- setting to D0\n", unit,
1400 pci_get_powerstate(dev));
1401
1402 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1403
1404 /* Restore PCI config data. */
1405 pci_write_config(dev, XL_PCI_LOIO, iobase, 4);
1406 pci_write_config(dev, XL_PCI_LOMEM, membase, 4);
1407 pci_write_config(dev, XL_PCI_INTLINE, irq, 4);
1408 }
1409#endif
1410
1411 /*
1412 * Map control/status registers.
1413 */
1414 pci_enable_busmaster(dev);
1415
1416 if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
1417 rid = XL_PCI_LOMEM;
1418 res = SYS_RES_MEMORY;
1419
1420 sc->xl_res = bus_alloc_resource(dev, res, &rid,
1421 0, ~0, 1, RF_ACTIVE);
1422 }
1423
1424 if (sc->xl_res != NULL) {
1425 sc->xl_flags |= XL_FLAG_USE_MMIO;
1426 if (bootverbose)
1427 printf("xl%d: using memory mapped I/O\n", unit);
1428 } else {
1429 rid = XL_PCI_LOIO;
1430 res = SYS_RES_IOPORT;
1431 sc->xl_res = bus_alloc_resource(dev, res, &rid,
1432 0, ~0, 1, RF_ACTIVE);
1433 if (sc->xl_res == NULL) {
1434 printf ("xl%d: couldn't map ports/memory\n", unit);
1435 error = ENXIO;
1436 goto fail;
1437 }
1438 if (bootverbose)
1439 printf("xl%d: using port I/O\n", unit);
1440 }
1441
1442 sc->xl_btag = rman_get_bustag(sc->xl_res);
1443 sc->xl_bhandle = rman_get_bushandle(sc->xl_res);
1444
1445 if (sc->xl_flags & XL_FLAG_FUNCREG) {
1446 rid = XL_PCI_FUNCMEM;
1447 sc->xl_fres = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1448 0, ~0, 1, RF_ACTIVE);
1449
1450 if (sc->xl_fres == NULL) {
1451 printf ("xl%d: couldn't map ports/memory\n", unit);
1452 error = ENXIO;
1453 goto fail;
1454 }
1455
1456 sc->xl_ftag = rman_get_bustag(sc->xl_fres);
1457 sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
1458 }
1459
1460 /* Allocate interrupt */
1461 rid = 0;
1462 sc->xl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1463 RF_SHAREABLE | RF_ACTIVE);
1464 if (sc->xl_irq == NULL) {
1465 printf("xl%d: couldn't map interrupt\n", unit);
1466 error = ENXIO;
1467 goto fail;
1468 }
1469
1470 /* Reset the adapter. */
1471 xl_reset(sc);
1472
1473 /*
1474 * Get station address from the EEPROM.
1475 */
1476 if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1477 printf("xl%d: failed to read station address\n", sc->xl_unit);
1478 error = ENXIO;
1479 goto fail;
1480 }
1481
1482 /*
1483 * A 3Com chip was detected. Inform the world.
1484 */
1485 printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");
1486
1487 sc->xl_unit = unit;
1488 callout_handle_init(&sc->xl_stat_ch);
1489 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1490
1491 /*
1492 * Now allocate a tag for the DMA descriptor lists and a chunk
1493 * of DMA-able memory based on the tag. Also obtain the DMA
1494 * addresses of the RX and TX ring, which we'll need later.
1495 * All of our lists are allocated as a contiguous block
1496 * of memory.
1497 */
1498 error = bus_dma_tag_create(NULL, 8, 0,
1499 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1500 XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
1501 &sc->xl_ldata.xl_rx_tag);
1502 if (error) {
1503 printf("xl%d: failed to allocate rx dma tag\n", unit);
1504 goto fail;
1505 }
1506
1507 error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
1508 (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1509 &sc->xl_ldata.xl_rx_dmamap);
1510 if (error) {
1511 printf("xl%d: no memory for rx list buffers!\n", unit);
1512 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1513 sc->xl_ldata.xl_rx_tag = NULL;
1514 goto fail;
1515 }
1516
1517 error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
1518 sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
1519 XL_RX_LIST_SZ, xl_dma_map_addr,
1520 &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
1521 if (error) {
1522 printf("xl%d: cannot get dma address of the rx ring!\n", unit);
1523 bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
1524 sc->xl_ldata.xl_rx_dmamap);
1525 bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
1526 sc->xl_ldata.xl_rx_tag = NULL;
1527 goto fail;
1528 }
1529
1530 error = bus_dma_tag_create(NULL, 8, 0,
1531 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1532 XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
1533 &sc->xl_ldata.xl_tx_tag);
1534 if (error) {
1535 printf("xl%d: failed to allocate tx dma tag\n", unit);
1536 goto fail;
1537 }
1538
1539 error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
1540 (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1541 &sc->xl_ldata.xl_tx_dmamap);
1542 if (error) {
1543 printf("xl%d: no memory for list buffers!\n", unit);
1544 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1545 sc->xl_ldata.xl_tx_tag = NULL;
1546 goto fail;
1547 }
1548
1549 error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
1550 sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
1551 XL_TX_LIST_SZ, xl_dma_map_addr,
1552 &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
1553 if (error) {
1554 printf("xl%d: cannot get dma address of the tx ring!\n", unit);
1555 bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
1556 sc->xl_ldata.xl_tx_dmamap);
1557 bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
1558 sc->xl_ldata.xl_tx_tag = NULL;
1559 goto fail;
1560 }
1561
1562 /*
1563 * Allocate a DMA tag for the mapping of mbufs.
1564 */
1565 error = bus_dma_tag_create(NULL, 1, 0,
1566 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1567 MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
1568 NULL, &sc->xl_mtag);
1569 if (error) {
1570 printf("xl%d: failed to allocate mbuf dma tag\n", unit);
1571 goto fail;
1572 }
1573
1574 /* We need a spare DMA map for the RX ring. */
1575 error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
1576 if (error)
1577 goto fail;
1578
1579 /*
1580 * Figure out the card type. 3c905B adapters have the
1581 * 'supportsNoTxLength' bit set in the capabilities
1582 * word in the EEPROM.
1583 * Note: my 3c575C cardbus card lies. It returns a value
1584 * of 0x1578 for its capabilities word, which is somewhat
1585 * nonsensical. Another way to distinguish a 3c90x chip
1586 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x Boomerang chips.
1588 */
1589 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1590 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
1591 !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
1592 sc->xl_type = XL_TYPE_905B;
1593 else
1594 sc->xl_type = XL_TYPE_90X;
1595
1596 ifp = &sc->arpcom.ac_if;
1597 ifp->if_softc = sc;
1598 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1599 ifp->if_mtu = ETHERMTU;
1600 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1601 ifp->if_ioctl = xl_ioctl;
1602 ifp->if_output = ether_output;
1603 ifp->if_capabilities = IFCAP_VLAN_MTU;
1604 if (sc->xl_type == XL_TYPE_905B) {
1605 ifp->if_start = xl_start_90xB;
1606 ifp->if_hwassist = XL905B_CSUM_FEATURES;
1607 ifp->if_capabilities |= IFCAP_HWCSUM;
1608 } else
1609 ifp->if_start = xl_start;
1610 ifp->if_watchdog = xl_watchdog;
1611 ifp->if_init = xl_init;
1612 ifp->if_baudrate = 10000000;
1613 ifp->if_snd.ifq_maxlen = XL_TX_LIST_CNT - 1;
1614 ifp->if_capenable = ifp->if_capabilities;
1615
1616 /*
1617 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interface and a
1619 * possible PHY.
1620 */
1621 XL_SEL_WIN(3);
1622 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1623 if (bootverbose)
1624 printf("xl%d: media options word: %x\n", sc->xl_unit,
1625 sc->xl_media);
1626
1627 xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
1628 sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
1629 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1630 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1631
1632 xl_mediacheck(sc);
1633
1634 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
1635 || sc->xl_media & XL_MEDIAOPT_BT4) {
1636 if (bootverbose)
1637 printf("xl%d: found MII/AUTO\n", sc->xl_unit);
1638 xl_setcfg(sc);
1639 if (mii_phy_probe(dev, &sc->xl_miibus,
1640 xl_ifmedia_upd, xl_ifmedia_sts)) {
1641 printf("xl%d: no PHY found!\n", sc->xl_unit);
1642 error = ENXIO;
1643 goto fail;
1644 }
1645
1646 goto done;
1647 }
1648
1649 /*
1650 * Sanity check. If the user has selected "auto" and this isn't
1651 * a 10/100 card of some kind, we need to force the transceiver
1652 * type to something sane.
1653 */
1654 if (sc->xl_xcvr == XL_XCVR_AUTO)
1655 xl_choose_xcvr(sc, bootverbose);
1656
1657 /*
1658 * Do ifmedia setup.
1659 */
1660 if (sc->xl_media & XL_MEDIAOPT_BT) {
1661 if (bootverbose)
1662 printf("xl%d: found 10baseT\n", sc->xl_unit);
1663 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1664 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1665 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1666 ifmedia_add(&sc->ifmedia,
1667 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1668 }
1669
1670 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1671 /*
1672 * Check for a 10baseFL board in disguise.
1673 */
1674 if (sc->xl_type == XL_TYPE_905B &&
1675 sc->xl_media == XL_MEDIAOPT_10FL) {
1676 if (bootverbose)
1677 printf("xl%d: found 10baseFL\n", sc->xl_unit);
1678 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
1679 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
1680 0, NULL);
1681 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1682 ifmedia_add(&sc->ifmedia,
1683 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
1684 } else {
1685 if (bootverbose)
1686 printf("xl%d: found AUI\n", sc->xl_unit);
1687 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1688 }
1689 }
1690
1691 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1692 if (bootverbose)
1693 printf("xl%d: found BNC\n", sc->xl_unit);
1694 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1695 }
1696
1697 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1698 if (bootverbose)
1699 printf("xl%d: found 100baseFX\n", sc->xl_unit);
1700 ifp->if_baudrate = 100000000;
1701 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1702 }
1703
1704 /* Choose a default media. */
1705 switch(sc->xl_xcvr) {
1706 case XL_XCVR_10BT:
1707 media = IFM_ETHER|IFM_10_T;
1708 xl_setmode(sc, media);
1709 break;
1710 case XL_XCVR_AUI:
1711 if (sc->xl_type == XL_TYPE_905B &&
1712 sc->xl_media == XL_MEDIAOPT_10FL) {
1713 media = IFM_ETHER|IFM_10_FL;
1714 xl_setmode(sc, media);
1715 } else {
1716 media = IFM_ETHER|IFM_10_5;
1717 xl_setmode(sc, media);
1718 }
1719 break;
1720 case XL_XCVR_COAX:
1721 media = IFM_ETHER|IFM_10_2;
1722 xl_setmode(sc, media);
1723 break;
1724 case XL_XCVR_AUTO:
1725 case XL_XCVR_100BTX:
1726 case XL_XCVR_MII:
1727 /* Chosen by miibus */
1728 break;
1729 case XL_XCVR_100BFX:
1730 media = IFM_ETHER|IFM_100_FX;
1731 break;
1732 default:
1733 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
1734 sc->xl_xcvr);
1735 /*
1736 * This will probably be wrong, but it prevents
1737 * the ifmedia code from panicking.
1738 */
1739 media = IFM_ETHER|IFM_10_T;
1740 break;
1741 }
1742
1743 if (sc->xl_miibus == NULL)
1744 ifmedia_set(&sc->ifmedia, media);
1745
1746done:
1747
1748 if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
1749 XL_SEL_WIN(0);
1750 CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
1751 }
1752
1753 /*
1754 * Call MI attach routine.
1755 */
1756 ether_ifattach(ifp, eaddr);
1757
1758 /* Hook interrupt last to avoid having to lock softc */
1759 error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET,
1760 xl_intr, sc, &sc->xl_intrhand);
1761 if (error) {
1762 printf("xl%d: couldn't set up irq\n", unit);
1763 ether_ifdetach(ifp);
1764 goto fail;
1765 }
1766
1767fail:
1768 if (error)
1769 xl_detach(dev);
1770
1771 return(error);
1772}
1773
1774/*
1775 * Shutdown hardware and free up resources. This can be called any
1776 * time after the mutex has been initialized. It is called in both
1777 * the error case in attach and the normal detach case so it needs
1778 * to be careful about only freeing resources that have actually been
1779 * allocated.
1780 */
static int
xl_detach(dev)
	device_t dev;
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	int rid, res;

	sc = device_get_softc(dev);
	/* Attach initializes the mutex first, so it must exist here. */
	KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
	XL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Work out which resource type/rid was used to map the chip
	 * registers so that the matching one is released below.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
	}

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		xl_reset(sc);
		xl_stop(sc);
		ether_ifdetach(ifp);
	}
	if (sc->xl_miibus)
		device_delete_child(dev, sc->xl_miibus);
	bus_generic_detach(dev);
	ifmedia_removeall(&sc->ifmedia);

	/*
	 * Each resource below is released only if it was actually
	 * allocated, since this path also runs on attach failure.
	 */
	if (sc->xl_intrhand)
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
	if (sc->xl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
	if (sc->xl_fres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    XL_PCI_FUNCMEM, sc->xl_fres);
	if (sc->xl_res)
		bus_release_resource(dev, res, rid, sc->xl_res);

	/* Destroy the mbuf DMA tag and the spare map used by xl_newbuf(). */
	if (sc->xl_mtag) {
		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
		bus_dma_tag_destroy(sc->xl_mtag);
	}
	/* Unload, free and destroy the RX descriptor list, if created. */
	if (sc->xl_ldata.xl_rx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
	}
	/* Likewise for the TX descriptor list. */
	if (sc->xl_ldata.xl_tx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
	}

	XL_UNLOCK(sc);
	mtx_destroy(&sc->xl_mtx);

	return(0);
}
1847
1848/*
1849 * Initialize the transmit descriptors.
1850 */
static int
xl_list_tx_init(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int error, i;

	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		/* Point each software descriptor at its hardware descriptor. */
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		error = bus_dmamap_create(sc->xl_mtag, 0,
		    &cd->xl_tx_chain[i].xl_map);
		if (error)
			return(error);
		/* Precompute the bus address of hardware descriptor i. */
		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
		    i * sizeof(struct xl_list);
		/*
		 * Linear free list (not a ring, unlike the 90xB variant):
		 * the last entry terminates the chain.
		 */
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = NULL;
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
	}

	/* All descriptors start on the free list; nothing queued yet. */
	cd->xl_tx_free = &cd->xl_tx_chain[0];
	cd->xl_tx_tail = cd->xl_tx_head = NULL;

	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
	return(0);
}
1881
1882/*
1883 * Initialize the transmit descriptors.
1884 */
static int
xl_list_tx_init_90xB(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int error, i;

	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		/* Point each software descriptor at its hardware descriptor. */
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		error = bus_dmamap_create(sc->xl_mtag, 0,
		    &cd->xl_tx_chain[i].xl_map);
		if (error)
			return(error);
		/* Precompute the bus address of hardware descriptor i. */
		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
		    i * sizeof(struct xl_list);
		/*
		 * Unlike the original 3c90x scheme, the 90xB TX list is a
		 * doubly-linked closed ring: the last descriptor wraps to
		 * the first, and each entry also records its predecessor
		 * (used by xl_start_90xB() to patch the chain).
		 */
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
		if (i == 0)
			cd->xl_tx_chain[i].xl_prev =
			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
		else
			cd->xl_tx_chain[i].xl_prev =
			    &cd->xl_tx_chain[i - 1];
	}

	/* Clear the hardware descriptors and mark slot 0 as empty. */
	bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);

	/* Producer/consumer start at index 1 with no frames outstanding. */
	cd->xl_tx_prod = 1;
	cd->xl_tx_cons = 1;
	cd->xl_tx_cnt = 0;

	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
	return(0);
}
1925
1926/*
1927 * Initialize the RX descriptors and allocate mbufs for them. Note that
1928 * we arrange the descriptors in a closed ring, so that the last descriptor
1929 * points back to the first.
1930 */
static int
xl_list_rx_init(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int error, i, next;
	u_int32_t nextptr;

	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;

	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		/* Point each software descriptor at its hardware descriptor. */
		cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
		error = bus_dmamap_create(sc->xl_mtag, 0,
		    &cd->xl_rx_chain[i].xl_map);
		if (error)
			return(error);
		/* Attach an mbuf cluster to this descriptor. */
		error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
		if (error)
			return(error);
		/* Close the ring: the last descriptor points to the first. */
		if (i == (XL_RX_LIST_CNT - 1))
			next = 0;
		else
			next = i + 1;
		nextptr = ld->xl_rx_dmaaddr +
		    next * sizeof(struct xl_list_onefrag);
		/* Link both the software chain and the hardware list. */
		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
		ld->xl_rx_list[i].xl_next = htole32(nextptr);
	}

	/* Push the descriptor writes out to memory before the chip sees them. */
	bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
	cd->xl_rx_head = &cd->xl_rx_chain[0];

	return(0);
}
1967
1968/*
1969 * Initialize an RX descriptor and attach an MBUF cluster.
1970 * If we fail to do so, we need to leave the old mbuf and
1971 * the old DMA map untouched so that it can be reused.
1972 */
static int
xl_newbuf(sc, c)
	struct xl_softc *sc;
	struct xl_chain_onefrag *c;
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;
	int error;
	u_int32_t baddr;

	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return(ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the spare map first, so that on failure
	 * the descriptor's current mbuf and map are left untouched.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
	    xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
		return(error);
	}

	/*
	 * Success: unload the old mbuf and swap the descriptor's map with
	 * the spare, so the just-loaded map stays with this descriptor and
	 * the old map becomes the new spare.
	 */
	bus_dmamap_unload(sc->xl_mtag, c->xl_map);
	map = c->xl_map;
	c->xl_map = sc->xl_tmpmap;
	sc->xl_tmpmap = map;
	c->xl_mbuf = m_new;
	/* One-fragment RX descriptor: full cluster length, status cleared. */
	c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
	c->xl_ptr->xl_status = 0;
	c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
	return(0);
}
2011
2012static int
2013xl_rx_resync(sc)
2014 struct xl_softc *sc;
2015{
2016 struct xl_chain_onefrag *pos;
2017 int i;
2018
2019 pos = sc->xl_cdata.xl_rx_head;
2020
2021 for (i = 0; i < XL_RX_LIST_CNT; i++) {
2022 if (pos->xl_ptr->xl_status)
2023 break;
2024 pos = pos->xl_next;
2025 }
2026
2027 if (i == XL_RX_LIST_CNT)
2028 return(0);
2029
2030 sc->xl_cdata.xl_rx_head = pos;
2031
2032 return(EAGAIN);
2033}
2034
2035/*
2036 * A frame has been uploaded: pass the resulting mbuf chain up to
2037 * the higher level protocols.
2038 */
static void
xl_rxeof(sc)
	struct xl_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct xl_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	XL_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

again:

	/* Pick up the chip's latest descriptor writes before reading them. */
	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	/* A non-zero status word means the descriptor has been filled in. */
	while((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
		cur_rx = sc->xl_cdata.xl_rx_head;
		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			printf("xl%d: bad receive status -- "
			    "packet dropped\n", sc->xl_unit);
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
		    BUS_DMASYNC_POSTREAD);
		m = cur_rx->xl_mbuf;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (xl_newbuf(sc, cur_rx)) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}
		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Do IP checksum checking. */
			if (rxstat & XL_RXSTAT_IPCKOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & XL_RXSTAT_IPCKERR))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/* TCP or UDP checksum verified by the chip. */
			if ((rxstat & XL_RXSTAT_TCPCOK &&
			     !(rxstat & XL_RXSTAT_TCPCKERR)) ||
			    (rxstat & XL_RXSTAT_UDPCKOK &&
			     !(rxstat & XL_RXSTAT_UDPCKERR))) {
				m->m_pkthdr.csum_flags |=
					CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Drop the lock while handing the packet to the stack. */
		XL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		XL_LOCK(sc);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * you need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		goto again;
	}

	return;
}
2171
2172/*
2173 * A frame was downloaded to the chip. It's safe for us to clean up
2174 * the list buffers.
2175 */
static void
xl_txeof(sc)
	struct xl_softc *sc;
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while(sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/*
		 * A non-zero downlist pointer means the download engine
		 * is still working on the list; stop reclaiming here.
		 */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		/* Unmap and free the completed frame. */
		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Queue fully drained: allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames remain queued; if the download engine has
		 * stalled or lost its list pointer, restart it at the
		 * current head.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}

	return;
}
2229
/*
 * 3c90xB version of the TX completion handler: walk the ring from the
 * consumer index to the producer index, reclaiming every descriptor
 * whose status word has the DL_COMPLETE bit set by the chip.
 */
static void
xl_txeof_90xB(sc)
	struct xl_softc *sc;
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->arpcom.ac_if;

	/* Pick up the chip's status-word updates before reading them. */
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	idx = sc->xl_cdata.xl_tx_cons;
	while(idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		    XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		ifp->if_opackets++;

		/* One less frame outstanding; cancel the watchdog. */
		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* If anything was reclaimed, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
2273
2274/*
2275 * TX 'end of channel' interrupt handler. Actually, we should
2276 * only get a 'TX complete' interrupt if there's a transmit error,
2277 * so this is really TX error handler.
2278 */
static void
xl_txeoc(sc)
	struct xl_softc *sc;
{
	u_int8_t txstat;

	/* Each read of TX_STATUS pops one entry off the status stack. */
	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
			txstat & XL_TXSTATUS_JABBER ||
			txstat & XL_TXSTATUS_RECLAIM) {
			printf("xl%d: transmission error: %x\n",
				sc->xl_unit, txstat);
			/* Fatal TX error: reset the transmitter. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			/*
			 * The reset clears the downlist pointer, so point
			 * the chip back at the first unreclaimed frame:
			 * the consumer-index descriptor on 905B, the list
			 * head on the older chips.
			 */
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/*
			 * On underrun, raise the TX start threshold so the
			 * chip buffers more of the frame before starting.
			 */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				printf("xl%d: tx underrun, increasing tx start"
				    " threshold to %d bytes\n", sc->xl_unit,
				    sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			/* Re-enable the transmitter and restart downloads. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal status: just restart the engine. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}

	return;
}
2341
/*
 * Interrupt handler: acknowledge and dispatch every pending interrupt
 * source until the status register shows none left.
 */
static void
xl_intr(arg)
	void *arg;
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = arg;
	XL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* 0xFFFF means the register read floated high (card gone/dead). */
	while((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		/* Ack exactly the sources we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (status & XL_STAT_UP_COMPLETE) {
			int curpkts;

			/*
			 * If the RX handler found nothing even though the
			 * chip signalled completion, the software head may
			 * be out of sync with the ring; resync and retry.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete only fires on error; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: full reset and reinit. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		/* Statistics counter overflow: drain the counters now. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Kick the transmitter if frames are waiting to go out. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	XL_UNLOCK(sc);

	return;
}
2401
/*
 * Periodic (and overflow-driven) statistics harvester. Reading the
 * window-6 counters clears them, which also prevents further
 * statsoflow interrupts. Doubles as the MII tick driver. Reentered
 * from xl_intr() with xl_stats_no_timeout set to suppress rescheduling.
 */
static void
xl_stats_update(xsc)
	void *xsc;
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/* The window-6 counters are laid out to match struct xl_stats. */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	XL_SEL_WIN(7);

	/* Reschedule ourselves unless called from the interrupt path. */
	if (!sc->xl_stats_no_timeout)
		sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);

	return;
}
2453
2454/*
2455 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2456 * pointers to the fragment pointers.
2457 */
static int
xl_encap(sc, c, m_head)
	struct xl_softc *sc;
	struct xl_chain *c;
	struct mbuf *m_head;
{
	int error;
	u_int32_t status;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
	    xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);

	/* EFBIG (too many fragments) is recoverable; anything else isn't. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
		return(1);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf *m_new;

		/* Coalesce the chain into fewer mbufs, then retry the load. */
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL) {
			m_freem(m_head);
			return(1);
		} else {
			m_head = m_new;
		}

		error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
			m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_head);
			printf("xl%d: can't map mbuf (error %d)\n",
			    sc->xl_unit, error);
			return(1);
		}
	}

	/*
	 * On the 905B, request hardware checksum offload per the
	 * mbuf's csum flags, and defeat the TX rounding heuristic.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
		c->xl_ptr->xl_status = htole32(status);
	}

	c->xl_mbuf = m_head;
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
	return(0);
}
2531
2532/*
2533 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2534 * to the mbuf data regions directly in the transmit lists. We also save a
2535 * copy of the pointers since the transmit list fragment pointers are
2536 * physical addresses.
2537 */
static void
xl_start(ifp)
	struct ifnet *ifp;
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	u_int32_t status;
	int error;

	sc = ifp->if_softc;
	XL_LOCK(sc);
	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed frames before giving up. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			XL_UNLOCK(sc);
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	/* Dequeue frames and build a descriptor chain until we run dry. */
	while(sc->xl_cdata.xl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed and freed the mbuf; reuse this slot. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		XL_UNLOCK(sc);
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append the new chain to the existing one and clear the
		 * old tail's interrupt-request bit, so only the new tail
		 * generates a download interrupt.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);

	XL_UNLOCK(sc);

	return;
}
2672
/*
 * 3c90xB transmit routine: fill the fixed TX ring from the producer
 * index, then patch the previous descriptor's next pointer to hand
 * the new chain to the chip (the 90xB polls the ring, so no explicit
 * downlist-pointer write is needed here).
 */
static void
xl_start_90xB(ifp)
	struct ifnet *ifp;
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	sc = ifp->if_softc;
	XL_LOCK(sc);

	if (ifp->if_flags & IFF_OACTIVE) {
		XL_UNLOCK(sc);
		return;
	}

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	/* Fill descriptors until the ring is (nearly) full or queue empty. */
	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a small reserve of free slots in the ring. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed and freed the mbuf; reuse this slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		XL_UNLOCK(sc);
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the new chain in behind the previous descriptor. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	XL_UNLOCK(sc);

	return;
}
2763
2764static void
2765xl_init(xsc)
2766 void *xsc;
2767{
2768 struct xl_softc *sc = xsc;
2769 struct ifnet *ifp = &sc->arpcom.ac_if;
2770 int error, i;
2771 u_int16_t rxfilt = 0;
2772 struct mii_data *mii = NULL;
2773
2774 XL_LOCK(sc);
2775
2776 /*
2777 * Cancel pending I/O and free all RX/TX buffers.
2778 */
2779 xl_stop(sc);
2780
2781 if (sc->xl_miibus == NULL) {
2782 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2783 xl_wait(sc);
2784 }
2785 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2786 xl_wait(sc);
2787 DELAY(10000);
2788
2789 if (sc->xl_miibus != NULL)
2790 mii = device_get_softc(sc->xl_miibus);
2791
2792 /* Init our MAC address */
2793 XL_SEL_WIN(2);
2794 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2795 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2796 sc->arpcom.ac_enaddr[i]);
2797 }
2798
2799 /* Clear the station mask. */
2800 for (i = 0; i < 3; i++)
2801 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2802#ifdef notdef
2803 /* Reset TX and RX. */
2804 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2805 xl_wait(sc);
2806 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2807 xl_wait(sc);
2808#endif
2809 /* Init circular RX list. */
2810 error = xl_list_rx_init(sc);
2811 if (error) {
2812 printf("xl%d: initialization of the rx ring failed (%d)\n",
2813 sc->xl_unit, error);
2814 xl_stop(sc);
2815 XL_UNLOCK(sc);
2816 return;
2817 }
2818
2819 /* Init TX descriptors. */
2820 if (sc->xl_type == XL_TYPE_905B)
2821 error = xl_list_tx_init_90xB(sc);
2822 else
2823 error = xl_list_tx_init(sc);
2824 if (error) {
2825 printf("xl%d: initialization of the tx ring failed (%d)\n",
2826 sc->xl_unit, error);
2827 xl_stop(sc);
2828 XL_UNLOCK(sc);
2829 }
2830
2831 /*
2832 * Set the TX freethresh value.
2833 * Note that this has no effect on 3c905B "cyclone"
2834 * cards but is required for 3c900/3c905 "boomerang"
2835 * cards in order to enable the download engine.
2836 */
2837 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2838
2839 /* Set the TX start threshold for best performance. */
2840 sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2841 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2842
2843 /*
2844 * If this is a 3c905B, also set the tx reclaim threshold.
2845 * This helps cut down on the number of tx reclaim errors
2846 * that could happen on a busy network. The chip multiplies
2847 * the register value by 16 to obtain the actual threshold
2848 * in bytes, so we divide by 16 when setting the value here.
2849 * The existing threshold value can be examined by reading
2850 * the register at offset 9 in window 5.
2851 */
2852 if (sc->xl_type == XL_TYPE_905B) {
2853 CSR_WRITE_2(sc, XL_COMMAND,
2854 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2855 }
2856
2857 /* Set RX filter bits. */
2858 XL_SEL_WIN(5);
2859 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2860
2861 /* Set the individual bit to receive frames for this host only. */
2862 rxfilt |= XL_RXFILTER_INDIVIDUAL;
2863
2864 /* If we want promiscuous mode, set the allframes bit. */
2865 if (ifp->if_flags & IFF_PROMISC) {
2866 rxfilt |= XL_RXFILTER_ALLFRAMES;
2867 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2868 } else {
2869 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2870 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2871 }
2872
2873 /*
2874 * Set capture broadcast bit to capture broadcast frames.
2875 */
2876 if (ifp->if_flags & IFF_BROADCAST) {
2877 rxfilt |= XL_RXFILTER_BROADCAST;
2878 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2879 } else {
2880 rxfilt &= ~XL_RXFILTER_BROADCAST;
2881 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2882 }
2883
2884 /*
2885 * Program the multicast filter, if necessary.
2886 */
2887 if (sc->xl_type == XL_TYPE_905B)
2888 xl_setmulti_hash(sc);
2889 else
2890 xl_setmulti(sc);
2891
2892 /*
2893 * Load the address of the RX list. We have to
2894 * stall the upload engine before we can manipulate
2895 * the uplist pointer register, then unstall it when
2896 * we're finished. We also have to wait for the
2897 * stall command to complete before proceeding.
2898 * Note that we have to do this after any RX resets
2899 * have completed since the uplist register is cleared
2900 * by a reset.
2901 */
2902 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2903 xl_wait(sc);
2904 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2905 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2906 xl_wait(sc);
2907
2908
2909 if (sc->xl_type == XL_TYPE_905B) {
2910 /* Set polling interval */
2911 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2912 /* Load the address of the TX list */
2913 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2914 xl_wait(sc);
2915 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2916 sc->xl_cdata.xl_tx_chain[0].xl_phys);
2917 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2918 xl_wait(sc);
2919 }
2920
2921 /*
2922 * If the coax transceiver is on, make sure to enable
2923 * the DC-DC converter.
2924 */
2925 XL_SEL_WIN(3);
2926 if (sc->xl_xcvr == XL_XCVR_COAX)
2927 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2928 else
2929 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2930
2931 /*
2932 * increase packet size to allow reception of 802.1q or ISL packets.
2933 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
2934 * control register. For 3c90xB/C chips, use the RX packet size
2935 * register.
2936 */
2937
2938 if (sc->xl_type == XL_TYPE_905B)
2939 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
2940 else {
2941 u_int8_t macctl;
2942 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
2943 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
2944 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
2945 }
2946
2947 /* Clear out the stats counters. */
2948 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2949 sc->xl_stats_no_timeout = 1;
2950 xl_stats_update(sc);
2951 sc->xl_stats_no_timeout = 0;
2952 XL_SEL_WIN(4);
2953 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2954 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2955
2956 /*
2957 * Enable interrupts.
2958 */
2959 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2960 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2961 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2962 if (sc->xl_flags & XL_FLAG_FUNCREG)
2963 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
2964
2965 /* Set the RX early threshold */
2966 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2967 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2968
2969 /* Enable receiver and transmitter. */
2970 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2971 xl_wait(sc);
2972 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2973 xl_wait(sc);
2974
2975 if (mii != NULL)
2976 mii_mediachg(mii);
2977
2978 /* Select window 7 for normal operations. */
2979 XL_SEL_WIN(7);
2980
2981 ifp->if_flags |= IFF_RUNNING;
2982 ifp->if_flags &= ~IFF_OACTIVE;
2983
2984 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2985
2986 XL_UNLOCK(sc);
2987
2988 return;
2989}
2990
2991/*
2992 * Set media options.
2993 */
2994static int
2995xl_ifmedia_upd(ifp)
2996 struct ifnet *ifp;
2997{
2998 struct xl_softc *sc;
2999 struct ifmedia *ifm = NULL;
3000 struct mii_data *mii = NULL;
3001
3002 sc = ifp->if_softc;
3003 if (sc->xl_miibus != NULL)
3004 mii = device_get_softc(sc->xl_miibus);
3005 if (mii == NULL)
3006 ifm = &sc->ifmedia;
3007 else
3008 ifm = &mii->mii_media;
3009
3010 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3011 case IFM_100_FX:
3012 case IFM_10_FL:
3013 case IFM_10_2:
3014 case IFM_10_5:
3015 xl_setmode(sc, ifm->ifm_media);
3016 return(0);
3017 break;
3018 default:
3019 break;
3020 }
3021
3022 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
3023 || sc->xl_media & XL_MEDIAOPT_BT4) {
3024 xl_init(sc);
3025 } else {
3026 xl_setmode(sc, ifm->ifm_media);
3027 }
3028
3029 return(0);
3030}
3031
3032/*
3033 * Report current media status.
3034 */
3035static void
3036xl_ifmedia_sts(ifp, ifmr)
3037 struct ifnet *ifp;
3038 struct ifmediareq *ifmr;
3039{
3040 struct xl_softc *sc;
3041 u_int32_t icfg;
3042 u_int16_t status = 0;
3043 struct mii_data *mii = NULL;
3044
3045 sc = ifp->if_softc;
3046 if (sc->xl_miibus != NULL)
3047 mii = device_get_softc(sc->xl_miibus);
3048
3049 XL_SEL_WIN(4);
3050 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3051
3052 XL_SEL_WIN(3);
3053 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
3054 icfg >>= XL_ICFG_CONNECTOR_BITS;
3055
3056 ifmr->ifm_active = IFM_ETHER;
3057 ifmr->ifm_status = IFM_AVALID;
3058
3059 if ((status & XL_MEDIASTAT_CARRIER) == 0)
3060 ifmr->ifm_status |= IFM_ACTIVE;
3061
3062 switch(icfg) {
3063 case XL_XCVR_10BT:
3064 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
3065 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3066 ifmr->ifm_active |= IFM_FDX;
3067 else
3068 ifmr->ifm_active |= IFM_HDX;
3069 break;
3070 case XL_XCVR_AUI:
3071 if (sc->xl_type == XL_TYPE_905B &&
3072 sc->xl_media == XL_MEDIAOPT_10FL) {
3073 ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
3074 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3075 ifmr->ifm_active |= IFM_FDX;
3076 else
3077 ifmr->ifm_active |= IFM_HDX;
3078 } else
3079 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
3080 break;
3081 case XL_XCVR_COAX:
3082 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
3083 break;
3084 /*
3085 * XXX MII and BTX/AUTO should be separate cases.
3086 */
3087
3088 case XL_XCVR_100BTX:
3089 case XL_XCVR_AUTO:
3090 case XL_XCVR_MII:
3091 if (mii != NULL) {
3092 mii_pollstat(mii);
3093 ifmr->ifm_active = mii->mii_media_active;
3094 ifmr->ifm_status = mii->mii_media_status;
3095 }
3096 break;
3097 case XL_XCVR_100BFX:
3098 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
3099 break;
3100 default:
3101 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
3102 break;
3103 }
3104
3105 return;
3106}
3107
/*
 * Handle socket ioctl requests for the interface.  The driver lock
 * is held for the duration of the call.  Returns 0 or an errno.
 */
static int
xl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct xl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii = NULL;
	u_int8_t		rxfilt;

	XL_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while the
			 * interface is already running, just toggle the
			 * ALLFRAMES filter bit instead of doing a full
			 * reinit; otherwise (re)initialize the chip.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember the flags so the next change can be diffed. */
		sc->xl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* 3c905B has a programmable hash; older chips do not. */
		if (sc->xl_type == XL_TYPE_905B)
			xl_setmulti_hash(sc);
		else
			xl_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Forward media requests to the MII layer if present. */
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Enable/disable TX checksum offload as requested. */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = XL905B_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	XL_UNLOCK(sc);

	return(error);
}
3185
/*
 * Transmit watchdog: invoked when a queued packet has not been sent
 * within ifp->if_timer seconds.  Logs the condition, reaps completed
 * descriptors, then resets and reinitializes the chip.
 */
static void
xl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	u_int16_t		status = 0;

	sc = ifp->if_softc;

	XL_LOCK(sc);

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	printf("xl%d: watchdog timeout\n", sc->xl_unit);

	/* A set carrier bit in the media status means carrier loss. */
	if (status & XL_MEDIASTAT_CARRIER)
		printf("xl%d: no carrier - transceiver cable problem?\n",
		    sc->xl_unit);
	/* Drain completed TX/RX work before the reset discards state. */
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init(sc);

	/* Restart output if anything is still queued. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	XL_UNLOCK(sc);

	return;
}
3218
3219/*
3220 * Stop the adapter and free any mbufs allocated to the
3221 * RX and TX lists.
3222 */
3223static void
3224xl_stop(sc)
3225 struct xl_softc *sc;
3226{
3227 register int i;
3228 struct ifnet *ifp;
3229
3230 XL_LOCK(sc);
3231
3232 ifp = &sc->arpcom.ac_if;
3233 ifp->if_timer = 0;
3234
3235 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
3236 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
3237 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
3238 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
3239 xl_wait(sc);
3240 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
3241 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
3242 DELAY(800);
3243
3244#ifdef foo
3245 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
3246 xl_wait(sc);
3247 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
3248 xl_wait(sc);
3249#endif
3250
3251 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
3252 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
3253 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
3254 if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4 (sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
3255
3256 /* Stop the stats updater. */
3257 untimeout(xl_stats_update, sc, sc->xl_stat_ch);
3258
3259 /*
3260 * Free data in the RX lists.
3261 */
3262 for (i = 0; i < XL_RX_LIST_CNT; i++) {
3263 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
3264 bus_dmamap_unload(sc->xl_mtag,
3265 sc->xl_cdata.xl_rx_chain[i].xl_map);
3266 bus_dmamap_destroy(sc->xl_mtag,
3267 sc->xl_cdata.xl_rx_chain[i].xl_map);
3268 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
3269 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
3270 }
3271 }
3272 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
3273 /*
3274 * Free the TX list buffers.
3275 */
3276 for (i = 0; i < XL_TX_LIST_CNT; i++) {
3277 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
3278 bus_dmamap_unload(sc->xl_mtag,
3279 sc->xl_cdata.xl_tx_chain[i].xl_map);
3280 bus_dmamap_destroy(sc->xl_mtag,
3281 sc->xl_cdata.xl_tx_chain[i].xl_map);
3282 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
3283 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
3284 }
3285 }
3286 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
3287
3288 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3289
3290 XL_UNLOCK(sc);
3291
3292 return;
3293}
3294
3295/*
3296 * Stop all chip I/O so that the kernel's probe routines don't
3297 * get confused by errant DMAs when rebooting.
3298 */
3299static void
3300xl_shutdown(dev)
3301 device_t dev;
3302{
3303 struct xl_softc *sc;
3304
3305 sc = device_get_softc(dev);
3306
3307 XL_LOCK(sc);
3308 xl_reset(sc);
3309 xl_stop(sc);
3310 XL_UNLOCK(sc);
3311
3312 return;
3313}
3314
3315static int
3316xl_suspend(dev)
3317 device_t dev;
3318{
3319 struct xl_softc *sc;
3320
3321 sc = device_get_softc(dev);
3322
3323 XL_LOCK(sc);
3324 xl_stop(sc);
3325 XL_UNLOCK(sc);
3326
3327 return(0);
3328}
3329
3330static int
3331xl_resume(dev)
3332 device_t dev;
3333{
3334 struct xl_softc *sc;
3335 struct ifnet *ifp;
3336
3337 sc = device_get_softc(dev);
3338 XL_LOCK(sc);
3339 ifp = &sc->arpcom.ac_if;
3340
3341 xl_reset(sc);
3342 if (ifp->if_flags & IFF_UP)
3343 xl_init(sc);
3344
3345 XL_UNLOCK(sc);
3346 return(0);
3347}
296DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0);
297DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
298
299static void
300xl_dma_map_addr(arg, segs, nseg, error)
301 void *arg;
302 bus_dma_segment_t *segs;
303 int nseg, error;
304{
305 u_int32_t *paddr;
306
307 paddr = arg;
308 *paddr = segs->ds_addr;
309}
310
311static void
312xl_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
313 void *arg;
314 bus_dma_segment_t *segs;
315 int nseg;
316 bus_size_t mapsize;
317 int error;
318{
319 u_int32_t *paddr;
320
321 if (error)
322 return;
323 KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments"));
324 paddr = arg;
325 *paddr = segs->ds_addr;
326}
327
328static void
329xl_dma_map_txbuf(arg, segs, nseg, mapsize, error)
330 void *arg;
331 bus_dma_segment_t *segs;
332 int nseg;
333 bus_size_t mapsize;
334 int error;
335{
336 struct xl_list *l;
337 int i, total_len;
338
339 if (error)
340 return;
341
342 KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments"));
343
344 total_len = 0;
345 l = arg;
346 for (i = 0; i < nseg; i++) {
347 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
348 l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
349 l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
350 total_len += segs[i].ds_len;
351 }
352 l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len |
353 XL_LAST_FRAG);
354 l->xl_status = htole32(total_len);
355 l->xl_next = 0;
356}
357
358/*
359 * Murphy's law says that it's possible the chip can wedge and
360 * the 'command in progress' bit may never clear. Hence, we wait
361 * only a finite amount of time to avoid getting caught in an
362 * infinite loop. Normally this delay routine would be a macro,
363 * but it isn't called during normal operation so we can afford
364 * to make it a function.
365 */
366static void
367xl_wait(sc)
368 struct xl_softc *sc;
369{
370 register int i;
371
372 for (i = 0; i < XL_TIMEOUT; i++) {
373 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
374 break;
375 }
376
377 if (i == XL_TIMEOUT)
378 printf("xl%d: command never completed!\n", sc->xl_unit);
379
380 return;
381}
382
383/*
384 * MII access routines are provided for adapters with external
385 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
386 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
387 * Note: if you don't perform the MDIO operations just right,
388 * it's possible to end up with code that works correctly with
389 * some chips/CPUs/processor speeds/bus speeds/etc but not
390 * with others.
391 */
392#define MII_SET(x) \
393 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
394 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
395
396#define MII_CLR(x) \
397 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
398 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
399
400/*
401 * Sync the PHYs by setting data bit and strobing the clock 32 times.
402 */
403static void
404xl_mii_sync(sc)
405 struct xl_softc *sc;
406{
407 register int i;
408
409 XL_SEL_WIN(4);
410 MII_SET(XL_MII_DIR|XL_MII_DATA);
411
412 for (i = 0; i < 32; i++) {
413 MII_SET(XL_MII_CLK);
414 MII_SET(XL_MII_DATA);
415 MII_SET(XL_MII_DATA);
416 MII_CLR(XL_MII_CLK);
417 MII_SET(XL_MII_DATA);
418 MII_SET(XL_MII_DATA);
419 }
420
421 return;
422}
423
424/*
425 * Clock a series of bits through the MII.
426 */
427static void
428xl_mii_send(sc, bits, cnt)
429 struct xl_softc *sc;
430 u_int32_t bits;
431 int cnt;
432{
433 int i;
434
435 XL_SEL_WIN(4);
436 MII_CLR(XL_MII_CLK);
437
438 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
439 if (bits & i) {
440 MII_SET(XL_MII_DATA);
441 } else {
442 MII_CLR(XL_MII_DATA);
443 }
444 MII_CLR(XL_MII_CLK);
445 MII_SET(XL_MII_CLK);
446 }
447}
448
449/*
450 * Read an PHY register through the MII.
451 */
452static int
453xl_mii_readreg(sc, frame)
454 struct xl_softc *sc;
455 struct xl_mii_frame *frame;
456
457{
458 int i, ack;
459
460 XL_LOCK(sc);
461
462 /*
463 * Set up frame for RX.
464 */
465 frame->mii_stdelim = XL_MII_STARTDELIM;
466 frame->mii_opcode = XL_MII_READOP;
467 frame->mii_turnaround = 0;
468 frame->mii_data = 0;
469
470 /*
471 * Select register window 4.
472 */
473
474 XL_SEL_WIN(4);
475
476 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
477 /*
478 * Turn on data xmit.
479 */
480 MII_SET(XL_MII_DIR);
481
482 xl_mii_sync(sc);
483
484 /*
485 * Send command/address info.
486 */
487 xl_mii_send(sc, frame->mii_stdelim, 2);
488 xl_mii_send(sc, frame->mii_opcode, 2);
489 xl_mii_send(sc, frame->mii_phyaddr, 5);
490 xl_mii_send(sc, frame->mii_regaddr, 5);
491
492 /* Idle bit */
493 MII_CLR((XL_MII_CLK|XL_MII_DATA));
494 MII_SET(XL_MII_CLK);
495
496 /* Turn off xmit. */
497 MII_CLR(XL_MII_DIR);
498
499 /* Check for ack */
500 MII_CLR(XL_MII_CLK);
501 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
502 MII_SET(XL_MII_CLK);
503
504 /*
505 * Now try reading data bits. If the ack failed, we still
506 * need to clock through 16 cycles to keep the PHY(s) in sync.
507 */
508 if (ack) {
509 for(i = 0; i < 16; i++) {
510 MII_CLR(XL_MII_CLK);
511 MII_SET(XL_MII_CLK);
512 }
513 goto fail;
514 }
515
516 for (i = 0x8000; i; i >>= 1) {
517 MII_CLR(XL_MII_CLK);
518 if (!ack) {
519 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
520 frame->mii_data |= i;
521 }
522 MII_SET(XL_MII_CLK);
523 }
524
525fail:
526
527 MII_CLR(XL_MII_CLK);
528 MII_SET(XL_MII_CLK);
529
530 XL_UNLOCK(sc);
531
532 if (ack)
533 return(1);
534 return(0);
535}
536
537/*
538 * Write to a PHY register through the MII.
539 */
540static int
541xl_mii_writereg(sc, frame)
542 struct xl_softc *sc;
543 struct xl_mii_frame *frame;
544
545{
546 XL_LOCK(sc);
547
548 /*
549 * Set up frame for TX.
550 */
551
552 frame->mii_stdelim = XL_MII_STARTDELIM;
553 frame->mii_opcode = XL_MII_WRITEOP;
554 frame->mii_turnaround = XL_MII_TURNAROUND;
555
556 /*
557 * Select the window 4.
558 */
559 XL_SEL_WIN(4);
560
561 /*
562 * Turn on data output.
563 */
564 MII_SET(XL_MII_DIR);
565
566 xl_mii_sync(sc);
567
568 xl_mii_send(sc, frame->mii_stdelim, 2);
569 xl_mii_send(sc, frame->mii_opcode, 2);
570 xl_mii_send(sc, frame->mii_phyaddr, 5);
571 xl_mii_send(sc, frame->mii_regaddr, 5);
572 xl_mii_send(sc, frame->mii_turnaround, 2);
573 xl_mii_send(sc, frame->mii_data, 16);
574
575 /* Idle bit. */
576 MII_SET(XL_MII_CLK);
577 MII_CLR(XL_MII_CLK);
578
579 /*
580 * Turn off xmit.
581 */
582 MII_CLR(XL_MII_DIR);
583
584 XL_UNLOCK(sc);
585
586 return(0);
587}
588
589static int
590xl_miibus_readreg(dev, phy, reg)
591 device_t dev;
592 int phy, reg;
593{
594 struct xl_softc *sc;
595 struct xl_mii_frame frame;
596
597 sc = device_get_softc(dev);
598
599 /*
600 * Pretend that PHYs are only available at MII address 24.
601 * This is to guard against problems with certain 3Com ASIC
602 * revisions that incorrectly map the internal transceiver
603 * control registers at all MII addresses. This can cause
604 * the miibus code to attach the same PHY several times over.
605 */
606 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
607 return(0);
608
609 bzero((char *)&frame, sizeof(frame));
610
611 frame.mii_phyaddr = phy;
612 frame.mii_regaddr = reg;
613 xl_mii_readreg(sc, &frame);
614
615 return(frame.mii_data);
616}
617
618static int
619xl_miibus_writereg(dev, phy, reg, data)
620 device_t dev;
621 int phy, reg, data;
622{
623 struct xl_softc *sc;
624 struct xl_mii_frame frame;
625
626 sc = device_get_softc(dev);
627
628 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
629 return(0);
630
631 bzero((char *)&frame, sizeof(frame));
632
633 frame.mii_phyaddr = phy;
634 frame.mii_regaddr = reg;
635 frame.mii_data = data;
636
637 xl_mii_writereg(sc, &frame);
638
639 return(0);
640}
641
642static void
643xl_miibus_statchg(dev)
644 device_t dev;
645{
646 struct xl_softc *sc;
647 struct mii_data *mii;
648
649
650 sc = device_get_softc(dev);
651 mii = device_get_softc(sc->xl_miibus);
652
653 XL_LOCK(sc);
654
655 xl_setcfg(sc);
656
657 /* Set ASIC's duplex mode to match the PHY. */
658 XL_SEL_WIN(3);
659 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
660 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
661 else
662 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
663 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
664
665 XL_UNLOCK(sc);
666
667 return;
668}
669
670/*
671 * Special support for the 3c905B-COMBO. This card has 10/100 support
672 * plus BNC and AUI ports. This means we will have both an miibus attached
673 * plus some non-MII media settings. In order to allow this, we have to
674 * add the extra media to the miibus's ifmedia struct, but we can't do
675 * that during xl_attach() because the miibus hasn't been attached yet.
676 * So instead, we wait until the miibus probe/attach is done, at which
677 * point we will get a callback telling is that it's safe to add our
678 * extra media.
679 */
static void
xl_miibus_mediainit(dev)
	device_t		dev;
{
	struct xl_softc		*sc;
	struct mii_data		*mii;
	struct ifmedia		*ifm;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);
	/* Append our non-MII media to the miibus's own media list. */
	ifm = &mii->mii_media;

	XL_LOCK(sc);

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				printf("xl%d: found 10baseFL\n", sc->xl_unit);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL);
			/* Advertise full duplex only if the card can. */
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				printf("xl%d: found AUI\n", sc->xl_unit);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			printf("xl%d: found BNC\n", sc->xl_unit);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	XL_UNLOCK(sc);

	return;
}
724
725/*
726 * The EEPROM is slow: give it time to come ready after issuing
727 * it a command.
728 */
729static int
730xl_eeprom_wait(sc)
731 struct xl_softc *sc;
732{
733 int i;
734
735 for (i = 0; i < 100; i++) {
736 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
737 DELAY(162);
738 else
739 break;
740 }
741
742 if (i == 100) {
743 printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
744 return(1);
745 }
746
747 return(0);
748}
749
750/*
751 * Read a sequence of words from the EEPROM. Note that ethernet address
752 * data is stored in the EEPROM in network byte order.
753 */
754static int
755xl_read_eeprom(sc, dest, off, cnt, swap)
756 struct xl_softc *sc;
757 caddr_t dest;
758 int off;
759 int cnt;
760 int swap;
761{
762 int err = 0, i;
763 u_int16_t word = 0, *ptr;
764#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
765#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
766 /* WARNING! DANGER!
767 * It's easy to accidentally overwrite the rom content!
768 * Note: the 3c575 uses 8bit EEPROM offsets.
769 */
770 XL_SEL_WIN(0);
771
772 if (xl_eeprom_wait(sc))
773 return(1);
774
775 if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
776 off += 0x30;
777
778 for (i = 0; i < cnt; i++) {
779 if (sc->xl_flags & XL_FLAG_8BITROM)
780 CSR_WRITE_2(sc, XL_W0_EE_CMD,
781 XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
782 else
783 CSR_WRITE_2(sc, XL_W0_EE_CMD,
784 XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
785 err = xl_eeprom_wait(sc);
786 if (err)
787 break;
788 word = CSR_READ_2(sc, XL_W0_EE_DATA);
789 ptr = (u_int16_t *)(dest + (i * 2));
790 if (swap)
791 *ptr = ntohs(word);
792 else
793 *ptr = word;
794 }
795
796 return(err ? 1 : 0);
797}
798
799/*
800 * This routine is taken from the 3Com Etherlink XL manual,
801 * page 10-7. It calculates a CRC of the supplied multicast
802 * group address and returns the lower 8 bits, which are used
803 * as the multicast filter position.
804 * Note: the 3c905B currently only supports a 64-bit hash table,
805 * which means we really only need 6 bits, but the manual indicates
806 * that future chip revisions will have a 256-bit hash table,
807 * hence the routine is set up to calculate 8 bits of position
808 * info in case we need it some day.
809 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
810 * 256 bit hash table. This means we have to use all 8 bits regardless.
811 * On older cards, the upper 2 bits will be ignored. Grrrr....
812 */
813static u_int32_t
814xl_mchash(addr)
815 caddr_t addr;
816{
817 u_int32_t crc, carry;
818 int idx, bit;
819 u_int8_t data;
820
821 /* Compute CRC for the address value. */
822 crc = 0xFFFFFFFF; /* initial value */
823
824 for (idx = 0; idx < 6; idx++) {
825 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
826 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
827 crc <<= 1;
828 if (carry)
829 crc = (crc ^ 0x04c11db6) | carry;
830 }
831 }
832
833 /* return the filter bit position */
834 return(crc & 0x000000FF);
835}
836
837/*
838 * NICs older than the 3c905B have only one multicast option, which
839 * is to enable reception of all multicast frames.
840 */
841static void
842xl_setmulti(sc)
843 struct xl_softc *sc;
844{
845 struct ifnet *ifp;
846 struct ifmultiaddr *ifma;
847 u_int8_t rxfilt;
848 int mcnt = 0;
849
850 ifp = &sc->arpcom.ac_if;
851
852 XL_SEL_WIN(5);
853 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
854
855 if (ifp->if_flags & IFF_ALLMULTI) {
856 rxfilt |= XL_RXFILTER_ALLMULTI;
857 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
858 return;
859 }
860
861 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
862 mcnt++;
863
864 if (mcnt)
865 rxfilt |= XL_RXFILTER_ALLMULTI;
866 else
867 rxfilt &= ~XL_RXFILTER_ALLMULTI;
868
869 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
870
871 return;
872}
873
874/*
875 * 3c905B adapters have a hash filter that we can program.
876 */
877static void
878xl_setmulti_hash(sc)
879 struct xl_softc *sc;
880{
881 struct ifnet *ifp;
882 int h = 0, i;
883 struct ifmultiaddr *ifma;
884 u_int8_t rxfilt;
885 int mcnt = 0;
886
887 ifp = &sc->arpcom.ac_if;
888
889 XL_SEL_WIN(5);
890 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
891
892 if (ifp->if_flags & IFF_ALLMULTI) {
893 rxfilt |= XL_RXFILTER_ALLMULTI;
894 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
895 return;
896 } else
897 rxfilt &= ~XL_RXFILTER_ALLMULTI;
898
899
900 /* first, zot all the existing hash bits */
901 for (i = 0; i < XL_HASHFILT_SIZE; i++)
902 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
903
904 /* now program new ones */
905 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
906 if (ifma->ifma_addr->sa_family != AF_LINK)
907 continue;
908 h = xl_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
909 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
910 mcnt++;
911 }
912
913 if (mcnt)
914 rxfilt |= XL_RXFILTER_MULTIHASH;
915 else
916 rxfilt &= ~XL_RXFILTER_MULTIHASH;
917
918 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
919
920 return;
921}
922
923#ifdef notdef
/*
 * Diagnostic helper (compiled out via #ifdef notdef): queue a
 * minimal self-addressed test frame and kick the transmitter.
 */
static void
xl_testpacket(sc)
	struct xl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	/* Three bytes of payload follow the ethernet header. */
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);

	return;
}
952#endif
953
/*
 * Program the internal config register's connector bits from the
 * board's media options: MII/BT4 select the MII connector, 100baseTX
 * selects autoselect.  Also makes sure the coax transceiver is off.
 */
static void
xl_setcfg(sc)
	struct xl_softc		*sc;
{
	u_int32_t		icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
		sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	return;
}
974
975static void
976xl_setmode(sc, media)
977 struct xl_softc *sc;
978 int media;
979{
980 u_int32_t icfg;
981 u_int16_t mediastat;
982
983 printf("xl%d: selecting ", sc->xl_unit);
984
985 XL_SEL_WIN(4);
986 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
987 XL_SEL_WIN(3);
988 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
989
990 if (sc->xl_media & XL_MEDIAOPT_BT) {
991 if (IFM_SUBTYPE(media) == IFM_10_T) {
992 printf("10baseT transceiver, ");
993 sc->xl_xcvr = XL_XCVR_10BT;
994 icfg &= ~XL_ICFG_CONNECTOR_MASK;
995 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
996 mediastat |= XL_MEDIASTAT_LINKBEAT|
997 XL_MEDIASTAT_JABGUARD;
998 mediastat &= ~XL_MEDIASTAT_SQEENB;
999 }
1000 }
1001
1002 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1003 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1004 printf("100baseFX port, ");
1005 sc->xl_xcvr = XL_XCVR_100BFX;
1006 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1007 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1008 mediastat |= XL_MEDIASTAT_LINKBEAT;
1009 mediastat &= ~XL_MEDIASTAT_SQEENB;
1010 }
1011 }
1012
1013 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
1014 if (IFM_SUBTYPE(media) == IFM_10_5) {
1015 printf("AUI port, ");
1016 sc->xl_xcvr = XL_XCVR_AUI;
1017 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1018 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1019 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1020 XL_MEDIASTAT_JABGUARD);
1021 mediastat |= ~XL_MEDIASTAT_SQEENB;
1022 }
1023 if (IFM_SUBTYPE(media) == IFM_10_FL) {
1024 printf("10baseFL transceiver, ");
1025 sc->xl_xcvr = XL_XCVR_AUI;
1026 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1027 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1028 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1029 XL_MEDIASTAT_JABGUARD);
1030 mediastat |= ~XL_MEDIASTAT_SQEENB;
1031 }
1032 }
1033
1034 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1035 if (IFM_SUBTYPE(media) == IFM_10_2) {
1036 printf("BNC port, ");
1037 sc->xl_xcvr = XL_XCVR_COAX;
1038 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1039 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1040 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1041 XL_MEDIASTAT_JABGUARD|
1042 XL_MEDIASTAT_SQEENB);
1043 }
1044 }
1045
1046 if ((media & IFM_GMASK) == IFM_FDX ||
1047 IFM_SUBTYPE(media) == IFM_100_FX) {
1048 printf("full duplex\n");
1049 XL_SEL_WIN(3);
1050 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1051 } else {
1052 printf("half duplex\n");
1053 XL_SEL_WIN(3);
1054 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1055 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1056 }
1057
1058 if (IFM_SUBTYPE(media) == IFM_10_2)
1059 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1060 else
1061 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1062 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1063 XL_SEL_WIN(4);
1064 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1065 DELAY(800);
1066 XL_SEL_WIN(7);
1067
1068 return;
1069}
1070
/*
 * Issue a global reset to the chip and wait for it to complete, then
 * reset the TX and RX engines and apply any board-specific power-line
 * quirks. Safe to call before the interface is up.
 */
static void
xl_reset(sc)
	struct xl_softc *sc;
{
	register int i;

	/*
	 * The reset command lives in window 0. Cards flagged
	 * XL_FLAG_WEIRDRESET additionally need the "disable
	 * advertise full duplex" reset option set.
	 */
	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	    XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C cardbus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Poll until the command-in-progress bit clears (or we time out). */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("xl%d: reset didn't complete\n", sc->xl_unit);

	/* Reset TX and RX. */
	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/*
	 * Some boards wire the LED and/or MII power lines inverted;
	 * OR the matching invert bits into the reset options register
	 * (window 2) so those lines come up in the right state.
	 */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
	return;
}
1128
1129/*
1130 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1131 * IDs against our list and return a device name if we find a match.
1132 */
1133static int
1134xl_probe(dev)
1135 device_t dev;
1136{
1137 struct xl_type *t;
1138
1139 t = xl_devs;
1140
1141 while(t->xl_name != NULL) {
1142 if ((pci_get_vendor(dev) == t->xl_vid) &&
1143 (pci_get_device(dev) == t->xl_did)) {
1144 device_set_desc(dev, t->xl_name);
1145 return(0);
1146 }
1147 t++;
1148 }
1149
1150 return(ENXIO);
1151}
1152
1153/*
1154 * This routine is a kludge to work around possible hardware faults
1155 * or manufacturing defects that can cause the media options register
1156 * (or reset options register, as it's called for the first generation
1157 * 3c90x adapters) to return an incorrect result. I have encountered
1158 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1159 * which doesn't have any of the 'mediaopt' bits set. This screws up
1160 * the attach routine pretty badly because it doesn't know what media
1161 * to look for. If we find ourselves in this predicament, this routine
1162 * will try to guess the media options values and warn the user of a
1163 * possible manufacturing defect with his adapter/system/whatever.
1164 */
1165static void
1166xl_mediacheck(sc)
1167 struct xl_softc *sc;
1168{
1169
1170 /*
1171 * If some of the media options bits are set, assume they are
1172 * correct. If not, try to figure it out down below.
1173 * XXX I should check for 10baseFL, but I don't have an adapter
1174 * to test with.
1175 */
1176 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1177 /*
1178 * Check the XCVR value. If it's not in the normal range
1179 * of values, we need to fake it up here.
1180 */
1181 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1182 return;
1183 else {
1184 printf("xl%d: bogus xcvr value "
1185 "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1186 printf("xl%d: choosing new default based "
1187 "on card type\n", sc->xl_unit);
1188 }
1189 } else {
1190 if (sc->xl_type == XL_TYPE_905B &&
1191 sc->xl_media & XL_MEDIAOPT_10FL)
1192 return;
1193 printf("xl%d: WARNING: no media options bits set in "
1194 "the media options register!!\n", sc->xl_unit);
1195 printf("xl%d: this could be a manufacturing defect in "
1196 "your adapter or system\n", sc->xl_unit);
1197 printf("xl%d: attempting to guess media type; you "
1198 "should probably consult your vendor\n", sc->xl_unit);
1199 }
1200
1201 xl_choose_xcvr(sc, 1);
1202
1203 return;
1204}
1205
/*
 * Pick media options and a default transceiver type based on the PCI
 * product ID stored in the EEPROM. Used when the media options register
 * can't be trusted (see xl_mediacheck()) or when the EEPROM transceiver
 * selection is "auto" (see xl_attach()). If 'verbose' is set, print
 * what was guessed.
 */
static void
xl_choose_xcvr(sc, verbose)
	struct xl_softc *sc;
	int verbose;
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing 10BaseT "
			    "transceiver\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing COMBO "
			    "(AUI/BNC/TP)\n", sc->xl_unit);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 "
			    "plus BNC/AUI\n", sc->xl_unit);
		break;
	default:
		/* Unrecognized ID: 10baseT is the least-bad fallback. */
		printf("xl%d: unknown device ID: %x -- "
		    "defaulting to 10baseT\n", sc->xl_unit, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}

	return;
}
1299
1300/*
1301 * Attach the interface. Allocate softc structures, do ifmedia
1302 * setup and ethernet/BPF attach.
1303 */
static int
xl_attach(dev)
	device_t dev;
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	struct xl_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	int unit, error = 0, rid, res;
	uint16_t did;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/*
	 * Initialize the mutex and ifmedia state up front so the
	 * error path (xl_detach()) can always run against them.
	 */
	mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

	did = pci_get_device(dev);

	/* Set board-specific quirk flags keyed off the PCI device ID. */
	sc->xl_flags = 0;
	if (did == TC_DEVICEID_HURRICANE_555)
		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_556 ||
	    did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_555 ||
	    did == TC_DEVICEID_HURRICANE_556)
		sc->xl_flags |= XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;

	if (did == TC_DEVICEID_HURRICANE_575A ||
	    did == TC_DEVICEID_HURRICANE_575B ||
	    did == TC_DEVICEID_HURRICANE_575C ||
	    did == TC_DEVICEID_HURRICANE_656B ||
	    did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_656)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_575B)
		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_HURRICANE_575C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_656 ||
	    did == TC_DEVICEID_HURRICANE_656B)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
		    XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_TORNADO_10_100BT_920B)
		sc->xl_flags |= XL_FLAG_PHYOK;

	/* Force port I/O for the 575-series cardbus boards. */
	switch (did) {
	case TC_DEVICEID_HURRICANE_575A:
	case TC_DEVICEID_HURRICANE_575B:
	case TC_DEVICEID_HURRICANE_575C:
		sc->xl_flags |= XL_FLAG_NO_MMIO;
		break;
	default:
		break;
	}

#ifndef BURN_BRIDGES
	/*
	 * If this is a 3c905B, we have to check one extra thing.
	 * The 905B supports power management and may be placed in
	 * a low-power mode (D3 mode), typically by certain operating
	 * systems which shall not be named. The PCI BIOS is supposed
	 * to reset the NIC and bring it out of low-power mode, but
	 * some do not. Consequently, we have to see if this chip
	 * supports power management, and if so, make sure it's not
	 * in low-power mode. If power management is available, the
	 * capid byte will be 0x01.
	 *
	 * I _think_ that what actually happens is that the chip
	 * loses its PCI configuration during the transition from
	 * D3 back to D0; this means that it should be possible for
	 * us to save the PCI iobase, membase and IRQ, put the chip
	 * back in the D0 state, then restore the PCI config ourselves.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, XL_PCI_LOIO, 4);
		membase = pci_read_config(dev, XL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, XL_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("xl%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, XL_PCI_LOIO, iobase, 4);
		pci_write_config(dev, XL_PCI_LOMEM, membase, 4);
		pci_write_config(dev, XL_PCI_INTLINE, irq, 4);
	}
#endif

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Try memory mapped I/O first unless the quirk forbids it. */
	if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;

		sc->xl_res = bus_alloc_resource(dev, res, &rid,
		    0, ~0, 1, RF_ACTIVE);
	}

	if (sc->xl_res != NULL) {
		sc->xl_flags |= XL_FLAG_USE_MMIO;
		if (bootverbose)
			printf("xl%d: using memory mapped I/O\n", unit);
	} else {
		/* MMIO unavailable (or disallowed): fall back to ports. */
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
		sc->xl_res = bus_alloc_resource(dev, res, &rid,
		    0, ~0, 1, RF_ACTIVE);
		if (sc->xl_res == NULL) {
			printf ("xl%d: couldn't map ports/memory\n", unit);
			error = ENXIO;
			goto fail;
		}
		if (bootverbose)
			printf("xl%d: using port I/O\n", unit);
	}

	sc->xl_btag = rman_get_bustag(sc->xl_res);
	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

	/* Boards with a function register get a second memory mapping. */
	if (sc->xl_flags & XL_FLAG_FUNCREG) {
		rid = XL_PCI_FUNCMEM;
		sc->xl_fres = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		    0, ~0, 1, RF_ACTIVE);

		if (sc->xl_fres == NULL) {
			printf ("xl%d: couldn't map ports/memory\n", unit);
			error = ENXIO;
			goto fail;
		}

		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
	}

	/* Allocate interrupt */
	rid = 0;
	sc->xl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->xl_irq == NULL) {
		printf("xl%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	xl_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("xl%d: failed to read station address\n", sc->xl_unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A 3Com chip was detected. Inform the world.
	 */
	printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->xl_unit = unit;
	callout_handle_init(&sc->xl_stat_ch);
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the DMA
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(NULL, 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
	    &sc->xl_ldata.xl_rx_tag);
	if (error) {
		printf("xl%d: failed to allocate rx dma tag\n", unit);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
	    (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->xl_ldata.xl_rx_dmamap);
	if (error) {
		/* Undo the tag creation by hand so fail-path stays simple. */
		printf("xl%d: no memory for rx list buffers!\n", unit);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
	    sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
	    XL_RX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("xl%d: cannot get dma address of the rx ring!\n", unit);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
		sc->xl_ldata.xl_rx_tag = NULL;
		goto fail;
	}

	/* Same tag/alloc/load sequence for the TX descriptor ring. */
	error = bus_dma_tag_create(NULL, 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
	    &sc->xl_ldata.xl_tx_tag);
	if (error) {
		printf("xl%d: failed to allocate tx dma tag\n", unit);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
	    (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->xl_ldata.xl_tx_dmamap);
	if (error) {
		printf("xl%d: no memory for list buffers!\n", unit);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
	    sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
	    XL_TX_LIST_SZ, xl_dma_map_addr,
	    &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
	if (error) {
		printf("xl%d: cannot get dma address of the tx ring!\n", unit);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
		sc->xl_ldata.xl_tx_tag = NULL;
		goto fail;
	}

	/*
	 * Allocate a DMA tag for the mapping of mbufs.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL,
	    NULL, &sc->xl_mtag);
	if (error) {
		printf("xl%d: failed to allocate mbuf dma tag\n", unit);
		goto fail;
	}

	/* We need a spare DMA map for the RX ring. */
	error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
	if (error)
		goto fail;

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Fill in the ifnet; 905B-class parts get checksum offload. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	ifp->if_output = ether_output;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->xl_type == XL_TYPE_905B) {
		ifp->if_start = xl_start_90xB;
		ifp->if_hwassist = XL905B_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
	} else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_init = xl_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = XL_TX_LIST_CNT - 1;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interace and a
	 * possible PHY.
	 */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
	if (bootverbose)
		printf("xl%d: media options word: %x\n", sc->xl_unit,
		    sc->xl_media);

	/* Extract the transceiver selection from the internal config. */
	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		if (bootverbose)
			printf("xl%d: found MII/AUTO\n", sc->xl_unit);
		xl_setcfg(sc);
		if (mii_phy_probe(dev, &sc->xl_miibus,
		    xl_ifmedia_upd, xl_ifmedia_sts)) {
			printf("xl%d: no PHY found!\n", sc->xl_unit);
			error = ENXIO;
			goto fail;
		}

		/* miibus handles media selection from here on. */
		goto done;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO)
		xl_choose_xcvr(sc, bootverbose);

	/*
	 * Do ifmedia setup.
	 */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (bootverbose)
			printf("xl%d: found 10baseT\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(&sc->ifmedia,
			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				printf("xl%d: found 10baseFL\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(&sc->ifmedia,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				printf("xl%d: found AUI\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			printf("xl%d: found BNC\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (bootverbose)
			printf("xl%d: found 100baseFX\n", sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER|IFM_10_T;
		break;
	}

	if (sc->xl_miibus == NULL)
		ifmedia_set(&sc->ifmedia, media);

done:

	/* Some boards need magic bits poked to power the transceiver. */
	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET,
	    xl_intr, sc, &sc->xl_intrhand);
	if (error) {
		printf("xl%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* xl_detach() tears down everything allocated above. */
	if (error)
		xl_detach(dev);

	return(error);
}
1774
1775/*
1776 * Shutdown hardware and free up resources. This can be called any
1777 * time after the mutex has been initialized. It is called in both
1778 * the error case in attach and the normal detach case so it needs
1779 * to be careful about only freeing resources that have actually been
1780 * allocated.
1781 */
static int
xl_detach(dev)
	device_t dev;
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	int rid, res;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));
	XL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/*
	 * Pick the resource id/type that matches how xl_attach()
	 * mapped the registers (memory vs. port I/O).
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO) {
		rid = XL_PCI_LOMEM;
		res = SYS_RES_MEMORY;
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
	}

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		xl_reset(sc);
		xl_stop(sc);
		ether_ifdetach(ifp);
	}
	if (sc->xl_miibus)
		device_delete_child(dev, sc->xl_miibus);
	bus_generic_detach(dev);
	ifmedia_removeall(&sc->ifmedia);

	/* Release interrupt handler and bus resources, if allocated. */
	if (sc->xl_intrhand)
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
	if (sc->xl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
	if (sc->xl_fres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    XL_PCI_FUNCMEM, sc->xl_fres);
	if (sc->xl_res)
		bus_release_resource(dev, res, rid, sc->xl_res);

	/* Unload and destroy the DMA maps/tags created in xl_attach(). */
	if (sc->xl_mtag) {
		bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
		bus_dma_tag_destroy(sc->xl_mtag);
	}
	if (sc->xl_ldata.xl_rx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
		    sc->xl_ldata.xl_rx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
	}
	if (sc->xl_ldata.xl_tx_tag) {
		bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
		    sc->xl_ldata.xl_tx_dmamap);
		bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
	}

	XL_UNLOCK(sc);
	mtx_destroy(&sc->xl_mtx);

	return(0);
}
1848
1849/*
1850 * Initialize the transmit descriptors.
1851 */
1852static int
1853xl_list_tx_init(sc)
1854 struct xl_softc *sc;
1855{
1856 struct xl_chain_data *cd;
1857 struct xl_list_data *ld;
1858 int error, i;
1859
1860 cd = &sc->xl_cdata;
1861 ld = &sc->xl_ldata;
1862 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1863 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1864 error = bus_dmamap_create(sc->xl_mtag, 0,
1865 &cd->xl_tx_chain[i].xl_map);
1866 if (error)
1867 return(error);
1868 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1869 i * sizeof(struct xl_list);
1870 if (i == (XL_TX_LIST_CNT - 1))
1871 cd->xl_tx_chain[i].xl_next = NULL;
1872 else
1873 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1874 }
1875
1876 cd->xl_tx_free = &cd->xl_tx_chain[0];
1877 cd->xl_tx_tail = cd->xl_tx_head = NULL;
1878
1879 bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
1880 return(0);
1881}
1882
1883/*
1884 * Initialize the transmit descriptors.
1885 */
static int
xl_list_tx_init_90xB(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int error, i;

	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;
	/*
	 * Unlike xl_list_tx_init(), the 90xB variant links the chain
	 * entries into a closed ring (both xl_next and xl_prev wrap),
	 * since the 90xB TX path uses producer/consumer indexing.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		error = bus_dmamap_create(sc->xl_mtag, 0,
		    &cd->xl_tx_chain[i].xl_map);
		if (error)
			return(error);
		cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
		    i * sizeof(struct xl_list);
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
		if (i == 0)
			cd->xl_tx_chain[i].xl_prev =
			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
		else
			cd->xl_tx_chain[i].xl_prev =
			    &cd->xl_tx_chain[i - 1];
	}

	/* Clear the descriptors and mark slot 0 as an empty placeholder. */
	bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);

	/* Producer and consumer start at slot 1 with nothing queued. */
	cd->xl_tx_prod = 1;
	cd->xl_tx_cons = 1;
	cd->xl_tx_cnt = 0;

	bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
	return(0);
}
1926
1927/*
1928 * Initialize the RX descriptors and allocate mbufs for them. Note that
1929 * we arrange the descriptors in a closed ring, so that the last descriptor
1930 * points back to the first.
1931 */
static int
xl_list_rx_init(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int error, i, next;
	u_int32_t nextptr;

	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;

	/*
	 * For each RX slot: create a DMA map, attach a fresh mbuf
	 * cluster via xl_newbuf(), and link the descriptor to its
	 * successor both in host memory (xl_next pointer) and in the
	 * descriptor itself (little-endian bus address), wrapping the
	 * last slot back to slot 0 to close the ring.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
		error = bus_dmamap_create(sc->xl_mtag, 0,
		    &cd->xl_rx_chain[i].xl_map);
		if (error)
			return(error);
		error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
		if (error)
			return(error);
		if (i == (XL_RX_LIST_CNT - 1))
			next = 0;
		else
			next = i + 1;
		nextptr = ld->xl_rx_dmaaddr +
		    next * sizeof(struct xl_list_onefrag);
		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
		ld->xl_rx_list[i].xl_next = htole32(nextptr);
	}

	/* Flush the ring to the device and start receiving at slot 0. */
	bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
	cd->xl_rx_head = &cd->xl_rx_chain[0];

	return(0);
}
1968
1969/*
1970 * Initialize an RX descriptor and attach an MBUF cluster.
1971 * If we fail to do so, we need to leave the old mbuf and
1972 * the old DMA map untouched so that it can be reused.
1973 */
static int
xl_newbuf(sc, c)
	struct xl_softc *sc;
	struct xl_chain_onefrag *c;
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;
	int error;
	u_int32_t baddr;

	/* Grab a new mbuf with an attached cluster. */
	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return(ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	/*
	 * Load the new cluster into the spare map first; only if that
	 * succeeds do we touch the descriptor, so on failure the old
	 * mbuf and map remain intact and reusable.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
	    xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
		return(error);
	}

	/*
	 * Swap the freshly-loaded spare map into the chain slot and
	 * keep the slot's old map as the new spare.
	 */
	bus_dmamap_unload(sc->xl_mtag, c->xl_map);
	map = c->xl_map;
	c->xl_map = sc->xl_tmpmap;
	sc->xl_tmpmap = map;
	c->xl_mbuf = m_new;
	/* Single-fragment descriptor: full cluster length, LAST_FRAG set. */
	c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
	c->xl_ptr->xl_status = 0;
	c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
	return(0);
}
2012
2013static int
2014xl_rx_resync(sc)
2015 struct xl_softc *sc;
2016{
2017 struct xl_chain_onefrag *pos;
2018 int i;
2019
2020 pos = sc->xl_cdata.xl_rx_head;
2021
2022 for (i = 0; i < XL_RX_LIST_CNT; i++) {
2023 if (pos->xl_ptr->xl_status)
2024 break;
2025 pos = pos->xl_next;
2026 }
2027
2028 if (i == XL_RX_LIST_CNT)
2029 return(0);
2030
2031 sc->xl_cdata.xl_rx_head = pos;
2032
2033 return(EAGAIN);
2034}
2035
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * Walks the RX ring from xl_rx_head, processing every descriptor whose
 * status word has been filled in by the chip.  Errored descriptors have
 * their status cleared so the chip simply re-uses the existing mbuf;
 * good frames get a replacement cluster (xl_newbuf()) and are handed to
 * ifp->if_input() with the driver lock dropped.  If the upload engine
 * has stalled at the end of the ring, it is restarted and the ring is
 * scanned again.  Must be called with the driver lock held.
 */
static void
xl_rxeof(sc)
	struct xl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct xl_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	XL_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;

again:

	/* Pick up any status words the chip has DMA'd into the ring. */
	bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	/* A non-zero status word marks a descriptor the chip has completed. */
	while((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
		cur_rx = sc->xl_cdata.xl_rx_head;
		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			printf("xl%d: bad receive status -- "
			    "packet dropped\n", sc->xl_unit);
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
		    BUS_DMASYNC_POSTREAD);
		m = cur_rx->xl_mbuf;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (xl_newbuf(sc, cur_rx)) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
			    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
			continue;
		}
		/* Flush the refreshed descriptor back for the chip. */
		bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
		    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Translate hardware RX checksum results into mbuf flags. */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Do IP checksum checking. */
			if (rxstat & XL_RXSTAT_IPCKOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & XL_RXSTAT_IPCKERR))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((rxstat & XL_RXSTAT_TCPCOK &&
			    !(rxstat & XL_RXSTAT_TCPCKERR)) ||
			    (rxstat & XL_RXSTAT_UDPCKOK &&
			    !(rxstat & XL_RXSTAT_UDPCKERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * Drop the lock while the packet travels up the stack;
		 * if_input may re-enter the driver.
		 */
		XL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		XL_LOCK(sc);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * you need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
	    CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		goto again;
	}

	return;
}
2172
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 *
 * This is the 3c900/3c905 (boomerang) version.  These chips provide no
 * per-descriptor completion bit, so a chain is only known to be done
 * once the chip's download list pointer register reads zero; while it
 * is non-zero the chip may still be DMAing from the list and nothing
 * is reaped.  Completed descriptors are returned to the free list.
 */
static void
xl_txeof(sc)
	struct xl_softc		*sc;
{
	struct xl_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while(sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/* Chip is still working on the list; stop reaping. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Ring drained: allow xl_start() to queue again. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames still pending: if the download engine stalled
		 * or lost its list pointer, restart it at the head.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}

	return;
}
2230
/*
 * TX completion handler for the 3c905B and newer chips, which set
 * XL_TXSTAT_DL_COMPLETE in the descriptor status word once a frame has
 * been downloaded.  Reaps finished descriptors between the consumer
 * and producer indices, freeing their mbufs and DMA mappings.
 */
static void
xl_txeof_90xB(sc)
	struct xl_softc		*sc;
{
	struct xl_chain		*cur_tx = NULL;
	struct ifnet		*ifp;
	int			idx;

	ifp = &sc->arpcom.ac_if;

	/* Pick up the status words the chip wrote back into the ring. */
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	idx = sc->xl_cdata.xl_tx_cons;
	while(idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		    XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* cur_tx != NULL means the loop ran; allow new transmits. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
2274
/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
 *
 * Drains the chip's TX status stack.  On a fatal error (underrun,
 * jabber, reclaim) the transmitter is reset, the download engine is
 * handed its list pointer back and the TX engine is re-enabled; an
 * underrun additionally raises the TX start threshold to make a
 * recurrence less likely.  Each status byte must be popped by writing
 * any value back to XL_TX_STATUS before the next can be read.
 */
static void
xl_txeoc(sc)
	struct xl_softc		*sc;
{
	u_int8_t		txstat;

	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			printf("xl%d: transmission error: %x\n",
			    sc->xl_unit, txstat);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				/*
				 * Restart the download engine at the
				 * current consumer descriptor, if any
				 * frames are still outstanding.
				 */
				if (sc->xl_cdata.xl_tx_cnt) {
					int			i;
					struct xl_chain		*c;
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				/* Boomerang: restart at the list head. */
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				printf("xl%d: tx underrun, increasing tx start"
				    " threshold to %d bytes\n", sc->xl_unit,
				    sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal status: just re-enable and unstall. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}

	return;
}
2342
/*
 * Interrupt service routine.  Acks and dispatches each pending
 * interrupt source until the status register shows none of the
 * interrupts we enabled, or reads 0xFFFF (which a removed/powered-off
 * card returns).  Kicks the transmit routine on the way out if frames
 * are still queued for output.
 */
static void
xl_intr(arg)
	void			*arg;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;
	XL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	while((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		/* Ack everything we are about to handle. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (status & XL_STAT_UP_COMPLETE) {
			int			curpkts;

			/*
			 * If the receive pass produced no packets, the
			 * software ring may be out of sync with the
			 * chip; resync and rescan until it converges.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete only fires on transmit errors; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Host adapter failure: reset and reinitialize. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		if (status & XL_STAT_STATSOFLOW) {
			/* Drain the counters without rescheduling the timer. */
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	XL_UNLOCK(sc);

	return;
}
2402
/*
 * Statistics updater: runs once a second via timeout(9) and is also
 * called directly on a stats-overflow interrupt (with
 * xl_stats_no_timeout set).  Reads -- and thereby clears -- the window
 * 6 statistics registers into a local struct, folds the error and
 * collision counters into the ifnet stats, and reads the window 4
 * BadSSD counter too so no register is left to overflow.  Unless
 * xl_stats_no_timeout is set, it also drives the MII tick and
 * reschedules itself.
 */
static void
xl_stats_update(xsc)
	void			*xsc;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	struct xl_stats		xl_stats;
	u_int8_t		*p;
	int			i;
	struct mii_data		*mii = NULL;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/*
	 * 16 consecutive one-byte counters, overlaid byte-for-byte onto
	 * struct xl_stats starting at XL_W6_CARRIER_LOST.
	 */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Return to window 7, the normal operating window. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);

	return;
}
2454
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, in which case the descriptor owns the mbuf
 * (c->xl_mbuf); returns 1 on failure, in which case the mbuf chain has
 * already been freed.  If the chain has too many fragments to map
 * (EFBIG), it is compacted with m_defrag() and the mapping is retried
 * once.  On 905B chips the descriptor status word is also loaded with
 * the requested hardware checksum offload bits.
 */
static int
xl_encap(sc, c, m_head)
	struct xl_softc		*sc;
	struct xl_chain		*c;
	struct mbuf		*m_head;
{
	int			error;
	u_int32_t		status;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
	    xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);

	/* EFBIG (too many segments) is recoverable below; others are not. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		printf("xl%d: can't map mbuf (error %d)\n", sc->xl_unit, error);
		return(1);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf		*m_new;

		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL) {
			m_freem(m_head);
			return(1);
		} else {
			m_head = m_new;
		}

		error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
		    m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
		if (error) {
			m_freem(m_head);
			printf("xl%d: can't map mbuf (error %d)\n",
			    sc->xl_unit, error);
			return(1);
		}
	}

	/* On 905B parts, request the checksum offload the packet asked for. */
	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
		c->xl_ptr->xl_status = htole32(status);
	}

	c->xl_mbuf = m_head;
	bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
	return(0);
}
2532
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 *
 * This is the 3c900/3c905 (boomerang) version: descriptors are taken
 * from a software free list, chained together, and spliced onto the
 * running tail with the download engine stalled around the splice.
 */
static void
xl_start(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain		*prev_tx;
	u_int32_t		status;
	int			error;

	sc = ifp->if_softc;
	XL_LOCK(sc);
	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed descriptors first. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			XL_UNLOCK(sc);
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	while(sc->xl_cdata.xl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed (mbuf already freed); reuse the slot. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		XL_UNLOCK(sc);
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append to the existing chain, clearing the old tail's
		 * interrupt-request bit so only the new tail interrupts.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);

	XL_UNLOCK(sc);

	return;
}
2673
/*
 * Transmit routine for the 3c905B and newer chips, which use a fixed
 * producer/consumer descriptor ring (xl_tx_prod/xl_tx_cons) rather
 * than the boomerang free list, and a download poll mechanism set up
 * in xl_init() instead of explicit stall/unstall around each splice.
 */
static void
xl_start_90xB(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain		*prev_tx;
	int			error, idx;

	sc = ifp->if_softc;
	XL_LOCK(sc);

	if (ifp->if_flags & IFF_OACTIVE) {
		XL_UNLOCK(sc);
		return;
	}

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	/* Fill descriptors until the ring is (nearly) full. */
	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Leave a little slack before declaring the ring full. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed (mbuf already freed); reuse the slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		XL_UNLOCK(sc);
		return;
	}

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);
	bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the new chain in after the previous producer descriptor. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	XL_UNLOCK(sc);

	return;
}
2764
2765static void
2766xl_init(xsc)
2767 void *xsc;
2768{
2769 struct xl_softc *sc = xsc;
2770 struct ifnet *ifp = &sc->arpcom.ac_if;
2771 int error, i;
2772 u_int16_t rxfilt = 0;
2773 struct mii_data *mii = NULL;
2774
2775 XL_LOCK(sc);
2776
2777 /*
2778 * Cancel pending I/O and free all RX/TX buffers.
2779 */
2780 xl_stop(sc);
2781
2782 if (sc->xl_miibus == NULL) {
2783 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2784 xl_wait(sc);
2785 }
2786 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2787 xl_wait(sc);
2788 DELAY(10000);
2789
2790 if (sc->xl_miibus != NULL)
2791 mii = device_get_softc(sc->xl_miibus);
2792
2793 /* Init our MAC address */
2794 XL_SEL_WIN(2);
2795 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2796 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2797 sc->arpcom.ac_enaddr[i]);
2798 }
2799
2800 /* Clear the station mask. */
2801 for (i = 0; i < 3; i++)
2802 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2803#ifdef notdef
2804 /* Reset TX and RX. */
2805 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2806 xl_wait(sc);
2807 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2808 xl_wait(sc);
2809#endif
2810 /* Init circular RX list. */
2811 error = xl_list_rx_init(sc);
2812 if (error) {
2813 printf("xl%d: initialization of the rx ring failed (%d)\n",
2814 sc->xl_unit, error);
2815 xl_stop(sc);
2816 XL_UNLOCK(sc);
2817 return;
2818 }
2819
2820 /* Init TX descriptors. */
2821 if (sc->xl_type == XL_TYPE_905B)
2822 error = xl_list_tx_init_90xB(sc);
2823 else
2824 error = xl_list_tx_init(sc);
2825 if (error) {
2826 printf("xl%d: initialization of the tx ring failed (%d)\n",
2827 sc->xl_unit, error);
2828 xl_stop(sc);
2829 XL_UNLOCK(sc);
2830 }
2831
2832 /*
2833 * Set the TX freethresh value.
2834 * Note that this has no effect on 3c905B "cyclone"
2835 * cards but is required for 3c900/3c905 "boomerang"
2836 * cards in order to enable the download engine.
2837 */
2838 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2839
2840 /* Set the TX start threshold for best performance. */
2841 sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2842 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
2843
2844 /*
2845 * If this is a 3c905B, also set the tx reclaim threshold.
2846 * This helps cut down on the number of tx reclaim errors
2847 * that could happen on a busy network. The chip multiplies
2848 * the register value by 16 to obtain the actual threshold
2849 * in bytes, so we divide by 16 when setting the value here.
2850 * The existing threshold value can be examined by reading
2851 * the register at offset 9 in window 5.
2852 */
2853 if (sc->xl_type == XL_TYPE_905B) {
2854 CSR_WRITE_2(sc, XL_COMMAND,
2855 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2856 }
2857
2858 /* Set RX filter bits. */
2859 XL_SEL_WIN(5);
2860 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2861
2862 /* Set the individual bit to receive frames for this host only. */
2863 rxfilt |= XL_RXFILTER_INDIVIDUAL;
2864
2865 /* If we want promiscuous mode, set the allframes bit. */
2866 if (ifp->if_flags & IFF_PROMISC) {
2867 rxfilt |= XL_RXFILTER_ALLFRAMES;
2868 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2869 } else {
2870 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2871 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2872 }
2873
2874 /*
2875 * Set capture broadcast bit to capture broadcast frames.
2876 */
2877 if (ifp->if_flags & IFF_BROADCAST) {
2878 rxfilt |= XL_RXFILTER_BROADCAST;
2879 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2880 } else {
2881 rxfilt &= ~XL_RXFILTER_BROADCAST;
2882 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2883 }
2884
2885 /*
2886 * Program the multicast filter, if necessary.
2887 */
2888 if (sc->xl_type == XL_TYPE_905B)
2889 xl_setmulti_hash(sc);
2890 else
2891 xl_setmulti(sc);
2892
2893 /*
2894 * Load the address of the RX list. We have to
2895 * stall the upload engine before we can manipulate
2896 * the uplist pointer register, then unstall it when
2897 * we're finished. We also have to wait for the
2898 * stall command to complete before proceeding.
2899 * Note that we have to do this after any RX resets
2900 * have completed since the uplist register is cleared
2901 * by a reset.
2902 */
2903 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2904 xl_wait(sc);
2905 CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
2906 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2907 xl_wait(sc);
2908
2909
2910 if (sc->xl_type == XL_TYPE_905B) {
2911 /* Set polling interval */
2912 CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
2913 /* Load the address of the TX list */
2914 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2915 xl_wait(sc);
2916 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2917 sc->xl_cdata.xl_tx_chain[0].xl_phys);
2918 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2919 xl_wait(sc);
2920 }
2921
2922 /*
2923 * If the coax transceiver is on, make sure to enable
2924 * the DC-DC converter.
2925 */
2926 XL_SEL_WIN(3);
2927 if (sc->xl_xcvr == XL_XCVR_COAX)
2928 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2929 else
2930 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2931
2932 /*
2933 * increase packet size to allow reception of 802.1q or ISL packets.
2934 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
2935 * control register. For 3c90xB/C chips, use the RX packet size
2936 * register.
2937 */
2938
2939 if (sc->xl_type == XL_TYPE_905B)
2940 CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
2941 else {
2942 u_int8_t macctl;
2943 macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
2944 macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
2945 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
2946 }
2947
2948 /* Clear out the stats counters. */
2949 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2950 sc->xl_stats_no_timeout = 1;
2951 xl_stats_update(sc);
2952 sc->xl_stats_no_timeout = 0;
2953 XL_SEL_WIN(4);
2954 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2955 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2956
2957 /*
2958 * Enable interrupts.
2959 */
2960 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2961 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2962 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2963 if (sc->xl_flags & XL_FLAG_FUNCREG)
2964 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
2965
2966 /* Set the RX early threshold */
2967 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2968 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2969
2970 /* Enable receiver and transmitter. */
2971 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2972 xl_wait(sc);
2973 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2974 xl_wait(sc);
2975
2976 if (mii != NULL)
2977 mii_mediachg(mii);
2978
2979 /* Select window 7 for normal operations. */
2980 XL_SEL_WIN(7);
2981
2982 ifp->if_flags |= IFF_RUNNING;
2983 ifp->if_flags &= ~IFF_OACTIVE;
2984
2985 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2986
2987 XL_UNLOCK(sc);
2988
2989 return;
2990}
2991
2992/*
2993 * Set media options.
2994 */
2995static int
2996xl_ifmedia_upd(ifp)
2997 struct ifnet *ifp;
2998{
2999 struct xl_softc *sc;
3000 struct ifmedia *ifm = NULL;
3001 struct mii_data *mii = NULL;
3002
3003 sc = ifp->if_softc;
3004 if (sc->xl_miibus != NULL)
3005 mii = device_get_softc(sc->xl_miibus);
3006 if (mii == NULL)
3007 ifm = &sc->ifmedia;
3008 else
3009 ifm = &mii->mii_media;
3010
3011 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3012 case IFM_100_FX:
3013 case IFM_10_FL:
3014 case IFM_10_2:
3015 case IFM_10_5:
3016 xl_setmode(sc, ifm->ifm_media);
3017 return(0);
3018 break;
3019 default:
3020 break;
3021 }
3022
3023 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
3024 || sc->xl_media & XL_MEDIAOPT_BT4) {
3025 xl_init(sc);
3026 } else {
3027 xl_setmode(sc, ifm->ifm_media);
3028 }
3029
3030 return(0);
3031}
3032
/*
 * Report current media status.
 *
 * Reads the media status (window 4) and the configured connector type
 * (window 3), then fills in ifmr.  For MII/autoneg connectors the
 * result is delegated to mii_pollstat(); other connectors are mapped
 * directly onto the corresponding ifmedia type.
 */
static void
xl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct xl_softc		*sc;
	u_int32_t		icfg;
	u_int16_t		status = 0;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* Extract the active connector type from the internal config. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	/*
	 * The carrier bit reads as set when carrier is absent (compare
	 * xl_watchdog()), so the link is up when the bit is clear.
	 */
	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch(icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		/* Some 905B boards present 10baseFL on the AUI connector. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */

	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
		break;
	}

	return;
}
3108
/*
 * Process an ioctl request.  Handles interface flag changes (up/down,
 * and promiscuous-mode toggling without a full reinit), multicast list
 * updates, media get/set (via miibus when a PHY is present), and
 * checksum-offload capability changes; everything else is punted to
 * ether_ioctl().  Returns 0 or an errno value.
 */
static int
xl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct xl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii = NULL;
	u_int8_t		rxfilt;

	XL_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while running,
			 * just poke the RX filter rather than doing a
			 * full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember the flags so the next call can detect changes. */
		sc->xl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* 905B parts use the hash filter; older parts don't. */
		if (sc->xl_type == XL_TYPE_905B)
			xl_setmulti_hash(sc);
		else
			xl_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Keep if_hwassist in step with the TXCSUM capability. */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = XL905B_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	XL_UNLOCK(sc);

	return(error);
}
3186
3187static void
3188xl_watchdog(ifp)
3189 struct ifnet *ifp;
3190{
3191 struct xl_softc *sc;
3192 u_int16_t status = 0;
3193
3194 sc = ifp->if_softc;
3195
3196 XL_LOCK(sc);
3197
3198 ifp->if_oerrors++;
3199 XL_SEL_WIN(4);
3200 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
3201 printf("xl%d: watchdog timeout\n", sc->xl_unit);
3202
3203 if (status & XL_MEDIASTAT_CARRIER)
3204 printf("xl%d: no carrier - transceiver cable problem?\n",
3205 sc->xl_unit);
3206 xl_txeoc(sc);
3207 xl_txeof(sc);
3208 xl_rxeof(sc);
3209 xl_reset(sc);
3210 xl_init(sc);
3211
3212 if (ifp->if_snd.ifq_head != NULL)
3213 (*ifp->if_start)(ifp);
3214
3215 XL_UNLOCK(sc);
3216
3217 return;
3218}
3219
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 *
 * Disables the receiver, transmitter, statistics and interrupts,
 * cancels the stats timeout, then unloads and destroys the per-buffer
 * DMA maps, frees the mbufs on both rings and zeroes the descriptor
 * lists.  Clears IFF_RUNNING/IFF_OACTIVE when done.
 */
static void
xl_stop(sc)
	struct xl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	XL_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Quiesce the chip. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack any latched interrupt and mask all interrupt sources. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4 (sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Stop the stats updater. */
	untimeout(xl_stats_update, sc, sc->xl_stat_ch);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	XL_UNLOCK(sc);

	return;
}
3295
3296/*
3297 * Stop all chip I/O so that the kernel's probe routines don't
3298 * get confused by errant DMAs when rebooting.
3299 */
3300static void
3301xl_shutdown(dev)
3302 device_t dev;
3303{
3304 struct xl_softc *sc;
3305
3306 sc = device_get_softc(dev);
3307
3308 XL_LOCK(sc);
3309 xl_reset(sc);
3310 xl_stop(sc);
3311 XL_UNLOCK(sc);
3312
3313 return;
3314}
3315
3316static int
3317xl_suspend(dev)
3318 device_t dev;
3319{
3320 struct xl_softc *sc;
3321
3322 sc = device_get_softc(dev);
3323
3324 XL_LOCK(sc);
3325 xl_stop(sc);
3326 XL_UNLOCK(sc);
3327
3328 return(0);
3329}
3330
3331static int
3332xl_resume(dev)
3333 device_t dev;
3334{
3335 struct xl_softc *sc;
3336 struct ifnet *ifp;
3337
3338 sc = device_get_softc(dev);
3339 XL_LOCK(sc);
3340 ifp = &sc->arpcom.ac_if;
3341
3342 xl_reset(sc);
3343 if (ifp->if_flags & IFF_UP)
3344 xl_init(sc);
3345
3346 XL_UNLOCK(sc);
3347 return(0);
3348}