if_xl.c: diff of revision 45629 (deleted lines) against revision 45693 (added lines)
1/*
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $Id: if_xl.c,v 1.75 1999/04/12 20:23:08 wpaul Exp $
32 * $Id: if_xl.c,v 1.83 1999/04/15 02:34:54 wpaul Exp $
33 */
34
35/*
36 * 3Com 3c90x Etherlink XL PCI NIC driver
37 *
38 * Supports the 3Com "boomerang" and "cyclone" PCI
39 * bus-master chips (3c90x cards and embedded controllers) including
40 * the following:
41 *
42 * 3Com 3c900-TPO 10Mbps/RJ-45
43 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
44 * 3Com 3c905-TX 10/100Mbps/RJ-45
45 * 3Com 3c905-T4 10/100Mbps/RJ-45
46 * 3Com 3c900B-TPO 10Mbps/RJ-45
47 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
48 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
48 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
49 * 3Com 3c905B-TX 10/100Mbps/RJ-45
50 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
51 * 3Com 3c980-TX 10/100Mbps server adapter
52 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45
53 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
54 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
55 * Dell Latitude laptop docking station embedded 3c905-TX
56 *
57 * Written by Bill Paul <wpaul@ctr.columbia.edu>
58 * Electrical Engineering Department
59 * Columbia University, New York City
60 */
61
62/*
 63 * The 3c90x series chips use a bus-master DMA interface for transferring
64 * packets to and from the controller chip. Some of the "vortex" cards
65 * (3c59x) also supported a bus master mode, however for those chips
66 * you could only DMA packets to/from a contiguous memory buffer. For
67 * transmission this would mean copying the contents of the queued mbuf
 68 * chain into an mbuf cluster and then DMAing the cluster. This extra
69 * copy would sort of defeat the purpose of the bus master support for
70 * any packet that doesn't fit into a single mbuf.
71 *
72 * By contrast, the 3c90x cards support a fragment-based bus master
73 * mode where mbuf chains can be encapsulated using TX descriptors.
74 * This is similar to other PCI chips such as the Texas Instruments
75 * ThunderLAN and the Intel 82557/82558.
76 *
77 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
78 * bus master chips because they maintain the old PIO interface for
79 * backwards compatibility, but starting with the 3c905B and the
80 * "cyclone" chips, the compatibility interface has been dropped.
81 * Since using bus master DMA is a big win, we use this driver to
82 * support the PCI "boomerang" chips even though they work with the
83 * "vortex" driver in order to obtain better performance.
84 *
85 * This driver is in the /sys/pci directory because it only supports
86 * PCI-based NICs.
87 */
88
89#include "bpfilter.h"
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/socket.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104
105#if NBPFILTER > 0
106#include <net/bpf.h>
107#endif
108
109#include <vm/vm.h> /* for vtophys */
110#include <vm/pmap.h> /* for vtophys */
111#include <machine/clock.h> /* for DELAY */
112#include <machine/bus_memio.h>
113#include <machine/bus_pio.h>
114#include <machine/bus.h>
115
116#include <pci/pcireg.h>
117#include <pci/pcivar.h>
118
119/*
120 * The following #define causes the code to use PIO to access the
121 * chip's registers instead of memory mapped mode. The reason PIO mode
122 * is on by default is that the Etherlink XL manual seems to indicate
123 * that only the newer revision chips (3c905B) support both PIO and
124 * memory mapped access. Since we want to be compatible with the older
125 * bus master chips, we use PIO here. If you comment this out, the
126 * driver will use memory mapped I/O, which may be faster but which
127 * might not work on some devices.
128 */
129#define XL_USEIOSPACE
130
131/*
132 * This #define controls the behavior of autonegotiation during the
133 * bootstrap phase. It's possible to have the driver initiate an
134 * autonegotiation session and then set a timeout which will cause the
135 * autoneg results to be polled later, usually once the kernel has
136 * finished booting. This is clever and all, but it can have bad side
137 * effects in some cases, particularly where NFS is involved. For
138 * example, if we're booting diskless with an NFS rootfs, the network
139 * interface has to be up and running before we hit the mountroot()
140 * code, otherwise mounting the rootfs will fail and we'll probably
141 * panic.
142 *
143 * Consequently, the 'backgrounded' autoneg behavior is turned off
144 * by default and we actually sit and wait 5 seconds for autonegotiation
145 * to complete before proceeding with the other device probes. If you
146 * choose to use the other behavior, you can uncomment this #define and
147 * recompile.
148 */
149/* #define XL_BACKGROUND_AUTONEG */
150
151#include <pci/if_xlreg.h>
152
153#if !defined(lint)
154static const char rcsid[] =
155 "$Id: if_xl.c,v 1.75 1999/04/12 20:23:08 wpaul Exp $";
156 "$Id: if_xl.c,v 1.83 1999/04/15 02:34:54 wpaul Exp $";
156#endif
157
158/*
159 * Various supported device vendors/types and their names.
160 */
161static struct xl_type xl_devs[] = {
162 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
163 "3Com 3c900-TPO Etherlink XL" },
164 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
165 "3Com 3c900-COMBO Etherlink XL" },
166 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
167 "3Com 3c905-TX Fast Etherlink XL" },
168 { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
169 "3Com 3c905-T4 Fast Etherlink XL" },
170 { TC_VENDORID, TC_DEVICEID_CYCLONE_10BT,
171 "3Com 3c900B-TPO Etherlink XL" },
172 { TC_VENDORID, TC_DEVICEID_CYCLONE_10BT_COMBO,
173 "3Com 3c900B-COMBO Etherlink XL" },
175 { TC_VENDORID, TC_DEVICEID_CYCLONE_10BT_TPC,
176 "3Com 3c900B-TPC Etherlink XL" },
177 { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
178 "3Com 3c900B-FL Etherlink XL" },
174 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT,
175 "3Com 3c905B-TX Fast Etherlink XL" },
176 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
177 "3Com 3c905B-T4 Fast Etherlink XL" },
178 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
179 "3Com 3c905B-FX/SC Fast Etherlink XL" },
180 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
181 "3Com 3c905B-COMBO Fast Etherlink XL" },
182 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT_SERV,
183 "3Com 3c980 Fast Etherlink XL" },
184 { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
185 "3Com 3cSOHO100-TX OfficeConnect" },
186 { 0, 0, NULL }
187};
188
189/*
190 * Various supported PHY vendors/types and their names. Note that
191 * this driver will work with pretty much any MII-compliant PHY,
192 * so failure to positively identify the chip is not a fatal error.
193 */
194
195static struct xl_type xl_phys[] = {
196 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
197 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
198 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
199 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
200 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
201 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
202 { 0, 0, "<MII-compliant physical interface>" }
203};
204
205static unsigned long xl_count = 0;
206static const char *xl_probe __P((pcici_t, pcidi_t));
207static void xl_attach __P((pcici_t, int));
208
209static int xl_newbuf __P((struct xl_softc *,
210 struct xl_chain_onefrag *));
211static void xl_stats_update __P((void *));
212static int xl_encap __P((struct xl_softc *, struct xl_chain *,
213 struct mbuf * ));
214
215static void xl_rxeof __P((struct xl_softc *));
216static void xl_txeof __P((struct xl_softc *));
217static void xl_txeoc __P((struct xl_softc *));
218static void xl_intr __P((void *));
219static void xl_start __P((struct ifnet *));
220static int xl_ioctl __P((struct ifnet *, u_long, caddr_t));
221static void xl_init __P((void *));
222static void xl_stop __P((struct xl_softc *));
223static void xl_watchdog __P((struct ifnet *));
224static void xl_shutdown __P((int, void *));
225static int xl_ifmedia_upd __P((struct ifnet *));
226static void xl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
227
228static int xl_eeprom_wait __P((struct xl_softc *));
229static int xl_read_eeprom __P((struct xl_softc *, caddr_t, int,
230 int, int));
231static void xl_mii_sync __P((struct xl_softc *));
232static void xl_mii_send __P((struct xl_softc *, u_int32_t, int));
233static int xl_mii_readreg __P((struct xl_softc *, struct xl_mii_frame *));
234static int xl_mii_writereg __P((struct xl_softc *, struct xl_mii_frame *));
235static u_int16_t xl_phy_readreg __P((struct xl_softc *, int));
236static void xl_phy_writereg __P((struct xl_softc *, int, int));
237
238static void xl_autoneg_xmit __P((struct xl_softc *));
239static void xl_autoneg_mii __P((struct xl_softc *, int, int));
240static void xl_setmode_mii __P((struct xl_softc *, int));
241static void xl_getmode_mii __P((struct xl_softc *));
242static void xl_setmode __P((struct xl_softc *, int));
243static u_int8_t xl_calchash __P((caddr_t));
244static void xl_setmulti __P((struct xl_softc *));
245static void xl_setmulti_hash __P((struct xl_softc *));
246static void xl_reset __P((struct xl_softc *));
247static int xl_list_rx_init __P((struct xl_softc *));
248static int xl_list_tx_init __P((struct xl_softc *));
249static void xl_wait __P((struct xl_softc *));
250static void xl_mediacheck __P((struct xl_softc *));
251#ifdef notdef
252static void xl_testpacket __P((struct xl_softc *));
253#endif
254
255/*
256 * Murphy's law says that it's possible the chip can wedge and
257 * the 'command in progress' bit may never clear. Hence, we wait
258 * only a finite amount of time to avoid getting caught in an
259 * infinite loop. Normally this delay routine would be a macro,
260 * but it isn't called during normal operation so we can afford
261 * to make it a function.
262 */
263static void xl_wait(sc)
264 struct xl_softc *sc;
265{
266 register int i;
267
268 for (i = 0; i < XL_TIMEOUT; i++) {
269 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
270 break;
271 }
272
273 if (i == XL_TIMEOUT)
274 printf("xl%d: command never completed!\n", sc->xl_unit);
275
276 return;
277}
278
279/*
280 * MII access routines are provided for adapters with external
281 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
282 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
283 * Note: if you don't perform the MDIO operations just right,
284 * it's possible to end up with code that works correctly with
285 * some chips/CPUs/processor speeds/bus speeds/etc but not
286 * with others.
287 */
288#define MII_SET(x) \
289 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
290 CSR_READ_2(sc, XL_W4_PHY_MGMT) | x)
291
292#define MII_CLR(x) \
293 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
294 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~x)
295
296/*
 297 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
298 */
299static void xl_mii_sync(sc)
300 struct xl_softc *sc;
301{
302 register int i;
303
304 XL_SEL_WIN(4);
305 MII_SET(XL_MII_DIR|XL_MII_DATA);
306
307 for (i = 0; i < 32; i++) {
308 MII_SET(XL_MII_CLK);
309 DELAY(1);
310 MII_CLR(XL_MII_CLK);
311 DELAY(1);
312 }
313
314 return;
315}
316
317/*
318 * Clock a series of bits through the MII.
319 */
320static void xl_mii_send(sc, bits, cnt)
321 struct xl_softc *sc;
322 u_int32_t bits;
323 int cnt;
324{
325 int i;
326
327 XL_SEL_WIN(4);
328 MII_CLR(XL_MII_CLK);
329
330 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
331 if (bits & i) {
332 MII_SET(XL_MII_DATA);
333 } else {
334 MII_CLR(XL_MII_DATA);
335 }
336 DELAY(1);
337 MII_CLR(XL_MII_CLK);
338 DELAY(1);
339 MII_SET(XL_MII_CLK);
340 }
341}
342
343/*
 344 * Read a PHY register through the MII.
345 */
346static int xl_mii_readreg(sc, frame)
347 struct xl_softc *sc;
348 struct xl_mii_frame *frame;
349
350{
351 int i, ack, s;
352
353 s = splimp();
354
355 /*
356 * Set up frame for RX.
357 */
358 frame->mii_stdelim = XL_MII_STARTDELIM;
359 frame->mii_opcode = XL_MII_READOP;
360 frame->mii_turnaround = 0;
361 frame->mii_data = 0;
362
363 /*
364 * Select register window 4.
365 */
366
367 XL_SEL_WIN(4);
368
369 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
370 /*
371 * Turn on data xmit.
372 */
373 MII_SET(XL_MII_DIR);
374
375 xl_mii_sync(sc);
376
377 /*
378 * Send command/address info.
379 */
380 xl_mii_send(sc, frame->mii_stdelim, 2);
381 xl_mii_send(sc, frame->mii_opcode, 2);
382 xl_mii_send(sc, frame->mii_phyaddr, 5);
383 xl_mii_send(sc, frame->mii_regaddr, 5);
384
385 /* Idle bit */
386 MII_CLR((XL_MII_CLK|XL_MII_DATA));
387 DELAY(1);
388 MII_SET(XL_MII_CLK);
389 DELAY(1);
390
391 /* Turn off xmit. */
392 MII_CLR(XL_MII_DIR);
393
394 /* Check for ack */
395 MII_CLR(XL_MII_CLK);
396 DELAY(1);
397 MII_SET(XL_MII_CLK);
398 DELAY(1);
399 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
400
401 /*
402 * Now try reading data bits. If the ack failed, we still
403 * need to clock through 16 cycles to keep the PHY(s) in sync.
404 */
405 if (ack) {
406 for(i = 0; i < 16; i++) {
407 MII_CLR(XL_MII_CLK);
408 DELAY(1);
409 MII_SET(XL_MII_CLK);
410 DELAY(1);
411 }
412 goto fail;
413 }
414
415 for (i = 0x8000; i; i >>= 1) {
416 MII_CLR(XL_MII_CLK);
417 DELAY(1);
418 if (!ack) {
419 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
420 frame->mii_data |= i;
421 DELAY(1);
422 }
423 MII_SET(XL_MII_CLK);
424 DELAY(1);
425 }
426
427fail:
428
429 MII_CLR(XL_MII_CLK);
430 DELAY(1);
431 MII_SET(XL_MII_CLK);
432 DELAY(1);
433
434 splx(s);
435
436 if (ack)
437 return(1);
438 return(0);
439}
440
441/*
442 * Write to a PHY register through the MII.
443 */
444static int xl_mii_writereg(sc, frame)
445 struct xl_softc *sc;
446 struct xl_mii_frame *frame;
447
448{
449 int s;
450
451
452
453 s = splimp();
454 /*
455 * Set up frame for TX.
456 */
457
458 frame->mii_stdelim = XL_MII_STARTDELIM;
459 frame->mii_opcode = XL_MII_WRITEOP;
460 frame->mii_turnaround = XL_MII_TURNAROUND;
461
462 /*
 463 * Select register window 4.
464 */
465 XL_SEL_WIN(4);
466
467 /*
468 * Turn on data output.
469 */
470 MII_SET(XL_MII_DIR);
471
472 xl_mii_sync(sc);
473
474 xl_mii_send(sc, frame->mii_stdelim, 2);
475 xl_mii_send(sc, frame->mii_opcode, 2);
476 xl_mii_send(sc, frame->mii_phyaddr, 5);
477 xl_mii_send(sc, frame->mii_regaddr, 5);
478 xl_mii_send(sc, frame->mii_turnaround, 2);
479 xl_mii_send(sc, frame->mii_data, 16);
480
481 /* Idle bit. */
482 MII_SET(XL_MII_CLK);
483 DELAY(1);
484 MII_CLR(XL_MII_CLK);
485 DELAY(1);
486
487 /*
488 * Turn off xmit.
489 */
490 MII_CLR(XL_MII_DIR);
491
492 splx(s);
493
494 return(0);
495}
496
497static u_int16_t xl_phy_readreg(sc, reg)
498 struct xl_softc *sc;
499 int reg;
500{
501 struct xl_mii_frame frame;
502
503 bzero((char *)&frame, sizeof(frame));
504
505 frame.mii_phyaddr = sc->xl_phy_addr;
506 frame.mii_regaddr = reg;
507 xl_mii_readreg(sc, &frame);
508
509 return(frame.mii_data);
510}
511
512static void xl_phy_writereg(sc, reg, data)
513 struct xl_softc *sc;
514 int reg;
515 int data;
516{
517 struct xl_mii_frame frame;
518
519 bzero((char *)&frame, sizeof(frame));
520
521 frame.mii_phyaddr = sc->xl_phy_addr;
522 frame.mii_regaddr = reg;
523 frame.mii_data = data;
524
525 xl_mii_writereg(sc, &frame);
526
527 return;
528}
529
530/*
531 * The EEPROM is slow: give it time to come ready after issuing
532 * it a command.
533 */
534static int xl_eeprom_wait(sc)
535 struct xl_softc *sc;
536{
537 int i;
538
539 for (i = 0; i < 100; i++) {
540 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
541 DELAY(162);
542 else
543 break;
544 }
545
546 if (i == 100) {
547 printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
548 return(1);
549 }
550
551 return(0);
552}
553
554/*
555 * Read a sequence of words from the EEPROM. Note that ethernet address
556 * data is stored in the EEPROM in network byte order.
557 */
558static int xl_read_eeprom(sc, dest, off, cnt, swap)
559 struct xl_softc *sc;
560 caddr_t dest;
561 int off;
562 int cnt;
563 int swap;
564{
565 int err = 0, i;
566 u_int16_t word = 0, *ptr;
567
568 XL_SEL_WIN(0);
569
570 if (xl_eeprom_wait(sc))
571 return(1);
572
573 for (i = 0; i < cnt; i++) {
574 CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | (off + i));
575 err = xl_eeprom_wait(sc);
576 if (err)
577 break;
578 word = CSR_READ_2(sc, XL_W0_EE_DATA);
579 ptr = (u_int16_t *)(dest + (i * 2));
580 if (swap)
581 *ptr = ntohs(word);
582 else
583 *ptr = word;
584 }
585
586 return(err ? 1 : 0);
587}
588
589/*
590 * This routine is taken from the 3Com Etherlink XL manual,
591 * page 10-7. It calculates a CRC of the supplied multicast
592 * group address and returns the lower 8 bits, which are used
593 * as the multicast filter position.
594 * Note: the 3c905B currently only supports a 64-bit hash table,
595 * which means we really only need 6 bits, but the manual indicates
596 * that future chip revisions will have a 256-bit hash table,
597 * hence the routine is set up to calculate 8 bits of position
598 * info in case we need it some day.
599 * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
600 * 256 bit hash table. This means we have to use all 8 bits regardless.
601 * On older cards, the upper 2 bits will be ignored. Grrrr....
602 */
603static u_int8_t xl_calchash(addr)
604 caddr_t addr;
605{
606 u_int32_t crc, carry;
607 int i, j;
608 u_int8_t c;
609
610 /* Compute CRC for the address value. */
611 crc = 0xFFFFFFFF; /* initial value */
612
613 for (i = 0; i < 6; i++) {
614 c = *(addr + i);
615 for (j = 0; j < 8; j++) {
616 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
617 crc <<= 1;
618 c >>= 1;
619 if (carry)
620 crc = (crc ^ 0x04c11db6) | carry;
621 }
622 }
623
624 /* return the filter bit position */
625 return(crc & 0x000000FF);
626}
627
628/*
629 * NICs older than the 3c905B have only one multicast option, which
630 * is to enable reception of all multicast frames.
631 */
632static void xl_setmulti(sc)
633 struct xl_softc *sc;
634{
635 struct ifnet *ifp;
636 struct ifmultiaddr *ifma;
637 u_int8_t rxfilt;
638 int mcnt = 0;
639
640 ifp = &sc->arpcom.ac_if;
641
642 XL_SEL_WIN(5);
643 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
644
645 if (ifp->if_flags & IFF_ALLMULTI) {
646 rxfilt |= XL_RXFILTER_ALLMULTI;
647 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
648 return;
649 }
650
651 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
652 ifma = ifma->ifma_link.le_next)
653 mcnt++;
654
655 if (mcnt)
656 rxfilt |= XL_RXFILTER_ALLMULTI;
657 else
658 rxfilt &= ~XL_RXFILTER_ALLMULTI;
659
660 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
661
662 return;
663}
664
665/*
666 * 3c905B adapters have a hash filter that we can program.
667 */
668static void xl_setmulti_hash(sc)
669 struct xl_softc *sc;
670{
671 struct ifnet *ifp;
672 int h = 0, i;
673 struct ifmultiaddr *ifma;
674 u_int8_t rxfilt;
675 int mcnt = 0;
676
677 ifp = &sc->arpcom.ac_if;
678
679 XL_SEL_WIN(5);
680 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
681
682 if (ifp->if_flags & IFF_ALLMULTI) {
683 rxfilt |= XL_RXFILTER_ALLMULTI;
684 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
685 return;
686 } else
687 rxfilt &= ~XL_RXFILTER_ALLMULTI;
688
689
690 /* first, zot all the existing hash bits */
691 for (i = 0; i < XL_HASHFILT_SIZE; i++)
692 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
693
694 /* now program new ones */
695 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
696 ifma = ifma->ifma_link.le_next) {
697 if (ifma->ifma_addr->sa_family != AF_LINK)
698 continue;
699 h = xl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
700 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
701 mcnt++;
702 }
703
704 if (mcnt)
705 rxfilt |= XL_RXFILTER_MULTIHASH;
706 else
707 rxfilt &= ~XL_RXFILTER_MULTIHASH;
708
709 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
710
711 return;
712}
713
714#ifdef notdef
715static void xl_testpacket(sc)
716 struct xl_softc *sc;
717{
718 struct mbuf *m;
719 struct ifnet *ifp;
720
721 ifp = &sc->arpcom.ac_if;
722
723 MGETHDR(m, M_DONTWAIT, MT_DATA);
724
725 if (m == NULL)
726 return;
727
728 bcopy(&sc->arpcom.ac_enaddr,
729 mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
730 bcopy(&sc->arpcom.ac_enaddr,
731 mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
732 mtod(m, struct ether_header *)->ether_type = htons(3);
733 mtod(m, unsigned char *)[14] = 0;
734 mtod(m, unsigned char *)[15] = 0;
735 mtod(m, unsigned char *)[16] = 0xE3;
736 m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
737 IF_ENQUEUE(&ifp->if_snd, m);
738 xl_start(ifp);
739
740 return;
741}
742#endif
743
744/*
745 * Initiate an autonegotiation session.
746 */
747static void xl_autoneg_xmit(sc)
748 struct xl_softc *sc;
749{
750 u_int16_t phy_sts;
754 u_int32_t icfg;
751
755
756 xl_reset(sc);
757 XL_SEL_WIN(3);
758 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
759 icfg &= ~XL_ICFG_CONNECTOR_MASK;
760 if (sc->xl_media & XL_MEDIAOPT_MII ||
761 sc->xl_media & XL_MEDIAOPT_BT4)
762 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
763 if (sc->xl_media & XL_MEDIAOPT_BTX)
764 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
765 if (sc->xl_media & XL_MEDIAOPT_BFX)
766 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
767 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
768 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
769
752 xl_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
753 DELAY(500);
754 while(xl_phy_readreg(sc, XL_PHY_GENCTL)
755 & PHY_BMCR_RESET);
756
757 phy_sts = xl_phy_readreg(sc, PHY_BMCR);
758 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
759 xl_phy_writereg(sc, PHY_BMCR, phy_sts);
760
761 return;
762}
763
764/*
765 * Invoke autonegotiation on a PHY. Also used with the 3Com internal
766 * autoneg logic which is mapped onto the MII.
767 */
768static void xl_autoneg_mii(sc, flag, verbose)
769 struct xl_softc *sc;
770 int flag;
771 int verbose;
772{
773 u_int16_t phy_sts = 0, media, advert, ability;
774 struct ifnet *ifp;
775 struct ifmedia *ifm;
776
777 ifm = &sc->ifmedia;
778 ifp = &sc->arpcom.ac_if;
779
780 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
781
782 /*
783 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
784 * bit cleared in the status register, but has the 'autoneg enabled'
785 * bit set in the control register. This is a contradiction, and
786 * I'm not sure how to handle it. If you want to force an attempt
787 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
788 * and see what happens.
789 */
790#ifndef FORCE_AUTONEG_TFOUR
791 /*
792 * First, see if autoneg is supported. If not, there's
793 * no point in continuing.
794 */
795 phy_sts = xl_phy_readreg(sc, PHY_BMSR);
796 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
797 if (verbose)
798 printf("xl%d: autonegotiation not supported\n",
799 sc->xl_unit);
800 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
801 media = xl_phy_readreg(sc, PHY_BMCR);
802 media &= ~PHY_BMCR_SPEEDSEL;
803 media &= ~PHY_BMCR_DUPLEX;
804 xl_phy_writereg(sc, PHY_BMCR, media);
805 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
806 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
807 ~XL_MACCTRL_DUPLEX));
808 return;
809 }
810#endif
811
812 switch (flag) {
813 case XL_FLAG_FORCEDELAY:
814 /*
815 * XXX Never use this option anywhere but in the probe
816 * routine: making the kernel stop dead in its tracks
 817 * for five whole seconds after we've gone multi-user
818 * is really bad manners.
819 */
820 xl_autoneg_xmit(sc);
821 DELAY(5000000);
822 break;
823 case XL_FLAG_SCHEDDELAY:
824 /*
825 * Wait for the transmitter to go idle before starting
826 * an autoneg session, otherwise xl_start() may clobber
827 * our timeout, and we don't want to allow transmission
828 * during an autoneg session since that can screw it up.
829 */
830 if (sc->xl_cdata.xl_tx_head != NULL) {
831 sc->xl_want_auto = 1;
832 return;
833 }
834 xl_autoneg_xmit(sc);
835 ifp->if_timer = 5;
836 sc->xl_autoneg = 1;
837 sc->xl_want_auto = 0;
838 return;
839 break;
840 case XL_FLAG_DELAYTIMEO:
841 ifp->if_timer = 0;
842 sc->xl_autoneg = 0;
843 break;
844 default:
845 printf("xl%d: invalid autoneg flag: %d\n", sc->xl_unit, flag);
846 return;
847 }
848
849 if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
850 if (verbose)
851 printf("xl%d: autoneg complete, ", sc->xl_unit);
852 phy_sts = xl_phy_readreg(sc, PHY_BMSR);
853 } else {
854 if (verbose)
855 printf("xl%d: autoneg not complete, ", sc->xl_unit);
856 }
857
858 media = xl_phy_readreg(sc, PHY_BMCR);
859
860 /* Link is good. Report modes and set duplex mode. */
861 if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
862 if (verbose)
863 printf("link status good ");
864 advert = xl_phy_readreg(sc, XL_PHY_ANAR);
865 ability = xl_phy_readreg(sc, XL_PHY_LPAR);
866
867 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
868 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
869 media |= PHY_BMCR_SPEEDSEL;
870 media &= ~PHY_BMCR_DUPLEX;
871 printf("(100baseT4)\n");
872 } else if (advert & PHY_ANAR_100BTXFULL &&
873 ability & PHY_ANAR_100BTXFULL) {
874 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
875 media |= PHY_BMCR_SPEEDSEL;
876 media |= PHY_BMCR_DUPLEX;
877 printf("(full-duplex, 100Mbps)\n");
878 } else if (advert & PHY_ANAR_100BTXHALF &&
879 ability & PHY_ANAR_100BTXHALF) {
880 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
881 media |= PHY_BMCR_SPEEDSEL;
882 media &= ~PHY_BMCR_DUPLEX;
883 printf("(half-duplex, 100Mbps)\n");
884 } else if (advert & PHY_ANAR_10BTFULL &&
885 ability & PHY_ANAR_10BTFULL) {
886 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
887 media &= ~PHY_BMCR_SPEEDSEL;
888 media |= PHY_BMCR_DUPLEX;
889 printf("(full-duplex, 10Mbps)\n");
890 } else if (advert & PHY_ANAR_10BTHALF &&
891 ability & PHY_ANAR_10BTHALF) {
892 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
893 media &= ~PHY_BMCR_SPEEDSEL;
894 media &= ~PHY_BMCR_DUPLEX;
895 printf("(half-duplex, 10Mbps)\n");
896 }
897
898 /* Set ASIC's duplex mode to match the PHY. */
899 XL_SEL_WIN(3);
900 if (media & PHY_BMCR_DUPLEX)
901 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
902 else
903 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
904 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
905 ~XL_MACCTRL_DUPLEX));
906 xl_phy_writereg(sc, PHY_BMCR, media);
907 } else {
908 if (verbose)
909 printf("no carrier (forcing half-duplex, 10Mbps)\n");
910 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
911 media &= ~PHY_BMCR_SPEEDSEL;
912 media &= ~PHY_BMCR_DUPLEX;
913 xl_phy_writereg(sc, PHY_BMCR, media);
914 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
915 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
916 ~XL_MACCTRL_DUPLEX));
917 }
918
919 xl_init(sc);
920
921 if (sc->xl_tx_pend) {
922 sc->xl_autoneg = 0;
923 sc->xl_tx_pend = 0;
924 xl_start(ifp);
925 }
926
927 return;
928}
929
930static void xl_getmode_mii(sc)
931 struct xl_softc *sc;
932{
933 u_int16_t bmsr;
934 struct ifnet *ifp;
935
936 ifp = &sc->arpcom.ac_if;
937
938 bmsr = xl_phy_readreg(sc, PHY_BMSR);
939 if (bootverbose)
940 printf("xl%d: PHY status word: %x\n", sc->xl_unit, bmsr);
941
942 /* fallback */
943 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
944
945 if (bmsr & PHY_BMSR_10BTHALF) {
946 if (bootverbose)
947 printf("xl%d: 10Mbps half-duplex mode supported\n",
948 sc->xl_unit);
949 ifmedia_add(&sc->ifmedia,
950 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
951 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
952 }
953
954 if (bmsr & PHY_BMSR_10BTFULL) {
955 if (bootverbose)
956 printf("xl%d: 10Mbps full-duplex mode supported\n",
957 sc->xl_unit);
958 ifmedia_add(&sc->ifmedia,
959 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
960 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
961 }
962
963 if (bmsr & PHY_BMSR_100BTXHALF) {
964 if (bootverbose)
965 printf("xl%d: 100Mbps half-duplex mode supported\n",
966 sc->xl_unit);
967 ifp->if_baudrate = 100000000;
968 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
969 ifmedia_add(&sc->ifmedia,
970 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
971 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
972 }
973
974 if (bmsr & PHY_BMSR_100BTXFULL) {
975 if (bootverbose)
976 printf("xl%d: 100Mbps full-duplex mode supported\n",
977 sc->xl_unit);
978 ifp->if_baudrate = 100000000;
979 ifmedia_add(&sc->ifmedia,
980 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
981 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
982 }
983
984 /* Some also support 100BaseT4. */
985 if (bmsr & PHY_BMSR_100BT4) {
986 if (bootverbose)
987 printf("xl%d: 100baseT4 mode supported\n", sc->xl_unit);
988 ifp->if_baudrate = 100000000;
989 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
990 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
991#ifdef FORCE_AUTONEG_TFOUR
992 if (bootverbose)
993 printf("xl%d: forcing on autoneg support for BT4\n",
994 sc->xl_unit);
995 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
996 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
997#endif
998 }
999
1000 if (bmsr & PHY_BMSR_CANAUTONEG) {
1001 if (bootverbose)
1002 printf("xl%d: autoneg supported\n", sc->xl_unit);
1003 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1004 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
1005 }
1006
1007 return;
1008}
1009
1010/*
1011 * Set speed and duplex mode.
1012 */
1013static void xl_setmode_mii(sc, media)
1014 struct xl_softc *sc;
1015 int media;
1016{
1017 u_int16_t bmcr;
1018 u_int32_t icfg;
1019 struct ifnet *ifp;
1020
1021 ifp = &sc->arpcom.ac_if;
1022
1023 /*
1024 * If an autoneg session is in progress, stop it.
1025 */
1026 if (sc->xl_autoneg) {
1027 printf("xl%d: canceling autoneg session\n", sc->xl_unit);
1028 ifp->if_timer = sc->xl_autoneg = sc->xl_want_auto = 0;
770 xl_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
771 DELAY(500);
772 while(xl_phy_readreg(sc, XL_PHY_GENCTL)
773 & PHY_BMCR_RESET);
774
775 phy_sts = xl_phy_readreg(sc, PHY_BMCR);
776 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
777 xl_phy_writereg(sc, PHY_BMCR, phy_sts);
778
779 return;
780}
781
782/*
783 * Invoke autonegotiation on a PHY. Also used with the 3Com internal
784 * autoneg logic which is mapped onto the MII.
785 */
786static void xl_autoneg_mii(sc, flag, verbose)
787 struct xl_softc *sc;
788 int flag;
789 int verbose;
790{
791 u_int16_t phy_sts = 0, media, advert, ability;
792 struct ifnet *ifp;
793 struct ifmedia *ifm;
794
795 ifm = &sc->ifmedia;
796 ifp = &sc->arpcom.ac_if;
797
798 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
799
800 /*
801 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
802 * bit cleared in the status register, but has the 'autoneg enabled'
803 * bit set in the control register. This is a contradiction, and
804 * I'm not sure how to handle it. If you want to force an attempt
805 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
806 * and see what happens.
807 */
808#ifndef FORCE_AUTONEG_TFOUR
809 /*
810 * First, see if autoneg is supported. If not, there's
811 * no point in continuing.
812 */
813 phy_sts = xl_phy_readreg(sc, PHY_BMSR);
814 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
815 if (verbose)
816 printf("xl%d: autonegotiation not supported\n",
817 sc->xl_unit);
818 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
819 media = xl_phy_readreg(sc, PHY_BMCR);
820 media &= ~PHY_BMCR_SPEEDSEL;
821 media &= ~PHY_BMCR_DUPLEX;
822 xl_phy_writereg(sc, PHY_BMCR, media);
823 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
824 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
825 ~XL_MACCTRL_DUPLEX));
826 return;
827 }
828#endif
829
830 switch (flag) {
831 case XL_FLAG_FORCEDELAY:
832 /*
833 * XXX Never use this option anywhere but in the probe
834 * routine: making the kernel stop dead in its tracks
835 * for three whole seconds after we've gone multi-user
836 * is really bad manners.
837 */
838 xl_autoneg_xmit(sc);
839 DELAY(5000000);
840 break;
841 case XL_FLAG_SCHEDDELAY:
842 /*
843 * Wait for the transmitter to go idle before starting
844 * an autoneg session, otherwise xl_start() may clobber
845 * our timeout, and we don't want to allow transmission
846 * during an autoneg session since that can screw it up.
847 */
848 if (sc->xl_cdata.xl_tx_head != NULL) {
849 sc->xl_want_auto = 1;
850 return;
851 }
852 xl_autoneg_xmit(sc);
853 ifp->if_timer = 5;
854 sc->xl_autoneg = 1;
855 sc->xl_want_auto = 0;
856 return;
857 break;
858 case XL_FLAG_DELAYTIMEO:
859 ifp->if_timer = 0;
860 sc->xl_autoneg = 0;
861 break;
862 default:
863 printf("xl%d: invalid autoneg flag: %d\n", sc->xl_unit, flag);
864 return;
865 }
866
867 if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
868 if (verbose)
869 printf("xl%d: autoneg complete, ", sc->xl_unit);
870 phy_sts = xl_phy_readreg(sc, PHY_BMSR);
871 } else {
872 if (verbose)
873 printf("xl%d: autoneg not complete, ", sc->xl_unit);
874 }
875
876 media = xl_phy_readreg(sc, PHY_BMCR);
877
878 /* Link is good. Report modes and set duplex mode. */
879 if (xl_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
880 if (verbose)
881 printf("link status good ");
882 advert = xl_phy_readreg(sc, XL_PHY_ANAR);
883 ability = xl_phy_readreg(sc, XL_PHY_LPAR);
884
885 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
886 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
887 media |= PHY_BMCR_SPEEDSEL;
888 media &= ~PHY_BMCR_DUPLEX;
889 printf("(100baseT4)\n");
890 } else if (advert & PHY_ANAR_100BTXFULL &&
891 ability & PHY_ANAR_100BTXFULL) {
892 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
893 media |= PHY_BMCR_SPEEDSEL;
894 media |= PHY_BMCR_DUPLEX;
895 printf("(full-duplex, 100Mbps)\n");
896 } else if (advert & PHY_ANAR_100BTXHALF &&
897 ability & PHY_ANAR_100BTXHALF) {
898 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
899 media |= PHY_BMCR_SPEEDSEL;
900 media &= ~PHY_BMCR_DUPLEX;
901 printf("(half-duplex, 100Mbps)\n");
902 } else if (advert & PHY_ANAR_10BTFULL &&
903 ability & PHY_ANAR_10BTFULL) {
904 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
905 media &= ~PHY_BMCR_SPEEDSEL;
906 media |= PHY_BMCR_DUPLEX;
907 printf("(full-duplex, 10Mbps)\n");
908 } else if (advert & PHY_ANAR_10BTHALF &&
909 ability & PHY_ANAR_10BTHALF) {
910 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
911 media &= ~PHY_BMCR_SPEEDSEL;
912 media &= ~PHY_BMCR_DUPLEX;
913 printf("(half-duplex, 10Mbps)\n");
914 }
915
916 /* Set ASIC's duplex mode to match the PHY. */
917 XL_SEL_WIN(3);
918 if (media & PHY_BMCR_DUPLEX)
919 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
920 else
921 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
922 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
923 ~XL_MACCTRL_DUPLEX));
924 xl_phy_writereg(sc, PHY_BMCR, media);
925 } else {
926 if (verbose)
927 printf("no carrier (forcing half-duplex, 10Mbps)\n");
928 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
929 media &= ~PHY_BMCR_SPEEDSEL;
930 media &= ~PHY_BMCR_DUPLEX;
931 xl_phy_writereg(sc, PHY_BMCR, media);
932 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
933 (CSR_READ_1(sc, XL_W3_MAC_CTRL) &
934 ~XL_MACCTRL_DUPLEX));
935 }
936
937 xl_init(sc);
938
939 if (sc->xl_tx_pend) {
940 sc->xl_autoneg = 0;
941 sc->xl_tx_pend = 0;
942 xl_start(ifp);
943 }
944
945 return;
946}
947
948static void xl_getmode_mii(sc)
949 struct xl_softc *sc;
950{
951 u_int16_t bmsr;
952 struct ifnet *ifp;
953
954 ifp = &sc->arpcom.ac_if;
955
956 bmsr = xl_phy_readreg(sc, PHY_BMSR);
957 if (bootverbose)
958 printf("xl%d: PHY status word: %x\n", sc->xl_unit, bmsr);
959
960 /* fallback */
961 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
962
963 if (bmsr & PHY_BMSR_10BTHALF) {
964 if (bootverbose)
965 printf("xl%d: 10Mbps half-duplex mode supported\n",
966 sc->xl_unit);
967 ifmedia_add(&sc->ifmedia,
968 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
969 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
970 }
971
972 if (bmsr & PHY_BMSR_10BTFULL) {
973 if (bootverbose)
974 printf("xl%d: 10Mbps full-duplex mode supported\n",
975 sc->xl_unit);
976 ifmedia_add(&sc->ifmedia,
977 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
978 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
979 }
980
981 if (bmsr & PHY_BMSR_100BTXHALF) {
982 if (bootverbose)
983 printf("xl%d: 100Mbps half-duplex mode supported\n",
984 sc->xl_unit);
985 ifp->if_baudrate = 100000000;
986 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
987 ifmedia_add(&sc->ifmedia,
988 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
989 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
990 }
991
992 if (bmsr & PHY_BMSR_100BTXFULL) {
993 if (bootverbose)
994 printf("xl%d: 100Mbps full-duplex mode supported\n",
995 sc->xl_unit);
996 ifp->if_baudrate = 100000000;
997 ifmedia_add(&sc->ifmedia,
998 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
999 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1000 }
1001
1002 /* Some also support 100BaseT4. */
1003 if (bmsr & PHY_BMSR_100BT4) {
1004 if (bootverbose)
1005 printf("xl%d: 100baseT4 mode supported\n", sc->xl_unit);
1006 ifp->if_baudrate = 100000000;
1007 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
1008 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
1009#ifdef FORCE_AUTONEG_TFOUR
1010 if (bootverbose)
1011 printf("xl%d: forcing on autoneg support for BT4\n",
1012 sc->xl_unit);
1013 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1014 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
1015#endif
1016 }
1017
1018 if (bmsr & PHY_BMSR_CANAUTONEG) {
1019 if (bootverbose)
1020 printf("xl%d: autoneg supported\n", sc->xl_unit);
1021 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1022 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
1023 }
1024
1025 return;
1026}
1027
1028/*
1029 * Set speed and duplex mode.
1030 */
1031static void xl_setmode_mii(sc, media)
1032 struct xl_softc *sc;
1033 int media;
1034{
1035 u_int16_t bmcr;
1036 u_int32_t icfg;
1037 struct ifnet *ifp;
1038
1039 ifp = &sc->arpcom.ac_if;
1040
1041 /*
1042 * If an autoneg session is in progress, stop it.
1043 */
1044 if (sc->xl_autoneg) {
1045 printf("xl%d: canceling autoneg session\n", sc->xl_unit);
1046 ifp->if_timer = sc->xl_autoneg = sc->xl_want_auto = 0;
1047 bmcr = xl_phy_readreg(sc, PHY_BMCR);
1048 bmcr &= ~PHY_BMCR_AUTONEGENBL;
1049 xl_phy_writereg(sc, PHY_BMCR, bmcr);
1050 }
1051
1052 printf("xl%d: selecting MII, ", sc->xl_unit);
1053
1054 XL_SEL_WIN(3);
1055 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1056 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1057 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4)
1058 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
1059 if (sc->xl_media & XL_MEDIAOPT_BTX) {
1060 if (sc->xl_type == XL_TYPE_905B)
1061 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
1062 else
1063 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
1064 }
1065 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1066 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1067
1068 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1069 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1070 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1071 return;
1072 }
1073
1074 bmcr = xl_phy_readreg(sc, PHY_BMCR);
1075
1076 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
1077 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
1078
1079 if (IFM_SUBTYPE(media) == IFM_100_T4) {
1080 printf("100Mbps/T4, half-duplex\n");
1081 bmcr |= PHY_BMCR_SPEEDSEL;
1082 bmcr &= ~PHY_BMCR_DUPLEX;
1083 }
1084
1085 if (IFM_SUBTYPE(media) == IFM_100_TX) {
1086 printf("100Mbps, ");
1087 bmcr |= PHY_BMCR_SPEEDSEL;
1088 }
1089
1090 if (IFM_SUBTYPE(media) == IFM_10_T) {
1091 printf("10Mbps, ");
1092 bmcr &= ~PHY_BMCR_SPEEDSEL;
1093 }
1094
1095 if ((media & IFM_GMASK) == IFM_FDX) {
1096 printf("full duplex\n");
1097 bmcr |= PHY_BMCR_DUPLEX;
1098 XL_SEL_WIN(3);
1099 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1100 } else {
1101 printf("half duplex\n");
1102 bmcr &= ~PHY_BMCR_DUPLEX;
1103 XL_SEL_WIN(3);
1104 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1105 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1106 }
1107
1108 xl_phy_writereg(sc, PHY_BMCR, bmcr);
1109
1110 return;
1111}
1112
1113static void xl_setmode(sc, media)
1114 struct xl_softc *sc;
1115 int media;
1116{
1117 u_int32_t icfg;
1118 u_int16_t mediastat;
1119
1120 printf("xl%d: selecting ", sc->xl_unit);
1121
1122 XL_SEL_WIN(4);
1123 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
1124 XL_SEL_WIN(3);
1125 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1126
1127 if (sc->xl_media & XL_MEDIAOPT_BT) {
1128 if (IFM_SUBTYPE(media) == IFM_10_T) {
1129 printf("10baseT transceiver, ");
1130 sc->xl_xcvr = XL_XCVR_10BT;
1131 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1132 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
1133 mediastat |= XL_MEDIASTAT_LINKBEAT|
1134 XL_MEDIASTAT_JABGUARD;
1135 mediastat &= ~XL_MEDIASTAT_SQEENB;
1136 }
1137 }
1138
1139 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1140 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1141 printf("100baseFX port, ");
1142 sc->xl_xcvr = XL_XCVR_100BFX;
1143 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1144 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1145 mediastat |= XL_MEDIASTAT_LINKBEAT;
1146 mediastat &= ~XL_MEDIASTAT_SQEENB;
1147 }
1148 }
1149
1150 if (sc->xl_media & XL_MEDIAOPT_AUI) {
1151 if (IFM_SUBTYPE(media) == IFM_10_5) {
1152 printf("AUI port, ");
1153 sc->xl_xcvr = XL_XCVR_AUI;
1154 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1155 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
1156 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1157 XL_MEDIASTAT_JABGUARD);
1158 mediastat |= ~XL_MEDIASTAT_SQEENB;
1159 }
1160 }
1161
1162 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1163 if (IFM_SUBTYPE(media) == IFM_10_2) {
1164 printf("BNC port, ");
1165 sc->xl_xcvr = XL_XCVR_COAX;
1166 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1167 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
1168 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
1169 XL_MEDIASTAT_JABGUARD|
1170 XL_MEDIASTAT_SQEENB);
1171 }
1172 }
1173
1174 if ((media & IFM_GMASK) == IFM_FDX ||
1175 IFM_SUBTYPE(media) == IFM_100_FX) {
1176 printf("full duplex\n");
1177 XL_SEL_WIN(3);
1178 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
1179 } else {
1180 printf("half duplex\n");
1181 XL_SEL_WIN(3);
1182 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
1183 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
1184 }
1185
1186 if (IFM_SUBTYPE(media) == IFM_10_2)
1187 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1188 else
1189 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1190 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1191 XL_SEL_WIN(4);
1192 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
1193 DELAY(800);
1194 XL_SEL_WIN(7);
1195
1196 return;
1197}
1198
1199static void xl_reset(sc)
1200 struct xl_softc *sc;
1201{
1202 register int i;
1203
1204 XL_SEL_WIN(0);
1205 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET);
1206
1207 for (i = 0; i < XL_TIMEOUT; i++) {
1208 DELAY(10);
1209 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
1210 break;
1211 }
1212
1213 if (i == XL_TIMEOUT)
1214 printf("xl%d: reset didn't complete\n", sc->xl_unit);
1215
1216 /* Wait a little while for the chip to get its brains in order. */
1217 DELAY(1000);
1218 return;
1219}
1220
1221/*
1222 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1223 * IDs against our list and return a device name if we find a match.
1224 */
1225static const char *
1226xl_probe(config_id, device_id)
1227 pcici_t config_id;
1228 pcidi_t device_id;
1229{
1230 struct xl_type *t;
1231
1232 t = xl_devs;
1233
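	/*
	 * The 32-bit device_id word packs the PCI vendor ID in the low
	 * 16 bits and the device ID in the upper 16 bits; scan the
	 * supported-device table until both halves match.
	 */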
1234 while(t->xl_name != NULL) {
1235 if ((device_id & 0xFFFF) == t->xl_vid &&
1236 ((device_id >> 16) & 0xFFFF) == t->xl_did) {
1237 return(t->xl_name);
1238 }
1239 t++;
1240 }
1241
1242 return(NULL);
1243}
1244
1245/*
1246 * This routine is a kludge to work around possible hardware faults
1247 * or manufacturing defects that can cause the media options register
1248 * (or reset options register, as it's called for the first generation
1249 * 3cx90x adapters) to return an incorrect result. I have encountered
1250 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1251 * which doesn't have any of the 'mediaopt' bits set. This screws up
1252 * the attach routine pretty badly because it doesn't know what media
1253 * to look for. If we find ourselves in this predicament, this routine
1254 * will try to guess the media options values and warn the user of a
1255 * possible manufacturing defect with his adapter/system/whatever.
1256 */
1257static void xl_mediacheck(sc)
1258 struct xl_softc *sc;
1259{
1260 u_int16_t devid;
1261
1262 /*
1263 * If some of the media options bits are set, assume they are
1264 * correct. If not, try to figure it out down below.
1265 * XXX I should check for 10baseFL, but I don't have an adapter
1266 * to test with.
1267 */
1268 if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
1269 /*
1270 * Check the XCVR value. If it's not in the normal range
1271 * of values, we need to fake it up here.
1272 */
1273 if (sc->xl_xcvr <= XL_XCVR_AUTO)
1274 return;
1275 else {
1276 printf("xl%d: bogus xcvr value "
1277 "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
1278 printf("xl%d: choosing new default based "
1279 "on card type\n", sc->xl_unit);
1280 }
1281 } else {
1282 printf("xl%d: WARNING: no media options bits set in "
1283 "the media options register!!\n", sc->xl_unit);
1284 printf("xl%d: this could be a manufacturing defect in "
1285 "your adapter or system\n", sc->xl_unit);
1286 printf("xl%d: attempting to guess media type; you "
1287 "should probably consult your vendor\n", sc->xl_unit);
1288 }
1289
1290
1291 /*
1292 * Read the device ID from the EEPROM.
1293 * This is what's loaded into the PCI device ID register, so it has
1294 * to be correct otherwise we wouldn't have gotten this far.
1295 */
1296 xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
1297
1298 switch(devid) {
1299 case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TP */
1300 case TC_DEVICEID_CYCLONE_10BT: /* 3c900B-TP */
1301 sc->xl_media = XL_MEDIAOPT_BT;
1302 sc->xl_xcvr = XL_XCVR_10BT;
1303 printf("xl%d: guessing 10BaseT transceiver\n", sc->xl_unit);
1304 break;
1305 case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */
1306 case TC_DEVICEID_CYCLONE_10BT_COMBO: /* 3c900B-COMBO */
1307 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1308 sc->xl_xcvr = XL_XCVR_10BT;
1309 printf("xl%d: guessing COMBO (AUI/BNC/TP)\n", sc->xl_unit);
1310 break;
1311 case TC_DEVICEID_CYCLONE_10BT_TPC: /* 3c900B-TPC */
1312 sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
1313 sc->xl_xcvr = XL_XCVR_10BT;
1314 printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
1315 break;
1316 case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */
1317 sc->xl_media = XL_MEDIAOPT_MII;
1318 sc->xl_xcvr = XL_XCVR_MII;
1319 printf("xl%d: guessing MII\n", sc->xl_unit);
1320 break;
1321 case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */
1322 case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */
1323 sc->xl_media = XL_MEDIAOPT_BT4;
1324 sc->xl_xcvr = XL_XCVR_MII;
1325 printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
1326 break;
1327 case TC_DEVICEID_CYCLONE_10_100BT: /* 3c905B-TX */
1328 case TC_DEVICEID_CYCLONE_10_100BT_SERV: /* 3c980-TX */
1329 case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */
1330 sc->xl_media = XL_MEDIAOPT_BTX;
1331 sc->xl_xcvr = XL_XCVR_AUTO;
1332 printf("xl%d: guessing 10/100 internal\n", sc->xl_unit);
1333 break;
1334 case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */
1335 sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
1336 sc->xl_xcvr = XL_XCVR_AUTO;
1337 printf("xl%d: guessing 10/100 plus BNC/AUI\n", sc->xl_unit);
1338 break;
1339 default:
1340 printf("xl%d: unknown device ID: %x -- "
1341 "defaulting to 10baseT\n", sc->xl_unit, devid);
1342 sc->xl_media = XL_MEDIAOPT_BT;
1343 break;
1344 }
1345
1346 return;
1347}
1348
1349/*
1350 * Attach the interface. Allocate softc structures, do ifmedia
1351 * setup and ethernet/BPF attach.
1352 */
1353static void
1354xl_attach(config_id, unit)
1355 pcici_t config_id;
1356 int unit;
1357{
1358 int s, i;
1359#ifndef XL_USEIOSPACE
1360 vm_offset_t pbase, vbase;
1361#endif
1362 u_char eaddr[ETHER_ADDR_LEN];
1363 u_int32_t command;
1364 struct xl_softc *sc;
1365 struct ifnet *ifp;
1366 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1367 unsigned int round;
1368 caddr_t roundptr;
1369 struct xl_type *p;
1370 u_int16_t phy_vid, phy_did, phy_sts;
1371
1372 s = splimp();
1373
1374 sc = malloc(sizeof(struct xl_softc), M_DEVBUF, M_NOWAIT);
1375 if (sc == NULL) {
1376 printf("xl%d: no memory for softc struct!\n", unit);
1377 goto fail;
1378 }
1379 bzero(sc, sizeof(struct xl_softc));
1380
1381 /*
1382 * If this is a 3c905B, we have to check one extra thing.
1383 * The 905B supports power management and may be placed in
1384 * a low-power mode (D3 mode), typically by certain operating
1385 * systems which shall not be named. The PCI BIOS is supposed
1386 * to reset the NIC and bring it out of low-power mode, but
1387 * some do not. Consequently, we have to see if this chip
1388 * supports power management, and if so, make sure it's not
1389 * in low-power mode. If power management is available, the
1390 * capid byte will be 0x01.
1391 *
1392 * I _think_ that what actually happens is that the chip
1393 * loses its PCI configuration during the transition from
1394 * D3 back to D0; this means that it should be possible for
1395 * us to save the PCI iobase, membase and IRQ, put the chip
1396 * back in the D0 state, then restore the PCI config ourselves.
1397 */
1398
1399 command = pci_conf_read(config_id, XL_PCI_CAPID) & 0x000000FF;
1400 if (command == 0x01) {
1401
1402 command = pci_conf_read(config_id, XL_PCI_PWRMGMTCTRL);
1403 if (command & XL_PSTATE_MASK) {
1404 u_int32_t iobase, membase, irq;
1405
1406 /* Save important PCI config data. */
1407 iobase = pci_conf_read(config_id, XL_PCI_LOIO);
1408 membase = pci_conf_read(config_id, XL_PCI_LOMEM);
1409 irq = pci_conf_read(config_id, XL_PCI_INTLINE);
1410
1411 /* Reset the power state. */
1412 printf("xl%d: chip is in D%d power mode "
1413 "-- setting to D0\n", unit, command & XL_PSTATE_MASK);
1414 command &= 0xFFFFFFFC;
1415 pci_conf_write(config_id, XL_PCI_PWRMGMTCTRL, command);
1416
1417 /* Restore PCI config data. */
1418 pci_conf_write(config_id, XL_PCI_LOIO, iobase);
1419 pci_conf_write(config_id, XL_PCI_LOMEM, membase);
1420 pci_conf_write(config_id, XL_PCI_INTLINE, irq);
1421 }
1422 }
1423
1424 /*
1425 * Map control/status registers.
1426 */
1427 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1428 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1429 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1430 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1431
1432#ifdef XL_USEIOSPACE
1433 if (!(command & PCIM_CMD_PORTEN)) {
1434 printf("xl%d: failed to enable I/O ports!\n", unit);
1435 free(sc, M_DEVBUF);
1436 goto fail;
1437 }
1438
1439 if (!pci_map_port(config_id, XL_PCI_LOIO,
1440 (u_short *)&(sc->xl_bhandle))) {
1441 printf ("xl%d: couldn't map port\n", unit);
1442 printf ("xl%d: WARNING: this shouldn't happen! "
1443 "Possible PCI support code bug!", unit);
1444 printf ("xl%d: attempting to map iobase manually", unit);
1445 sc->xl_bhandle =
1446 pci_conf_read(config_id, XL_PCI_LOIO) & 0xFFFFFFE0;
1447 /*goto fail;*/
1448 }
1449
1450#ifdef __i386__
1451 sc->xl_btag = I386_BUS_SPACE_IO;
1452#endif
1453#ifdef __alpha__
1454 sc->xl_btag = ALPHA_BUS_SPACE_IO;
1455#endif
1456#else
1457 if (!(command & PCIM_CMD_MEMEN)) {
1458 printf("xl%d: failed to enable memory mapping!\n", unit);
1459 goto fail;
1460 }
1461
1462 if (!pci_map_mem(config_id, XL_PCI_LOMEM, &vbase, &pbase)) {
1463 printf ("xl%d: couldn't map memory\n", unit);
1464 goto fail;
1465 }
1466 sc->xl_bhandle = vbase;
1467#ifdef __i386__
1468 sc->xl_btag = I386_BUS_SPACE_MEM;
1469#endif
1470#ifdef __alpha__
1471 sc->xl_btag = ALPHA_BUS_SPACE_MEM;
1472#endif
1473#endif
1474
1475 /* Allocate interrupt */
1476 if (!pci_map_int(config_id, xl_intr, sc, &net_imask)) {
1477 printf("xl%d: couldn't map interrupt\n", unit);
1478 goto fail;
1479 }
1480
1481 /* Reset the adapter. */
1482 xl_reset(sc);
1483
1484 /*
1485 * Get station address from the EEPROM.
1486 */
1487 if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
1488 printf("xl%d: failed to read station address\n", sc->xl_unit);
1489 free(sc, M_DEVBUF);
1490 goto fail;
1491 }
1492
1493 /*
1494 * A 3Com chip was detected. Inform the world.
1495 */
1496 printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");
1497
1498 sc->xl_unit = unit;
1499 callout_handle_init(&sc->xl_stat_ch);
1500 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1501
1502 sc->xl_ldata_ptr = malloc(sizeof(struct xl_list_data) + 8,
1503 M_DEVBUF, M_NOWAIT);
1504 if (sc->xl_ldata_ptr == NULL) {
1505 free(sc, M_DEVBUF);
1506 printf("xl%d: no memory for list buffers!\n", unit);
1507 goto fail;
1508 }
1509
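	/*
	 * The list data was allocated with 8 bytes of slack so the
	 * descriptor area can be bumped up to an 8-byte boundary;
	 * the loop below advances the pointer one byte at a time
	 * until it is 8-byte aligned.
	 */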
1510 sc->xl_ldata = (struct xl_list_data *)sc->xl_ldata_ptr;
1511 round = (unsigned int)sc->xl_ldata_ptr & 0xF;
1512 roundptr = sc->xl_ldata_ptr;
1513 for (i = 0; i < 8; i++) {
1514 if (round % 8) {
1515 round++;
1516 roundptr++;
1517 } else
1518 break;
1519 }
1520 sc->xl_ldata = (struct xl_list_data *)roundptr;
1521 bzero(sc->xl_ldata, sizeof(struct xl_list_data));
1522
1523 ifp = &sc->arpcom.ac_if;
1524 ifp->if_softc = sc;
1525 ifp->if_unit = unit;
1526 ifp->if_name = "xl";
1527 ifp->if_mtu = ETHERMTU;
1528 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1529 ifp->if_ioctl = xl_ioctl;
1530 ifp->if_output = ether_output;
1531 ifp->if_start = xl_start;
1532 ifp->if_watchdog = xl_watchdog;
1533 ifp->if_init = xl_init;
1534 ifp->if_baudrate = 10000000;
1535 ifp->if_snd.ifq_maxlen = XL_TX_LIST_CNT - 1;
1536
1537 /*
1538 * Figure out the card type. 3c905B adapters have the
1539 * 'supportsNoTxLength' bit set in the capabilities
1540 * word in the EEPROM.
1541 */
1542 xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
1543 if (sc->xl_caps & XL_CAPS_NO_TXLENGTH)
1544 sc->xl_type = XL_TYPE_905B;
1545 else
1546 sc->xl_type = XL_TYPE_90X;
1547
1548 /*
1549 * Now we have to see what sort of media we have.
1550	 * This includes probing for an MII interface and a
1551 * possible PHY.
1552 */
1553 XL_SEL_WIN(3);
1554 sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
1555 if (bootverbose)
1556 printf("xl%d: media options word: %x\n", sc->xl_unit,
1557 sc->xl_media);
1558
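	/*
	 * The EEPROM's internal config word records the default
	 * transceiver (connector) selection; extract that field for
	 * use when choosing a default media below.
	 */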
1559 xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0);
1560 sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
1561 sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
1562
1563 xl_mediacheck(sc);
1564
1565 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
1566 || sc->xl_media & XL_MEDIAOPT_BT4) {
1567 /*
1568 * In theory I shouldn't need this, but... if this
1569 * card supports an MII, either an external one or
1570 * an internal fake one, select it in the internal
1571 * config register before trying to probe it.
1572 */
1573 u_int32_t icfg;
1574
1575 XL_SEL_WIN(3);
1576 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
1577 icfg &= ~XL_ICFG_CONNECTOR_MASK;
1578 if (sc->xl_media & XL_MEDIAOPT_MII ||
1579 sc->xl_media & XL_MEDIAOPT_BT4)
1580 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
1581 if (sc->xl_media & XL_MEDIAOPT_BTX)
1582 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
1583 if (sc->xl_media & XL_MEDIAOPT_BFX)
1584 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
1585 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
1586
1587 if (bootverbose)
1588 printf("xl%d: probing for a PHY\n", sc->xl_unit);
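		/*
		 * Walk the possible MII addresses: reset the candidate
		 * PHY at each address and settle on the first one whose
		 * status register reads back non-zero.
		 */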
1589 for (i = XL_PHYADDR_MIN; i < XL_PHYADDR_MAX + 1; i++) {
1590 if (bootverbose)
1591 printf("xl%d: checking address: %d\n",
1592 sc->xl_unit, i);
1593 sc->xl_phy_addr = i;
1594 xl_phy_writereg(sc, XL_PHY_GENCTL, PHY_BMCR_RESET);
1595 DELAY(500);
1596 while(xl_phy_readreg(sc, XL_PHY_GENCTL)
1597 & PHY_BMCR_RESET);
1598 if ((phy_sts = xl_phy_readreg(sc, XL_PHY_GENSTS)))
1599 break;
1600 }
1601 if (phy_sts) {
1602 phy_vid = xl_phy_readreg(sc, XL_PHY_VENID);
1603 phy_did = xl_phy_readreg(sc, XL_PHY_DEVID);
1604 if (bootverbose)
1605 printf("xl%d: found PHY at address %d, ",
1606 sc->xl_unit, sc->xl_phy_addr);
1607 if (bootverbose)
1608 printf("vendor id: %x device id: %x\n",
1609 phy_vid, phy_did);
1610 p = xl_phys;
1611 while(p->xl_vid) {
1612 if (phy_vid == p->xl_vid &&
1613 (phy_did | 0x000F) == p->xl_did) {
1614 sc->xl_pinfo = p;
1615 break;
1616 }
1617 p++;
1618 }
1619 if (sc->xl_pinfo == NULL)
1620 sc->xl_pinfo = &xl_phys[PHY_UNKNOWN];
1621 if (bootverbose)
1622 printf("xl%d: PHY type: %s\n",
1623 sc->xl_unit, sc->xl_pinfo->xl_name);
1624 } else {
1625 printf("xl%d: MII without any phy!\n", sc->xl_unit);
1626 }
1627 }
1628
1629 /*
1630 * Do ifmedia setup.
1631 */
1632 ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
1633
1634 if (sc->xl_media & XL_MEDIAOPT_BT) {
1635 if (bootverbose)
1636 printf("xl%d: found 10baseT\n", sc->xl_unit);
1637 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1638 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1639 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1640 ifmedia_add(&sc->ifmedia,
1641 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1642 }
1643
1644 if (sc->xl_media & XL_MEDIAOPT_AUI) {
1645 if (bootverbose)
1646 printf("xl%d: found AUI\n", sc->xl_unit);
1647 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1648 }
1649
1650 if (sc->xl_media & XL_MEDIAOPT_BNC) {
1651 if (bootverbose)
1652 printf("xl%d: found BNC\n", sc->xl_unit);
1653 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
1654 }
1655
1656 /*
1657 * Technically we could use xl_getmode_mii() to scan the
1658 * modes, but the built-in BTX mode on the 3c905B implies
1659 * 10/100 full/half duplex support anyway, so why not just
1660 * do it and get it over with.
1661 */
1662 if (sc->xl_media & XL_MEDIAOPT_BTX) {
1663 if (bootverbose)
1664 printf("xl%d: found 100baseTX\n", sc->xl_unit);
1665 ifp->if_baudrate = 100000000;
1666 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1667 ifmedia_add(&sc->ifmedia,
1668 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1669 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
1670 ifmedia_add(&sc->ifmedia,
1671 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1672 if (sc->xl_pinfo != NULL)
1673 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1674 }
1675
1676 if (sc->xl_media & XL_MEDIAOPT_BFX) {
1677 if (bootverbose)
1678 printf("xl%d: found 100baseFX\n", sc->xl_unit);
1679 ifp->if_baudrate = 100000000;
1680 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
1681 }
1682
1683 /*
1684 * If there's an MII, we have to probe its modes
1685 * separately.
1686 */
1687 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) {
1688 if (bootverbose)
1689 printf("xl%d: found MII\n", sc->xl_unit);
1690 xl_getmode_mii(sc);
1691 }
1692
1693 /* Choose a default media. */
1694 switch(sc->xl_xcvr) {
1695 case XL_XCVR_10BT:
1696 media = IFM_ETHER|IFM_10_T;
1697 xl_setmode(sc, media);
1698 break;
1699 case XL_XCVR_AUI:
1700 media = IFM_ETHER|IFM_10_5;
1701 xl_setmode(sc, media);
1702 break;
1703 case XL_XCVR_COAX:
1704 media = IFM_ETHER|IFM_10_2;
1705 xl_setmode(sc, media);
1706 break;
1707 case XL_XCVR_AUTO:
1708#ifdef XL_BACKGROUND_AUTONEG
1709 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1710#else
1711 xl_autoneg_mii(sc, XL_FLAG_FORCEDELAY, 1);
1712#endif
1713 media = sc->ifmedia.ifm_media;
1714 break;
1715 case XL_XCVR_100BTX:
1716 case XL_XCVR_MII:
1717#ifdef XL_BACKGROUND_AUTONEG
1718 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1719#else
1720 xl_autoneg_mii(sc, XL_FLAG_FORCEDELAY, 1);
1721#endif
1722 media = sc->ifmedia.ifm_media;
1723 break;
1724 case XL_XCVR_100BFX:
1725 media = IFM_ETHER|IFM_100_FX;
1726 break;
1727 default:
1728 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
1729 sc->xl_xcvr);
1730 /*
1731 * This will probably be wrong, but it prevents
1732 * the ifmedia code from panicking.
1733 */
1734 media = IFM_ETHER|IFM_10_T;
1735 break;
1736 }
1737
1738 ifmedia_set(&sc->ifmedia, media);
1739
1740 /*
1741 * Call MI attach routines.
1742 */
1743 if_attach(ifp);
1744 ether_ifattach(ifp);
1745
1746#if NBPFILTER > 0
1747 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1748#endif
1749 at_shutdown(xl_shutdown, sc, SHUTDOWN_POST_SYNC);
1750
1751fail:
1752 splx(s);
1753 return;
1754}
1755
1756/*
1757 * Initialize the transmit descriptors.
1758 */
1759static int xl_list_tx_init(sc)
1760 struct xl_softc *sc;
1761{
1762 struct xl_chain_data *cd;
1763 struct xl_list_data *ld;
1764 int i;
1765
1766 cd = &sc->xl_cdata;
1767 ld = sc->xl_ldata;
1768 for (i = 0; i < XL_TX_LIST_CNT; i++) {
1769 cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1770 cd->xl_tx_chain[i].xl_unsent = 0;
1771 if (i == (XL_TX_LIST_CNT - 1))
1772 cd->xl_tx_chain[i].xl_next = NULL;
1773 else
1774 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1775 }
1776
1777 cd->xl_tx_free = &cd->xl_tx_chain[0];
1778 cd->xl_tx_tail = cd->xl_tx_head = NULL;
1779
1780 return(0);
1781}
1782
1783/*
1784 * Initialize the RX descriptors and allocate mbufs for them. Note that
1785 * we arrange the descriptors in a closed ring, so that the last descriptor
1786 * points back to the first.
1787 */
1788static int xl_list_rx_init(sc)
1789 struct xl_softc *sc;
1790{
1791 struct xl_chain_data *cd;
1792 struct xl_list_data *ld;
1793 int i;
1794
1795 cd = &sc->xl_cdata;
1796 ld = sc->xl_ldata;
1797
1798 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1799 cd->xl_rx_chain[i].xl_ptr =
1800 (struct xl_list_onefrag *)&ld->xl_rx_list[i];
1801 if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
1802 return(ENOBUFS);
1803 if (i == (XL_RX_LIST_CNT - 1)) {
1804 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0];
1805 ld->xl_rx_list[i].xl_next =
1806 vtophys(&ld->xl_rx_list[0]);
1807 } else {
1808 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1];
1809 ld->xl_rx_list[i].xl_next =
1810 vtophys(&ld->xl_rx_list[i + 1]);
1811 }
1812 }
1813
1814 cd->xl_rx_head = &cd->xl_rx_chain[0];
1815
1816 return(0);
1817}
1818
1819/*
1820 * Initialize an RX descriptor and attach an MBUF cluster.
1821 */
1822static int xl_newbuf(sc, c)
1823 struct xl_softc *sc;
1824 struct xl_chain_onefrag *c;
1825{
1826 struct mbuf *m_new = NULL;
1827
1828 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1829 if (m_new == NULL) {
1830 printf("xl%d: no memory for rx list -- packet dropped!\n",
1831 sc->xl_unit);
1832 return(ENOBUFS);
1833 }
1834
1835 MCLGET(m_new, M_DONTWAIT);
1836 if (!(m_new->m_flags & M_EXT)) {
1837 printf("xl%d: no memory for rx list -- packet dropped!\n",
1838 sc->xl_unit);
1839 m_freem(m_new);
1840 return(ENOBUFS);
1841 }
1842
1843#ifdef __alpha__
1844 /* Force longword alignment for packet payload to pacify alpha. */
1845 m_new->m_data += 2;
1846#endif
1847
1848 c->xl_mbuf = m_new;
1849 c->xl_ptr->xl_status = 0;
1850 c->xl_ptr->xl_frag.xl_addr = vtophys(mtod(m_new, caddr_t));
1851 c->xl_ptr->xl_frag.xl_len = MCLBYTES | XL_LAST_FRAG;
1852
1853 return(0);
1854}
1855
1856/*
1857 * A frame has been uploaded: pass the resulting mbuf chain up to
1858 * the higher level protocols.
1859 */
1860static void xl_rxeof(sc)
1861 struct xl_softc *sc;
1862{
1863 struct ether_header *eh;
1864 struct mbuf *m;
1865 struct ifnet *ifp;
1866 struct xl_chain_onefrag *cur_rx;
1867 int total_len = 0;
1868 u_int16_t rxstat;
1869
1870 ifp = &sc->arpcom.ac_if;
1871
1872again:
1873
1874 while((rxstat = sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) {
1875 cur_rx = sc->xl_cdata.xl_rx_head;
1876 sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1877
1878 /*
1879 * If an error occurs, update stats, clear the
1880 * status word and leave the mbuf cluster in place:
1881 * it should simply get re-used next time this descriptor
1882 * comes up in the ring.
1883 */
1884 if (rxstat & XL_RXSTAT_UP_ERROR) {
1885 ifp->if_ierrors++;
1886 cur_rx->xl_ptr->xl_status = 0;
1887 continue;
1888 }
1889
1890 /*
1891		 * If the error bit was not set, the upload complete
1892 * bit should be set which means we have a valid packet.
1893 * If not, something truly strange has happened.
1894 */
1895 if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1896 printf("xl%d: bad receive status -- packet dropped",
1897 sc->xl_unit);
1898 ifp->if_ierrors++;
1899 cur_rx->xl_ptr->xl_status = 0;
1900 continue;
1901 }
1902
1903 /* No errors; receive the packet. */
1904 m = cur_rx->xl_mbuf;
1905 total_len = cur_rx->xl_ptr->xl_status & XL_RXSTAT_LENMASK;
1906
1907 /*
1908 * Try to conjure up a new mbuf cluster. If that
1909 * fails, it means we have an out of memory condition and
1910 * should leave the buffer in place and continue. This will
1911 * result in a lost packet, but there's little else we
1912 * can do in this situation.
1913 */
1914 if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
1915 ifp->if_ierrors++;
1916 cur_rx->xl_ptr->xl_status = 0;
1917 continue;
1918 }
1919
1920 ifp->if_ipackets++;
1892 eh = mtod(m, struct ether_header *);
1893 m->m_pkthdr.rcvif = ifp;
1894#if NBPFILTER > 0
1895 /*
1896 * Handle BPF listeners. Let the BPF user see the packet, but
1897 * don't pass it up to the ether_input() layer unless it's
1898 * a broadcast packet, multicast packet, matches our ethernet
1899 * address or the interface is in promiscuous mode.
1900 */
1901 if (ifp->if_bpf) {
1902 m->m_pkthdr.len = m->m_len = total_len;
1903 bpf_mtap(ifp, m);
1904 if (ifp->if_flags & IFF_PROMISC &&
1905 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1906 ETHER_ADDR_LEN) &&
1907 (eh->ether_dhost[0] & 1) == 0)) {
1908 m_freem(m);
1909 continue;
1910 }
1911 }
1912#endif
1913 /* Remove header from mbuf and pass it on. */
1914 m->m_pkthdr.len = m->m_len =
1915 total_len - sizeof(struct ether_header);
1916 m->m_data += sizeof(struct ether_header);
1917 ether_input(ifp, eh, m);
1918 }
1919
1920 /*
1921 * Handle the 'end of channel' condition. When the upload
1922 * engine hits the end of the RX ring, it will stall. This
1923 * is our cue to flush the RX ring, reload the uplist pointer
1924 * register and unstall the engine.
1925 * XXX This is actually a little goofy. With the ThunderLAN
1926 * chip, you get an interrupt when the receiver hits the end
1927	 * of the receive ring, which tells you exactly when
1928 * you need to reload the ring pointer. Here we have to
1929 * fake it. I'm mad at myself for not being clever enough
1930 * to avoid the use of a goto here.
1931 */
1932 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1933 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1934 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1935 xl_wait(sc);
1936 CSR_WRITE_4(sc, XL_UPLIST_PTR,
1937 vtophys(&sc->xl_ldata->xl_rx_list[0]));
1938 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1939 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1940 goto again;
1941 }
1942
1943 return;
1944}
1945
1946/*
1947 * A frame was downloaded to the chip. It's safe for us to clean up
1948 * the list buffers.
1949 */
1950static void xl_txeof(sc)
1951 struct xl_softc *sc;
1952{
1953 struct xl_chain *cur_tx;
1954 struct ifnet *ifp;
1955
1956 ifp = &sc->arpcom.ac_if;
1957
1958 /* Clear the timeout timer. */
1959 ifp->if_timer = 0;
1960
1961 /*
1962 * Go through our tx list and free mbufs for those
1963	 * frames that have been downloaded. Note: the 3c905B
1964 * sets a special bit in the status word to let us
1965 * know that a frame has been downloaded, but the
1966 * original 3c900/3c905 adapters don't do that.
1967 * Consequently, we have to use a different test if
1968 * xl_type != XL_TYPE_905B.
1969 */
1970 while(sc->xl_cdata.xl_tx_head != NULL) {
1971 cur_tx = sc->xl_cdata.xl_tx_head;
1972 if ((sc->xl_type == XL_TYPE_905B &&
1921 eh = mtod(m, struct ether_header *);
1922 m->m_pkthdr.rcvif = ifp;
1923#if NBPFILTER > 0
1924 /*
1925 * Handle BPF listeners. Let the BPF user see the packet, but
1926 * don't pass it up to the ether_input() layer unless it's
1927 * a broadcast packet, multicast packet, matches our ethernet
1928 * address or the interface is in promiscuous mode.
1929 */
1930 if (ifp->if_bpf) {
1931 m->m_pkthdr.len = m->m_len = total_len;
1932 bpf_mtap(ifp, m);
1933 if (ifp->if_flags & IFF_PROMISC &&
1934 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1935 ETHER_ADDR_LEN) &&
1936 (eh->ether_dhost[0] & 1) == 0)) {
1937 m_freem(m);
1938 continue;
1939 }
1940 }
1941#endif
1942 /* Remove header from mbuf and pass it on. */
1943 m->m_pkthdr.len = m->m_len =
1944 total_len - sizeof(struct ether_header);
1945 m->m_data += sizeof(struct ether_header);
1946 ether_input(ifp, eh, m);
1947 }
1948
1949 /*
1950 * Handle the 'end of channel' condition. When the upload
1951 * engine hits the end of the RX ring, it will stall. This
1952 * is our cue to flush the RX ring, reload the uplist pointer
1953 * register and unstall the engine.
1954 * XXX This is actually a little goofy. With the ThunderLAN
1955 * chip, you get an interrupt when the receiver hits the end
1956	 * of the receive ring, which tells you exactly when
1957 * you need to reload the ring pointer. Here we have to
1958 * fake it. I'm mad at myself for not being clever enough
1959 * to avoid the use of a goto here.
1960 */
1961 if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1962 CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1963 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1964 xl_wait(sc);
1965 CSR_WRITE_4(sc, XL_UPLIST_PTR,
1966 vtophys(&sc->xl_ldata->xl_rx_list[0]));
1967 sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1968 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1969 goto again;
1970 }
1971
1972 return;
1973}
1974
1975/*
1976 * A frame was downloaded to the chip. It's safe for us to clean up
1977 * the list buffers.
1978 */
1979static void xl_txeof(sc)
1980 struct xl_softc *sc;
1981{
1982 struct xl_chain *cur_tx;
1983 struct ifnet *ifp;
1984
1985 ifp = &sc->arpcom.ac_if;
1986
1987 /* Clear the timeout timer. */
1988 ifp->if_timer = 0;
1989
1990 /*
1991 * Go through our tx list and free mbufs for those
1992	 * frames that have been downloaded. Note: the 3c905B
1993 * sets a special bit in the status word to let us
1994 * know that a frame has been downloaded, but the
1995 * original 3c900/3c905 adapters don't do that.
1996 * Consequently, we have to use a different test if
1997 * xl_type != XL_TYPE_905B.
1998 */
1999 while(sc->xl_cdata.xl_tx_head != NULL) {
2000 cur_tx = sc->xl_cdata.xl_tx_head;
2001 if ((sc->xl_type == XL_TYPE_905B &&
1973 !(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE)) ||
1974 CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
2002 !(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE)) ||
2003 (CSR_READ_1(sc, XL_TX_STATUS) & XL_TXSTATUS_COMPLETE) ||
2004 cur_tx->xl_unsent) {
1975 break;
1976 }
1977 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
2005 break;
2006 }
2007 sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1978
1979 m_freem(cur_tx->xl_mbuf);
1980 cur_tx->xl_mbuf = NULL;
2008 m_freem(cur_tx->xl_mbuf);
2009 cur_tx->xl_mbuf = NULL;
2010 ifp->if_opackets++;
1981
1982 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1983 sc->xl_cdata.xl_tx_free = cur_tx;
1984 }
1985
1986 if (sc->xl_cdata.xl_tx_head == NULL) {
1987 ifp->if_flags &= ~IFF_OACTIVE;
1988 sc->xl_cdata.xl_tx_tail = NULL;
1989 if (sc->xl_want_auto)
1990 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
1991 } else {
2011
2012 cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
2013 sc->xl_cdata.xl_tx_free = cur_tx;
2014 }
2015
2016 if (sc->xl_cdata.xl_tx_head == NULL) {
2017 ifp->if_flags &= ~IFF_OACTIVE;
2018 sc->xl_cdata.xl_tx_tail = NULL;
2019 if (sc->xl_want_auto)
2020 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
2021 } else {
1992 if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1993 !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
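			/* Hand the waiting chain to the download engine and unstall it. */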
2022 if (sc->xl_cdata.xl_tx_head->xl_unsent) {
2023 sc->xl_cdata.xl_tx_head->xl_unsent = 0;
1994 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1995 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
1996 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1997 }
1998 }
1999
2000 return;
2001}
2002
2003/*
2004 * TX 'end of channel' interrupt handler. Actually, we should
2005 * only get a 'TX complete' interrupt if there's a transmit error,
2006 * so this is really a TX error handler.
2007 */
2008static void xl_txeoc(sc)
2009 struct xl_softc *sc;
2010{
2011 u_int8_t txstat;
2012
2013 while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
2014 if (txstat & XL_TXSTATUS_UNDERRUN ||
2015 txstat & XL_TXSTATUS_JABBER ||
2016 txstat & XL_TXSTATUS_RECLAIM) {
2017 printf("xl%d: transmission error: %x\n",
2018 sc->xl_unit, txstat);
2019 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2020 xl_wait(sc);
2021 if (sc->xl_cdata.xl_tx_head != NULL)
2022 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2023 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
2024 /*
2025 * Remember to set this for the
2026 * first generation 3c90X chips.
2027 */
2028 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2029 if (sc->xl_type == XL_TYPE_905B) {
2030 CSR_WRITE_2(sc, XL_COMMAND,
2031 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2032 }
2033 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2034 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2035 } else {
2036 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2037 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2038 }
2039 /*
2040 * Write an arbitrary byte to the TX_STATUS register
2041 * to clear this interrupt/error and advance to the next.
2042 */
2043 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
2044 }
2045
2046 return;
2047}
2048
2049static void xl_intr(arg)
2050 void *arg;
2051{
2052 struct xl_softc *sc;
2053 struct ifnet *ifp;
2054 u_int16_t status;
2055
2056 sc = arg;
2057 ifp = &sc->arpcom.ac_if;
2058
2059 /* Disable interrupts. */
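	/* (An interrupt enable command with an empty bit mask masks off all sources.) */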
2060 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2061
2062 for (;;) {
2063
2064 status = CSR_READ_2(sc, XL_STATUS);
2065
2066 if ((status & XL_INTRS) == 0)
2067 break;
2068
2069 if (status & XL_STAT_UP_COMPLETE) {
2070 xl_rxeof(sc);
2071 CSR_WRITE_2(sc, XL_COMMAND,
2072 XL_CMD_INTR_ACK|XL_STAT_UP_COMPLETE);
2073 }
2074
2075 if (status & XL_STAT_DOWN_COMPLETE) {
2076 xl_txeof(sc);
2077 CSR_WRITE_2(sc, XL_COMMAND,
2078 XL_CMD_INTR_ACK|XL_STAT_DOWN_COMPLETE);
2079 }
2080
2081 if (status & XL_STAT_TX_COMPLETE) {
2082 ifp->if_oerrors++;
2083 xl_txeoc(sc);
2084 CSR_WRITE_2(sc, XL_COMMAND,
2085 XL_CMD_INTR_ACK|XL_STAT_TX_COMPLETE);
2086 }
2087
2088 if (status & XL_STAT_ADFAIL) {
2089 xl_reset(sc);
2090 xl_init(sc);
2091 CSR_WRITE_2(sc, XL_COMMAND,
2092 XL_CMD_INTR_ACK|XL_STAT_ADFAIL);
2093 }
2094
2095 if (status & XL_STAT_STATSOFLOW) {
2096 sc->xl_stats_no_timeout = 1;
2097 xl_stats_update(sc);
2098 sc->xl_stats_no_timeout = 0;
2099 }
2100
2101 CSR_WRITE_2(sc, XL_STATUS, XL_CMD_INTR_ACK|XL_STAT_INTREQ|
2102 XL_STAT_INTLATCH);
2103 }
2104
2105 /* Re-enable interrupts. */
2106 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2107
2108 XL_SEL_WIN(7);
2109
2110 if (ifp->if_snd.ifq_head != NULL) {
2111 xl_start(ifp);
2112 }
2113
2114 return;
2115}
2116
2117static void xl_stats_update(xsc)
2118 void *xsc;
2119{
2120 struct xl_softc *sc;
2121 struct ifnet *ifp;
2122 struct xl_stats xl_stats;
2123 u_int8_t *p;
2124 int i;
2125
2126 bzero((char *)&xl_stats, sizeof(struct xl_stats));
2127
2128 sc = xsc;
2129 ifp = &sc->arpcom.ac_if;
2130
2131 p = (u_int8_t *)&xl_stats;
2132
2133 /* Read all the stats registers. */
2134 XL_SEL_WIN(6);
2135
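	/* This relies on struct xl_stats mirroring the window 6 register layout byte for byte. */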
2136 for (i = 0; i < 16; i++)
2137 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
2138
2024 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2025 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
2026 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2027 }
2028 }
2029
2030 return;
2031}
2032
2033/*
2034 * TX 'end of channel' interrupt handler. Actually, we should
2035 * only get a 'TX complete' interrupt if there's a transmit error,
2036 * so this is really a TX error handler.
2037 */
2038static void xl_txeoc(sc)
2039 struct xl_softc *sc;
2040{
2041 u_int8_t txstat;
2042
2043 while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
2044 if (txstat & XL_TXSTATUS_UNDERRUN ||
2045 txstat & XL_TXSTATUS_JABBER ||
2046 txstat & XL_TXSTATUS_RECLAIM) {
2047 printf("xl%d: transmission error: %x\n",
2048 sc->xl_unit, txstat);
2049 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2050 xl_wait(sc);
2051 if (sc->xl_cdata.xl_tx_head != NULL)
2052 CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
2053 vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
2054 /*
2055 * Remember to set this for the
2056 * first generation 3c90X chips.
2057 */
2058 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2059 if (sc->xl_type == XL_TYPE_905B) {
2060 CSR_WRITE_2(sc, XL_COMMAND,
2061 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2062 }
2063 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2064 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2065 } else {
2066 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2067 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2068 }
2069 /*
2070 * Write an arbitrary byte to the TX_STATUS register
2071 * to clear this interrupt/error and advance to the next.
2072 */
2073 CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
2074 }
2075
2076 return;
2077}
2078
2079static void xl_intr(arg)
2080 void *arg;
2081{
2082 struct xl_softc *sc;
2083 struct ifnet *ifp;
2084 u_int16_t status;
2085
2086 sc = arg;
2087 ifp = &sc->arpcom.ac_if;
2088
2089 /* Disable interrupts. */
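	/* (An interrupt enable command with an empty bit mask masks off all sources.) */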
2090 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2091
2092 for (;;) {
2093
2094 status = CSR_READ_2(sc, XL_STATUS);
2095
2096 if ((status & XL_INTRS) == 0)
2097 break;
2098
2099 if (status & XL_STAT_UP_COMPLETE) {
2100 xl_rxeof(sc);
2101 CSR_WRITE_2(sc, XL_COMMAND,
2102 XL_CMD_INTR_ACK|XL_STAT_UP_COMPLETE);
2103 }
2104
2105 if (status & XL_STAT_DOWN_COMPLETE) {
2106 xl_txeof(sc);
2107 CSR_WRITE_2(sc, XL_COMMAND,
2108 XL_CMD_INTR_ACK|XL_STAT_DOWN_COMPLETE);
2109 }
2110
2111 if (status & XL_STAT_TX_COMPLETE) {
2112 ifp->if_oerrors++;
2113 xl_txeoc(sc);
2114 CSR_WRITE_2(sc, XL_COMMAND,
2115 XL_CMD_INTR_ACK|XL_STAT_TX_COMPLETE);
2116 }
2117
2118 if (status & XL_STAT_ADFAIL) {
2119 xl_reset(sc);
2120 xl_init(sc);
2121 CSR_WRITE_2(sc, XL_COMMAND,
2122 XL_CMD_INTR_ACK|XL_STAT_ADFAIL);
2123 }
2124
2125 if (status & XL_STAT_STATSOFLOW) {
2126 sc->xl_stats_no_timeout = 1;
2127 xl_stats_update(sc);
2128 sc->xl_stats_no_timeout = 0;
2129 }
2130
2131 CSR_WRITE_2(sc, XL_STATUS, XL_CMD_INTR_ACK|XL_STAT_INTREQ|
2132 XL_STAT_INTLATCH);
2133 }
2134
2135 /* Re-enable interrupts. */
2136 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2137
2138 XL_SEL_WIN(7);
2139
2140 if (ifp->if_snd.ifq_head != NULL) {
2141 xl_start(ifp);
2142 }
2143
2144 return;
2145}
2146
2147static void xl_stats_update(xsc)
2148 void *xsc;
2149{
2150 struct xl_softc *sc;
2151 struct ifnet *ifp;
2152 struct xl_stats xl_stats;
2153 u_int8_t *p;
2154 int i;
2155
2156 bzero((char *)&xl_stats, sizeof(struct xl_stats));
2157
2158 sc = xsc;
2159 ifp = &sc->arpcom.ac_if;
2160
2161 p = (u_int8_t *)&xl_stats;
2162
2163 /* Read all the stats registers. */
2164 XL_SEL_WIN(6);
2165
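	/* This relies on struct xl_stats mirroring the window 6 register layout byte for byte. */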
2166 for (i = 0; i < 16; i++)
2167 *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
2168
2139 ifp->if_ipackets += xl_rx_goodframes(xl_stats);
2140 ifp->if_opackets += xl_tx_goodframes(xl_stats);
2141
2142 ifp->if_ierrors += xl_stats.xl_rx_overrun;
2143
2144 ifp->if_collisions += xl_stats.xl_tx_multi_collision +
2145 xl_stats.xl_tx_single_collision +
2146 xl_stats.xl_tx_late_collision;
2147
2148 /*
2149 * Boomerang and cyclone chips have an extra stats counter
2150 * in window 4 (BadSSD). We have to read this too in order
2151 * to clear out all the stats registers and avoid a statsoflow
2152 * interrupt.
2153 */
2154 XL_SEL_WIN(4);
2155 CSR_READ_1(sc, XL_W4_BADSSD);
2156
2157 XL_SEL_WIN(7);
2158
2159 if (!sc->xl_stats_no_timeout)
2160 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2161
2162 return;
2163}
2164
2165/*
2166 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2167 * pointers to the fragment pointers.
2168 */
2169static int xl_encap(sc, c, m_head)
2170 struct xl_softc *sc;
2171 struct xl_chain *c;
2172 struct mbuf *m_head;
2173{
2174 int frag = 0;
2175 struct xl_frag *f = NULL;
2176 int total_len;
2177 struct mbuf *m;
2178
2179 /*
2180 * Start packing the mbufs in this chain into
2181 * the fragment pointers. Stop when we run out
2182 * of fragments or hit the end of the mbuf chain.
2183 */
2184 m = m_head;
2185 total_len = 0;
2186
2187 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
2188 if (m->m_len != 0) {
2189 if (frag == XL_MAXFRAGS)
2190 break;
2191 total_len+= m->m_len;
2192 c->xl_ptr->xl_frag[frag].xl_addr =
2193 vtophys(mtod(m, vm_offset_t));
2194 c->xl_ptr->xl_frag[frag].xl_len = m->m_len;
2195 frag++;
2196 }
2197 }
2198
2199 /*
2200 * Handle special case: we used up all 63 fragments,
2201 * but we have more mbufs left in the chain. Copy the
2202 * data into an mbuf cluster. Note that we don't
2203 * bother clearing the values in the other fragment
2204 * pointers/counters; it wouldn't gain us anything,
2205 * and would waste cycles.
2206 */
2207 if (m != NULL) {
2208 struct mbuf *m_new = NULL;
2209
2210 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2211 if (m_new == NULL) {
2212			printf("xl%d: no memory for tx list\n", sc->xl_unit);
2213 return(1);
2214 }
2215 if (m_head->m_pkthdr.len > MHLEN) {
2216 MCLGET(m_new, M_DONTWAIT);
2217 if (!(m_new->m_flags & M_EXT)) {
2218 m_freem(m_new);
2219				printf("xl%d: no memory for tx list\n",
2220 sc->xl_unit);
2221 return(1);
2222 }
2223 }
2224 m_copydata(m_head, 0, m_head->m_pkthdr.len,
2225 mtod(m_new, caddr_t));
2226 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
2227 m_freem(m_head);
2228 m_head = m_new;
2229 f = &c->xl_ptr->xl_frag[0];
2230 f->xl_addr = vtophys(mtod(m_new, caddr_t));
2231 f->xl_len = total_len = m_new->m_len;
2232 frag = 1;
2233 }
2234
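	/*
	 * Terminate the fragment list and record the frame length in the
	 * status word; xl_start() fills in the next pointer when chaining.
	 */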
2235 c->xl_mbuf = m_head;
2236 c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG;
2237 c->xl_ptr->xl_status = total_len;
2238 c->xl_ptr->xl_next = 0;
2239
2240 return(0);
2241}
2242
2243/*
2244 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2245 * to the mbuf data regions directly in the transmit lists. We also save a
2246 * copy of the pointers since the transmit list fragment pointers are
2247 * physical addresses.
2248 */
2249
2250static void xl_start(ifp)
2251 struct ifnet *ifp;
2252{
2253 struct xl_softc *sc;
2254 struct mbuf *m_head = NULL;
2255 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
2256
2257 sc = ifp->if_softc;
2258
2259 if (sc->xl_autoneg) {
2260 sc->xl_tx_pend = 1;
2261 return;
2262 }
2263
2264 /*
2265 * Check for an available queue slot. If there are none,
2266 * punt.
2267 */
2268 if (sc->xl_cdata.xl_tx_free == NULL) {
2269 xl_txeoc(sc);
2270 xl_txeof(sc);
2271 if (sc->xl_cdata.xl_tx_free == NULL) {
2272 ifp->if_flags |= IFF_OACTIVE;
2273 return;
2274 }
2275 }
2276
2277 start_tx = sc->xl_cdata.xl_tx_free;
2278
2279 while(sc->xl_cdata.xl_tx_free != NULL) {
2280 IF_DEQUEUE(&ifp->if_snd, m_head);
2281 if (m_head == NULL)
2282 break;
2283
2284 /* Pick a descriptor off the free list. */
2285 cur_tx = sc->xl_cdata.xl_tx_free;
2286 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
2287
2288 cur_tx->xl_next = NULL;
2289
2290 /* Pack the data into the descriptor. */
2291 xl_encap(sc, cur_tx, m_head);
2292
2293 /* Chain it together. */
2294 if (prev != NULL) {
2295 prev->xl_next = cur_tx;
2296 prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr);
2297 }
2298 prev = cur_tx;
2299
2300#if NBPFILTER > 0
2301 /*
2302 * If there's a BPF listener, bounce a copy of this frame
2303 * to him.
2304 */
2305 if (ifp->if_bpf)
2306 bpf_mtap(ifp, cur_tx->xl_mbuf);
2307#endif
2308 }
2309
2310 /*
2311 * If there are no packets queued, bail.
2312 */
2313 if (cur_tx == NULL)
2314 return;
2315
2316 /*
2317	 * Place the request for the download interrupt
2318	 * in the last descriptor in the chain. This way, if
2319	 * we're chaining several packets at once, we'll only
2320	 * get an interrupt once for the whole chain rather than
2321 * once for each packet.
2322 */
2323 cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;
2324
2169 ifp->if_ierrors += xl_stats.xl_rx_overrun;
2170
2171 ifp->if_collisions += xl_stats.xl_tx_multi_collision +
2172 xl_stats.xl_tx_single_collision +
2173 xl_stats.xl_tx_late_collision;
2174
2175 /*
2176 * Boomerang and cyclone chips have an extra stats counter
2177 * in window 4 (BadSSD). We have to read this too in order
2178 * to clear out all the stats registers and avoid a statsoflow
2179 * interrupt.
2180 */
2181 XL_SEL_WIN(4);
2182 CSR_READ_1(sc, XL_W4_BADSSD);
2183
2184 XL_SEL_WIN(7);
2185
2186 if (!sc->xl_stats_no_timeout)
2187 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2188
2189 return;
2190}
2191
2192/*
2193 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2194 * pointers to the fragment pointers.
2195 */
2196static int xl_encap(sc, c, m_head)
2197 struct xl_softc *sc;
2198 struct xl_chain *c;
2199 struct mbuf *m_head;
2200{
2201 int frag = 0;
2202 struct xl_frag *f = NULL;
2203 int total_len;
2204 struct mbuf *m;
2205
2206 /*
2207 * Start packing the mbufs in this chain into
2208 * the fragment pointers. Stop when we run out
2209 * of fragments or hit the end of the mbuf chain.
2210 */
2211 m = m_head;
2212 total_len = 0;
2213
2214 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
2215 if (m->m_len != 0) {
2216 if (frag == XL_MAXFRAGS)
2217 break;
2218 total_len+= m->m_len;
2219 c->xl_ptr->xl_frag[frag].xl_addr =
2220 vtophys(mtod(m, vm_offset_t));
2221 c->xl_ptr->xl_frag[frag].xl_len = m->m_len;
2222 frag++;
2223 }
2224 }
2225
2226 /*
2227 * Handle special case: we used up all 63 fragments,
2228 * but we have more mbufs left in the chain. Copy the
2229 * data into an mbuf cluster. Note that we don't
2230 * bother clearing the values in the other fragment
2231 * pointers/counters; it wouldn't gain us anything,
2232 * and would waste cycles.
2233 */
2234 if (m != NULL) {
2235 struct mbuf *m_new = NULL;
2236
2237 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2238 if (m_new == NULL) {
2239			printf("xl%d: no memory for tx list\n", sc->xl_unit);
2240 return(1);
2241 }
2242 if (m_head->m_pkthdr.len > MHLEN) {
2243 MCLGET(m_new, M_DONTWAIT);
2244 if (!(m_new->m_flags & M_EXT)) {
2245 m_freem(m_new);
2246				printf("xl%d: no memory for tx list\n",
2247 sc->xl_unit);
2248 return(1);
2249 }
2250 }
2251 m_copydata(m_head, 0, m_head->m_pkthdr.len,
2252 mtod(m_new, caddr_t));
2253 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
2254 m_freem(m_head);
2255 m_head = m_new;
2256 f = &c->xl_ptr->xl_frag[0];
2257 f->xl_addr = vtophys(mtod(m_new, caddr_t));
2258 f->xl_len = total_len = m_new->m_len;
2259 frag = 1;
2260 }
2261
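	/*
	 * Terminate the fragment list and record the frame length in the
	 * status word; xl_start() fills in the next pointer when chaining.
	 */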
2262 c->xl_mbuf = m_head;
2263 c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG;
2264 c->xl_ptr->xl_status = total_len;
2265 c->xl_ptr->xl_next = 0;
2266
2267 return(0);
2268}
2269
2270/*
2271 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2272 * to the mbuf data regions directly in the transmit lists. We also save a
2273 * copy of the pointers since the transmit list fragment pointers are
2274 * physical addresses.
2275 */
2276
2277static void xl_start(ifp)
2278 struct ifnet *ifp;
2279{
2280 struct xl_softc *sc;
2281 struct mbuf *m_head = NULL;
2282 struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
2283
2284 sc = ifp->if_softc;
2285
2286 if (sc->xl_autoneg) {
2287 sc->xl_tx_pend = 1;
2288 return;
2289 }
2290
2291 /*
2292 * Check for an available queue slot. If there are none,
2293 * punt.
2294 */
2295 if (sc->xl_cdata.xl_tx_free == NULL) {
2296 xl_txeoc(sc);
2297 xl_txeof(sc);
2298 if (sc->xl_cdata.xl_tx_free == NULL) {
2299 ifp->if_flags |= IFF_OACTIVE;
2300 return;
2301 }
2302 }
2303
2304 start_tx = sc->xl_cdata.xl_tx_free;
2305
2306 while(sc->xl_cdata.xl_tx_free != NULL) {
2307 IF_DEQUEUE(&ifp->if_snd, m_head);
2308 if (m_head == NULL)
2309 break;
2310
2311 /* Pick a descriptor off the free list. */
2312 cur_tx = sc->xl_cdata.xl_tx_free;
2313 sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
2314
2315 cur_tx->xl_next = NULL;
2316
2317 /* Pack the data into the descriptor. */
2318 xl_encap(sc, cur_tx, m_head);
2319
2320 /* Chain it together. */
2321 if (prev != NULL) {
2322 prev->xl_next = cur_tx;
2323 prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr);
2324 }
2325 prev = cur_tx;
2326
2327#if NBPFILTER > 0
2328 /*
2329 * If there's a BPF listener, bounce a copy of this frame
2330 * to him.
2331 */
2332 if (ifp->if_bpf)
2333 bpf_mtap(ifp, cur_tx->xl_mbuf);
2334#endif
2335 }
2336
2337 /*
2338 * If there are no packets queued, bail.
2339 */
2340 if (cur_tx == NULL)
2341 return;
2342
2343 /*
2344	 * Place the request for the download interrupt
2345	 * in the last descriptor in the chain. This way, if
2346	 * we're chaining several packets at once, we'll only
2347	 * get an interrupt once for the whole chain rather than
2348 * once for each packet.
2349 */
2350 cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;
2351
2325 /*
2326 * Queue the packets. If the TX channel is clear, update
2327 * the downlist pointer register.
2328 */
2329 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
2330 xl_wait(sc);
2331
2332 if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
2333 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
2334 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
2335 vtophys(start_tx->xl_ptr);
2336 sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
2337 ~XL_TXSTAT_DL_INTR;
2338 sc->xl_cdata.xl_tx_tail = cur_tx;
2339 } else {
2352 if (sc->xl_cdata.xl_tx_head == NULL) {
2340 sc->xl_cdata.xl_tx_head = start_tx;
2341 sc->xl_cdata.xl_tx_tail = cur_tx;
2342 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr));
2353 sc->xl_cdata.xl_tx_head = start_tx;
2354 sc->xl_cdata.xl_tx_tail = cur_tx;
2355 CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr));
2356 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2357 } else {
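		/*
		 * The chip is already busy with an earlier chain: link the
		 * new frames onto the tail and flag the first one as unsent
		 * so xl_txeof() can hand it to the download engine later.
		 */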
2358 start_tx->xl_unsent++;
2359 sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
2360 sc->xl_cdata.xl_tx_tail = cur_tx;
2343 }
2361 }
2344 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
2345
2362
2346 XL_SEL_WIN(7);
2347
2348 /*
2349 * Set a timeout in case the chip goes out to lunch.
2350 */
2351 ifp->if_timer = 5;
2352
2353 /*
2354 * XXX Under certain conditions, usually on slower machines
2355 * where interrupts may be dropped, it's possible for the
2356 * adapter to chew up all the buffers in the receive ring
2357 * and stall, without us being able to do anything about it.
2358 * To guard against this, we need to make a pass over the
2359 * RX queue to make sure there aren't any packets pending.
2360 * Doing it here means we can flush the receive ring at the
2361 * same time the chip is DMAing the transmit descriptors we
2362 * just gave it.
2363 *
2364 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
2365 * nature of their chips in all their marketing literature;
2366 * we may as well take advantage of it. :)
2367 */
2368 xl_rxeof(sc);
2369
2370 return;
2371}
2372
2373static void xl_init(xsc)
2374 void *xsc;
2375{
2376 struct xl_softc *sc = xsc;
2377 struct ifnet *ifp = &sc->arpcom.ac_if;
2378 int s, i;
2379 u_int16_t rxfilt = 0;
2380 u_int16_t phy_bmcr = 0;
2381
2382 if (sc->xl_autoneg)
2383 return;
2384
2385 s = splimp();
2386
2387 /*
2388 * XXX Hack for the 3c905B: the built-in autoneg logic's state
2389 * gets reset by xl_init() when we don't want it to. Try
2390 * to preserve it. (For 3c905 cards with real external PHYs,
2391 * the BMCR register doesn't change, but this doesn't hurt.)
2392 */
2393 if (sc->xl_pinfo != NULL)
2394 phy_bmcr = xl_phy_readreg(sc, PHY_BMCR);
2395
2396 /*
2397 * Cancel pending I/O and free all RX/TX buffers.
2398 */
2399 xl_stop(sc);
2400
2401 xl_wait(sc);
2402
2403 /* Init our MAC address */
2404 XL_SEL_WIN(2);
2405 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2406 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2407 sc->arpcom.ac_enaddr[i]);
2408 }
2409
2410 /* Clear the station mask. */
2411 for (i = 0; i < 3; i++)
2412 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2413
2414#ifdef notdef
2415 /* Reset TX and RX. */
2416 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2417 xl_wait(sc);
2418 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2419 xl_wait(sc);
2420#endif
2421
2422 /* Init circular RX list. */
2423 if (xl_list_rx_init(sc) == ENOBUFS) {
2424 printf("xl%d: initialization failed: no "
2425 "memory for rx buffers\n", sc->xl_unit);
2426 xl_stop(sc);
2427 return;
2428 }
2429
2430 /* Init TX descriptors. */
2431 xl_list_tx_init(sc);
2432
2433 /*
2434 * Set the TX freethresh value.
2435 * Note that this has no effect on 3c905B "cyclone"
2436 * cards but is required for 3c900/3c905 "boomerang"
2437 * cards in order to enable the download engine.
2438 */
2439 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2440
2441 /*
2442 * If this is a 3c905B, also set the tx reclaim threshold.
2443 * This helps cut down on the number of tx reclaim errors
2444 * that could happen on a busy network. The chip multiplies
2445 * the register value by 16 to obtain the actual threshold
2446 * in bytes, so we divide by 16 when setting the value here.
2447 * The existing threshold value can be examined by reading
2448 * the register at offset 9 in window 5.
2449 */
2450 if (sc->xl_type == XL_TYPE_905B) {
2451 CSR_WRITE_2(sc, XL_COMMAND,
2452 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2453 }
2454
2455 /* Set RX filter bits. */
2456 XL_SEL_WIN(5);
2457 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2458
2459 /* Set the individual bit to receive frames for this host only. */
2460 rxfilt |= XL_RXFILTER_INDIVIDUAL;
2461
2462 /* If we want promiscuous mode, set the allframes bit. */
2463 if (ifp->if_flags & IFF_PROMISC) {
2464 rxfilt |= XL_RXFILTER_ALLFRAMES;
2465 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2466 } else {
2467 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2468 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2469 }
2470
2471 /*
2472 * Set capture broadcast bit to capture broadcast frames.
2473 */
2474 if (ifp->if_flags & IFF_BROADCAST) {
2475 rxfilt |= XL_RXFILTER_BROADCAST;
2476 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2477 } else {
2478 rxfilt &= ~XL_RXFILTER_BROADCAST;
2479 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2480 }
2481
2482 /*
2483 * Program the multicast filter, if necessary.
2484 */
2485 if (sc->xl_type == XL_TYPE_905B)
2486 xl_setmulti_hash(sc);
2487 else
2488 xl_setmulti(sc);
2489
2490 /*
2491 * Load the address of the RX list. We have to
2492 * stall the upload engine before we can manipulate
2493 * the uplist pointer register, then unstall it when
2494 * we're finished. We also have to wait for the
2495 * stall command to complete before proceeding.
2496 * Note that we have to do this after any RX resets
2497 * have completed since the uplist register is cleared
2498 * by a reset.
2499 */
2500 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2501 xl_wait(sc);
2502 CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0]));
2503 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2504
2505 /*
2506 * If the coax transceiver is on, make sure to enable
2507 * the DC-DC converter.
2508 */
2509 XL_SEL_WIN(3);
2510 if (sc->xl_xcvr == XL_XCVR_COAX)
2511 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2512 else
2513 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2514
2515 /* Clear out the stats counters. */
2516 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2517 sc->xl_stats_no_timeout = 1;
2518 xl_stats_update(sc);
2519 sc->xl_stats_no_timeout = 0;
2520 XL_SEL_WIN(4);
2521 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2522 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2523
2524 /*
2525 * Enable interrupts.
2526 */
2527 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2528 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2529 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2530
2531 /* Set the RX early threshold */
2532 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2533 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2534
2535 /* Enable receiver and transmitter. */
2536 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2537 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2538
2539 /* Restore state of BMCR */
2540 if (sc->xl_pinfo != NULL)
2541 xl_phy_writereg(sc, PHY_BMCR, phy_bmcr);
2542
2543 /* Select window 7 for normal operations. */
2544 XL_SEL_WIN(7);
2545
2546 ifp->if_flags |= IFF_RUNNING;
2547 ifp->if_flags &= ~IFF_OACTIVE;
2548
2549 (void)splx(s);
2550
2551 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2552
2553 return;
2554}
2555
2556/*
2557 * Set media options.
2558 */
2559static int xl_ifmedia_upd(ifp)
2560 struct ifnet *ifp;
2561{
2562 struct xl_softc *sc;
2563 struct ifmedia *ifm;
2564
2565 sc = ifp->if_softc;
2566 ifm = &sc->ifmedia;
2567
2568 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2569 return(EINVAL);
2570
2363 /*
2364 * Set a timeout in case the chip goes out to lunch.
2365 */
2366 ifp->if_timer = 5;
2367
2368 /*
2369 * XXX Under certain conditions, usually on slower machines
2370 * where interrupts may be dropped, it's possible for the
2371 * adapter to chew up all the buffers in the receive ring
2372 * and stall, without us being able to do anything about it.
2373 * To guard against this, we need to make a pass over the
2374 * RX queue to make sure there aren't any packets pending.
2375 * Doing it here means we can flush the receive ring at the
2376 * same time the chip is DMAing the transmit descriptors we
2377 * just gave it.
2378 *
2379 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
2380 * nature of their chips in all their marketing literature;
2381 * we may as well take advantage of it. :)
2382 */
2383 xl_rxeof(sc);
2384
2385 return;
2386}
2387
2388static void xl_init(xsc)
2389 void *xsc;
2390{
2391 struct xl_softc *sc = xsc;
2392 struct ifnet *ifp = &sc->arpcom.ac_if;
2393 int s, i;
2394 u_int16_t rxfilt = 0;
2395 u_int16_t phy_bmcr = 0;
2396
2397 if (sc->xl_autoneg)
2398 return;
2399
2400 s = splimp();
2401
2402 /*
2403 * XXX Hack for the 3c905B: the built-in autoneg logic's state
2404 * gets reset by xl_init() when we don't want it to. Try
2405 * to preserve it. (For 3c905 cards with real external PHYs,
2406 * the BMCR register doesn't change, but this doesn't hurt.)
2407 */
2408 if (sc->xl_pinfo != NULL)
2409 phy_bmcr = xl_phy_readreg(sc, PHY_BMCR);
2410
2411 /*
2412 * Cancel pending I/O and free all RX/TX buffers.
2413 */
2414 xl_stop(sc);
2415
2416 xl_wait(sc);
2417
2418 /* Init our MAC address */
2419 XL_SEL_WIN(2);
2420 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2421 CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
2422 sc->arpcom.ac_enaddr[i]);
2423 }
2424
2425 /* Clear the station mask. */
2426 for (i = 0; i < 3; i++)
2427 CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
2428
2429#ifdef notdef
2430 /* Reset TX and RX. */
2431 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2432 xl_wait(sc);
2433 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2434 xl_wait(sc);
2435#endif
2436
2437 /* Init circular RX list. */
2438 if (xl_list_rx_init(sc) == ENOBUFS) {
2439 printf("xl%d: initialization failed: no "
2440 "memory for rx buffers\n", sc->xl_unit);
2441 xl_stop(sc);
2442 return;
2443 }
2444
2445 /* Init TX descriptors. */
2446 xl_list_tx_init(sc);
2447
2448 /*
2449 * Set the TX freethresh value.
2450 * Note that this has no effect on 3c905B "cyclone"
2451 * cards but is required for 3c900/3c905 "boomerang"
2452 * cards in order to enable the download engine.
2453 */
2454 CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
2455
2456 /*
2457 * If this is a 3c905B, also set the tx reclaim threshold.
2458 * This helps cut down on the number of tx reclaim errors
2459 * that could happen on a busy network. The chip multiplies
2460 * the register value by 16 to obtain the actual threshold
2461 * in bytes, so we divide by 16 when setting the value here.
2462 * The existing threshold value can be examined by reading
2463 * the register at offset 9 in window 5.
2464 */
2465 if (sc->xl_type == XL_TYPE_905B) {
2466 CSR_WRITE_2(sc, XL_COMMAND,
2467 XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
2468 }
2469
2470 /* Set RX filter bits. */
2471 XL_SEL_WIN(5);
2472 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
2473
2474 /* Set the individual bit to receive frames for this host only. */
2475 rxfilt |= XL_RXFILTER_INDIVIDUAL;
2476
2477 /* If we want promiscuous mode, set the allframes bit. */
2478 if (ifp->if_flags & IFF_PROMISC) {
2479 rxfilt |= XL_RXFILTER_ALLFRAMES;
2480 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2481 } else {
2482 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
2483 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2484 }
2485
2486 /*
2487 * Set capture broadcast bit to capture broadcast frames.
2488 */
2489 if (ifp->if_flags & IFF_BROADCAST) {
2490 rxfilt |= XL_RXFILTER_BROADCAST;
2491 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2492 } else {
2493 rxfilt &= ~XL_RXFILTER_BROADCAST;
2494 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
2495 }
2496
2497 /*
2498 * Program the multicast filter, if necessary.
2499 */
2500 if (sc->xl_type == XL_TYPE_905B)
2501 xl_setmulti_hash(sc);
2502 else
2503 xl_setmulti(sc);
2504
2505 /*
2506 * Load the address of the RX list. We have to
2507 * stall the upload engine before we can manipulate
2508 * the uplist pointer register, then unstall it when
2509 * we're finished. We also have to wait for the
2510 * stall command to complete before proceeding.
2511 * Note that we have to do this after any RX resets
2512 * have completed since the uplist register is cleared
2513 * by a reset.
2514 */
2515 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
2516 xl_wait(sc);
2517 CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0]));
2518 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
2519
2520 /*
2521 * If the coax transceiver is on, make sure to enable
2522 * the DC-DC converter.
2523 */
2524 XL_SEL_WIN(3);
2525 if (sc->xl_xcvr == XL_XCVR_COAX)
2526 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
2527 else
2528 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2529
2530 /* Clear out the stats counters. */
2531 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2532 sc->xl_stats_no_timeout = 1;
2533 xl_stats_update(sc);
2534 sc->xl_stats_no_timeout = 0;
2535 XL_SEL_WIN(4);
2536 CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
2537 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2538
2539 /*
2540 * Enable interrupts.
2541 */
2542 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2543 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2544 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
2545
2546 /* Set the RX early threshold */
2547 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
2548 CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2549
2550 /* Enable receiver and transmitter. */
2551 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2552 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2553
2554 /* Restore state of BMCR */
2555 if (sc->xl_pinfo != NULL)
2556 xl_phy_writereg(sc, PHY_BMCR, phy_bmcr);
2557
2558 /* Select window 7 for normal operations. */
2559 XL_SEL_WIN(7);
2560
2561 ifp->if_flags |= IFF_RUNNING;
2562 ifp->if_flags &= ~IFF_OACTIVE;
2563
2564 (void)splx(s);
2565
2566 sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
2567
2568 return;
2569}
2570
2571/*
2572 * Set media options.
2573 */
2574static int xl_ifmedia_upd(ifp)
2575 struct ifnet *ifp;
2576{
2577 struct xl_softc *sc;
2578 struct ifmedia *ifm;
2579
2580 sc = ifp->if_softc;
2581 ifm = &sc->ifmedia;
2582
2583 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2584 return(EINVAL);
2585
2586 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2587 case IFM_100_FX:
2588 case IFM_10_2:
2589 case IFM_10_5:
2590 xl_setmode(sc, ifm->ifm_media);
2591 return(0);
2592 break;
2593 default:
2594 break;
2595 }
2596
2571 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2572 || sc->xl_media & XL_MEDIAOPT_BT4) {
2573 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
2574 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
2575 else
2576 xl_setmode_mii(sc, ifm->ifm_media);
2577 } else {
2578 xl_setmode(sc, ifm->ifm_media);
2579 }
2580
2581 return(0);
2582}
2583
2584/*
2585 * Report current media status.
2586 */
2587static void xl_ifmedia_sts(ifp, ifmr)
2588 struct ifnet *ifp;
2589 struct ifmediareq *ifmr;
2590{
2591 struct xl_softc *sc;
2592 u_int16_t advert = 0, ability = 0;
2593 u_int32_t icfg;
2594
2595 sc = ifp->if_softc;
2596
2597 XL_SEL_WIN(3);
2598 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2599 icfg >>= XL_ICFG_CONNECTOR_BITS;
2600
2601 ifmr->ifm_active = IFM_ETHER;
2602
2603 switch(icfg) {
2604 case XL_XCVR_10BT:
2605 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2606 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2607 ifmr->ifm_active |= IFM_FDX;
2608 else
2609 ifmr->ifm_active |= IFM_HDX;
2610 break;
2611 case XL_XCVR_AUI:
2612 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2613 break;
2614 case XL_XCVR_COAX:
2615 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2616 break;
2617 /*
2618 * XXX MII and BTX/AUTO should be separate cases.
2619 */
2620
2621 case XL_XCVR_100BTX:
2622 case XL_XCVR_AUTO:
2623 case XL_XCVR_MII:
2624 if (!(xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
2625 if (xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
2626 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
2627 else
2628 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2629 XL_SEL_WIN(3);
2630 if (CSR_READ_2(sc, XL_W3_MAC_CTRL) &
2631 XL_MACCTRL_DUPLEX)
2632 ifmr->ifm_active |= IFM_FDX;
2633 else
2634 ifmr->ifm_active |= IFM_HDX;
2635 break;
2636 }
2637 ability = xl_phy_readreg(sc, XL_PHY_LPAR);
2638 advert = xl_phy_readreg(sc, XL_PHY_ANAR);
2639 if (advert & PHY_ANAR_100BT4 &&
2640 ability & PHY_ANAR_100BT4) {
2641 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
2642 } else if (advert & PHY_ANAR_100BTXFULL &&
2643 ability & PHY_ANAR_100BTXFULL) {
2644 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
2645 } else if (advert & PHY_ANAR_100BTXHALF &&
2646 ability & PHY_ANAR_100BTXHALF) {
2647 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
2648 } else if (advert & PHY_ANAR_10BTFULL &&
2649 ability & PHY_ANAR_10BTFULL) {
2650 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
2651 } else if (advert & PHY_ANAR_10BTHALF &&
2652 ability & PHY_ANAR_10BTHALF) {
2653 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
2654 }
2655 break;
2656 case XL_XCVR_100BFX:
2657 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2658 break;
2659 default:
2660 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
2661 break;
2662 }
2663
2664 return;
2665}
2666
2667static int xl_ioctl(ifp, command, data)
2668 struct ifnet *ifp;
2669 u_long command;
2670 caddr_t data;
2671{
2672 struct xl_softc *sc = ifp->if_softc;
2673 struct ifreq *ifr = (struct ifreq *) data;
2674 int s, error = 0;
2675
2676 s = splimp();
2677
2678 switch(command) {
2679 case SIOCSIFADDR:
2680 case SIOCGIFADDR:
2681 case SIOCSIFMTU:
2682 error = ether_ioctl(ifp, command, data);
2683 break;
2684 case SIOCSIFFLAGS:
2685 if (ifp->if_flags & IFF_UP) {
2686 xl_init(sc);
2687 } else {
2688 if (ifp->if_flags & IFF_RUNNING)
2689 xl_stop(sc);
2690 }
2691 error = 0;
2692 break;
2693 case SIOCADDMULTI:
2694 case SIOCDELMULTI:
2695 if (sc->xl_type == XL_TYPE_905B)
2696 xl_setmulti_hash(sc);
2697 else
2698 xl_setmulti(sc);
2699 error = 0;
2700 break;
2701 case SIOCGIFMEDIA:
2702 case SIOCSIFMEDIA:
2703 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2704 break;
2705 default:
2706 error = EINVAL;
2707 break;
2708 }
2709
2710 (void)splx(s);
2711
2712 return(error);
2713}
2714
2715static void xl_watchdog(ifp)
2716 struct ifnet *ifp;
2717{
2718 struct xl_softc *sc;
2719 u_int16_t status = 0;
2720
2721 sc = ifp->if_softc;
2722
2723 if (sc->xl_autoneg) {
2724 xl_autoneg_mii(sc, XL_FLAG_DELAYTIMEO, 1);
2725 return;
2726 }
2727
2728 ifp->if_oerrors++;
2729 XL_SEL_WIN(4);
2730 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2731 printf("xl%d: watchdog timeout\n", sc->xl_unit);
2732
2733 if (status & XL_MEDIASTAT_CARRIER)
2734 printf("xl%d: no carrier - transceiver cable problem?\n",
2735 sc->xl_unit);
2736 xl_txeoc(sc);
2737 xl_txeof(sc);
2738 xl_rxeof(sc);
2739 xl_init(sc);
2740
2741 if (ifp->if_snd.ifq_head != NULL)
2742 xl_start(ifp);
2743
2744 return;
2745}
2746
2747/*
2748 * Stop the adapter and free any mbufs allocated to the
2749 * RX and TX lists.
2750 */
2751static void xl_stop(sc)
2752 struct xl_softc *sc;
2753{
2754 register int i;
2755 struct ifnet *ifp;
2756
2757 ifp = &sc->arpcom.ac_if;
2758 ifp->if_timer = 0;
2759
2760 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2761 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2762 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2763 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2764 xl_wait(sc);
2765 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2766 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2767 DELAY(800);
2768#ifdef notdef
2769 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2770 xl_wait(sc);
2771 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2772 xl_wait(sc);
2773#endif
2774 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2775
2776 /* Stop the stats updater. */
2777 untimeout(xl_stats_update, sc, sc->xl_stat_ch);
2778
2779 /*
2780 * Free data in the RX lists.
2781 */
2782 for (i = 0; i < XL_RX_LIST_CNT; i++) {
2783 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2784 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2785 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2786 }
2787 }
2788 bzero((char *)&sc->xl_ldata->xl_rx_list,
2789 sizeof(sc->xl_ldata->xl_rx_list));
2790 /*
2791 * Free the TX list buffers.
2792 */
2793 for (i = 0; i < XL_TX_LIST_CNT; i++) {
2794 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2795 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2796 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2797 }
2798 }
2799 bzero((char *)&sc->xl_ldata->xl_tx_list,
2800 sizeof(sc->xl_ldata->xl_tx_list));
2801
2802 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2803
2804 return;
2805}
2806
2807/*
2808 * Stop all chip I/O so that the kernel's probe routines don't
2809 * get confused by errant DMAs when rebooting.
2810 */
2811static void xl_shutdown(howto, arg)
2812 int howto;
2813 void *arg;
2814{
2815 struct xl_softc *sc = (struct xl_softc *)arg;
2816
2817 xl_stop(sc);
2818
2819 return;
2820}
2821
2822
2823static struct pci_device xl_device = {
2824 "xl",
2825 xl_probe,
2826 xl_attach,
2827 &xl_count,
2828 NULL
2829};
2830DATA_SET(pcidevice_set, xl_device);
2597 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2598 || sc->xl_media & XL_MEDIAOPT_BT4) {
2599 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
2600 xl_autoneg_mii(sc, XL_FLAG_SCHEDDELAY, 1);
2601 else
2602 xl_setmode_mii(sc, ifm->ifm_media);
2603 } else {
2604 xl_setmode(sc, ifm->ifm_media);
2605 }
2606
2607 return(0);
2608}
2609
2610/*
2611 * Report current media status.
2612 */
2613static void xl_ifmedia_sts(ifp, ifmr)
2614 struct ifnet *ifp;
2615 struct ifmediareq *ifmr;
2616{
2617 struct xl_softc *sc;
2618 u_int16_t advert = 0, ability = 0;
2619 u_int32_t icfg;
2620
2621 sc = ifp->if_softc;
2622
2623 XL_SEL_WIN(3);
2624 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2625 icfg >>= XL_ICFG_CONNECTOR_BITS;
2626
2627 ifmr->ifm_active = IFM_ETHER;
2628
2629 switch(icfg) {
2630 case XL_XCVR_10BT:
2631 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2632 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2633 ifmr->ifm_active |= IFM_FDX;
2634 else
2635 ifmr->ifm_active |= IFM_HDX;
2636 break;
2637 case XL_XCVR_AUI:
2638 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2639 break;
2640 case XL_XCVR_COAX:
2641 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2642 break;
2643 /*
2644 * XXX MII and BTX/AUTO should be separate cases.
2645 */
2646
2647 case XL_XCVR_100BTX:
2648 case XL_XCVR_AUTO:
2649 case XL_XCVR_MII:
2650 if (!(xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
2651 if (xl_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
2652 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
2653 else
2654 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2655 XL_SEL_WIN(3);
2656 if (CSR_READ_2(sc, XL_W3_MAC_CTRL) &
2657 XL_MACCTRL_DUPLEX)
2658 ifmr->ifm_active |= IFM_FDX;
2659 else
2660 ifmr->ifm_active |= IFM_HDX;
2661 break;
2662 }
2663 ability = xl_phy_readreg(sc, XL_PHY_LPAR);
2664 advert = xl_phy_readreg(sc, XL_PHY_ANAR);
2665 if (advert & PHY_ANAR_100BT4 &&
2666 ability & PHY_ANAR_100BT4) {
2667 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
2668 } else if (advert & PHY_ANAR_100BTXFULL &&
2669 ability & PHY_ANAR_100BTXFULL) {
2670 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
2671 } else if (advert & PHY_ANAR_100BTXHALF &&
2672 ability & PHY_ANAR_100BTXHALF) {
2673 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
2674 } else if (advert & PHY_ANAR_10BTFULL &&
2675 ability & PHY_ANAR_10BTFULL) {
2676 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
2677 } else if (advert & PHY_ANAR_10BTHALF &&
2678 ability & PHY_ANAR_10BTHALF) {
2679 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
2680 }
2681 break;
2682 case XL_XCVR_100BFX:
2683 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2684 break;
2685 default:
2686 printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
2687 break;
2688 }
2689
2690 return;
2691}
2692
2693static int xl_ioctl(ifp, command, data)
2694 struct ifnet *ifp;
2695 u_long command;
2696 caddr_t data;
2697{
2698 struct xl_softc *sc = ifp->if_softc;
2699 struct ifreq *ifr = (struct ifreq *) data;
2700 int s, error = 0;
2701
2702 s = splimp();
2703
2704 switch(command) {
2705 case SIOCSIFADDR:
2706 case SIOCGIFADDR:
2707 case SIOCSIFMTU:
2708 error = ether_ioctl(ifp, command, data);
2709 break;
2710 case SIOCSIFFLAGS:
2711 if (ifp->if_flags & IFF_UP) {
2712 xl_init(sc);
2713 } else {
2714 if (ifp->if_flags & IFF_RUNNING)
2715 xl_stop(sc);
2716 }
2717 error = 0;
2718 break;
2719 case SIOCADDMULTI:
2720 case SIOCDELMULTI:
2721 if (sc->xl_type == XL_TYPE_905B)
2722 xl_setmulti_hash(sc);
2723 else
2724 xl_setmulti(sc);
2725 error = 0;
2726 break;
2727 case SIOCGIFMEDIA:
2728 case SIOCSIFMEDIA:
2729 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2730 break;
2731 default:
2732 error = EINVAL;
2733 break;
2734 }
2735
2736 (void)splx(s);
2737
2738 return(error);
2739}
2740
2741static void xl_watchdog(ifp)
2742 struct ifnet *ifp;
2743{
2744 struct xl_softc *sc;
2745 u_int16_t status = 0;
2746
2747 sc = ifp->if_softc;
2748
2749 if (sc->xl_autoneg) {
2750 xl_autoneg_mii(sc, XL_FLAG_DELAYTIMEO, 1);
2751 return;
2752 }
2753
2754 ifp->if_oerrors++;
2755 XL_SEL_WIN(4);
2756 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2757 printf("xl%d: watchdog timeout\n", sc->xl_unit);
2758
2759 if (status & XL_MEDIASTAT_CARRIER)
2760 printf("xl%d: no carrier - transceiver cable problem?\n",
2761 sc->xl_unit);
2762 xl_txeoc(sc);
2763 xl_txeof(sc);
2764 xl_rxeof(sc);
2765 xl_init(sc);
2766
2767 if (ifp->if_snd.ifq_head != NULL)
2768 xl_start(ifp);
2769
2770 return;
2771}
2772
2773/*
2774 * Stop the adapter and free any mbufs allocated to the
2775 * RX and TX lists.
2776 */
2777static void xl_stop(sc)
2778 struct xl_softc *sc;
2779{
2780 register int i;
2781 struct ifnet *ifp;
2782
2783 ifp = &sc->arpcom.ac_if;
2784 ifp->if_timer = 0;
2785
2786 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2787 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2788 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2789 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2790 xl_wait(sc);
2791 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2792 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2793 DELAY(800);
2794#ifdef notdef
2795 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2796 xl_wait(sc);
2797 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2798 xl_wait(sc);
2799#endif
2800 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2801
2802 /* Stop the stats updater. */
2803 untimeout(xl_stats_update, sc, sc->xl_stat_ch);
2804
2805 /*
2806 * Free data in the RX lists.
2807 */
2808 for (i = 0; i < XL_RX_LIST_CNT; i++) {
2809 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2810 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2811 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2812 }
2813 }
2814 bzero((char *)&sc->xl_ldata->xl_rx_list,
2815 sizeof(sc->xl_ldata->xl_rx_list));
2816 /*
2817 * Free the TX list buffers.
2818 */
2819 for (i = 0; i < XL_TX_LIST_CNT; i++) {
2820 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2821 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2822 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2823 }
2824 }
2825 bzero((char *)&sc->xl_ldata->xl_tx_list,
2826 sizeof(sc->xl_ldata->xl_tx_list));
2827
2828 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2829
2830 return;
2831}
2832
2833/*
2834 * Stop all chip I/O so that the kernel's probe routines don't
2835 * get confused by errant DMAs when rebooting.
2836 */
2837static void xl_shutdown(howto, arg)
2838 int howto;
2839 void *arg;
2840{
2841 struct xl_softc *sc = (struct xl_softc *)arg;
2842
2843 xl_stop(sc);
2844
2845 return;
2846}
2847
2848
2849static struct pci_device xl_device = {
2850 "xl",
2851 xl_probe,
2852 xl_attach,
2853 &xl_count,
2854 NULL
2855};
2856DATA_SET(pcidevice_set, xl_device);