/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/dc/if_dc.c 173839 2007-11-22 02:45:00Z yongari $");

/*
 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN985 (www.admtek.com.tw)
 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217 (www.accton.com)
 * Xircom X3201 (www.xircom.com)
 * Abocom FE2500
 * Conexant LANfinity (www.conexant.com)
 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The Intel 21143 is the successor to the DEC 21140. It is basically
 * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
 *
 * o MII port, for 10Mbps and 100Mbps support and NWAY
 *   autonegotiation provided by an external PHY.
 * o SYM port, for symbol mode 100Mbps support.
 * o 10baseT port.
 * o AUI/BNC port.
 *
 * The 100Mbps SYM port and 10baseT port can be used together in
 * combination with the internal NWAY support to create a 10/100
 * autosensing configuration.
 *
 * Note that not all tulip workalikes are handled in this driver: we only
 * deal with those which are relatively well behaved. The Winbond is
 * handled separately due to its different register offsets and the
 * special handling needed for its various bugs. The PNIC is handled
 * here, but I'm not thrilled about it.
 *
 * All of the workalike chips use some form of MII transceiver support
 * with the exception of the Macronix chips, which also have a SYM port.
 * The ASIX AX88140A is also documented to have a SYM port, but all
 * the cards I've seen use an MII transceiver, probably because the
 * AX88140A doesn't support internal NWAY.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define DC_USEIOSPACE

#include <dev/dc/if_dcreg.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

MODULE_DEPEND(dc, pci, 1, 1, 1);
MODULE_DEPEND(dc, ether, 1, 1, 1);
MODULE_DEPEND(dc, miibus, 1, 1, 1);

/*
 * "device miibus" is required in kernel config.  See GENERIC if you get
 * errors here.
 */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct dc_type dc_devs[] = {
	{ DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0,
		"Intel 21143 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0,
		"Davicom DM9009 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0,
		"Davicom DM9100 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A,
		"Davicom DM9102A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0,
		"Davicom DM9102 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0,
		"ADMtek AL981 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0,
		"ADMtek AN985 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0,
		"ADMtek ADM9511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0,
		"ADMtek ADM9513 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511), 0,
		"Netgear FA511 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141,
		"ASIX AX88141 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0,
		"ASIX AX88140A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A,
		"Macronix 98713A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0,
		"Macronix 98713 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725,
		"Macronix 98725 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C,
		"Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0,
		"Macronix 98715/98715A 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0,
		"Macronix 98727/98732 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0,
		"LC82C115 PNIC II 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169,
		"82c169 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0,
		"82c168 PNIC 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0,
		"Accton EN1217 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0,
		"Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0,
		"Xircom X3201 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0,
		"Neteasy DRP-32TXD Cardbus 10/100" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0,
		"Abocom FE2500 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0,
		"Abocom FE2500MX 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0,
		"Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0,
		"Hawking CB102 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0,
		"PlaneX FNW-3602-T CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0,
		"3Com OfficeConnect 10/100B" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0,
		"Microsoft MN-120 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0,
		"Microsoft MN-130 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0,
		"Linksys PCMPC200 CardBus 10/100" },
	{ DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0,
		"Linksys PCMPC200 CardBus 10/100" },
	{ 0, 0, NULL }
};

static int dc_probe(device_t);
static int dc_attach(device_t);
static int dc_detach(device_t);
static int dc_suspend(device_t);
static int dc_resume(device_t);
static struct dc_type *dc_devtype(device_t);
static int dc_newbuf(struct dc_softc *, int, int);
static int dc_encap(struct dc_softc *, struct mbuf **);
static void dc_pnic_rx_bug_war(struct dc_softc *, int);
static int dc_rx_resync(struct dc_softc *);
static void dc_rxeof(struct dc_softc *);
static void dc_txeof(struct dc_softc *);
static void dc_tick(void *);
static void dc_tx_underrun(struct dc_softc *);
static void dc_intr(void *);
static void dc_start(struct ifnet *);
static void dc_start_locked(struct ifnet *);
static int dc_ioctl(struct ifnet *, u_long, caddr_t);
static void dc_init(void *);
static void dc_init_locked(struct dc_softc *);
static void dc_stop(struct dc_softc *);
static void dc_watchdog(void *);
static int dc_shutdown(device_t);
static int dc_ifmedia_upd(struct ifnet *);
static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void dc_delay(struct dc_softc *);
static void dc_eeprom_idle(struct dc_softc *);
static void dc_eeprom_putbyte(struct dc_softc *, int);
static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_width(struct dc_softc *);
static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);

static void dc_mii_writebit(struct dc_softc *, int);
static int dc_mii_readbit(struct dc_softc *);
static void dc_mii_sync(struct dc_softc *);
static void dc_mii_send(struct dc_softc *, u_int32_t, int);
static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
static int dc_miibus_readreg(device_t, int, int);
static int dc_miibus_writereg(device_t, int, int, int);
static void dc_miibus_statchg(device_t);
static void dc_miibus_mediainit(device_t);

static void dc_setcfg(struct dc_softc *, int);
static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *);
static uint32_t dc_mchash_be(const uint8_t *);
static void dc_setfilt_21143(struct dc_softc *);
static void dc_setfilt_asix(struct dc_softc *);
static void dc_setfilt_admtek(struct dc_softc *);
static void dc_setfilt_xircom(struct dc_softc *);

static void dc_setfilt(struct dc_softc *);

static void dc_reset(struct dc_softc *);
static int dc_list_rx_init(struct dc_softc *);
static int dc_list_tx_init(struct dc_softc *);

static void dc_read_srom(struct dc_softc *, int);
static void dc_parse_21143_srom(struct dc_softc *);
static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *);
static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *);
static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *);
static void dc_apply_fixup(struct dc_softc *, int);

static void dc_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int);

#ifdef DC_USEIOSPACE
#define DC_RES			SYS_RES_IOPORT
#define DC_RID			DC_PCI_CFBIO
#else
#define DC_RES			SYS_RES_MEMORY
#define DC_RID			DC_PCI_CFBMA
#endif

static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	{ 0, 0 }
};

static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};

static devclass_t dc_devclass;

DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0);
DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0);
DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0);

#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))

static void
dc_delay(struct dc_softc *sc)
{
	int idx;

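	/*
	 * Each dummy read of DC_BUSCTL crosses the PCI bus, which takes
	 * roughly one 33MHz bus cycle (~30ns), so (300 / 33) + 1 reads
	 * should give the ~300ns of settling time the chip's serial
	 * interface wants between CSR accesses.
	 */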
	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, DC_BUSCTL);
}

static void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

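	/*
	 * Clock out the 3-bit EEPROM read opcode; the literal 6 below is
	 * that opcode (110b), the same value dc_eeprom_putbyte() derives
	 * from DC_EECMD_READ >> 6.
	 */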
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

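	/*
	 * Now clock in zero address bits until the EEPROM drives its data
	 * output low (the leading dummy zero of the data word); the number
	 * of clocks needed is presumed to be the part's address width.
	 */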
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}

static void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

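	/* Clock out the 3-bit read opcode, most significant bit first. */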
	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
static void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read
 * the EEPROM, too.
 */
static void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{

	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

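	/*
	 * The X3201 appears to expose its EEPROM a byte at a time through
	 * DC_ROM; the byte address goes in the low bits and 0x160
	 * presumably carries the read/chip-select control bits.
	 */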
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else if (DC_IS_XIRCOM(sc))
			dc_eeprom_getword_xircom(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (be)
			*ptr = be16toh(word);
		else
			*ptr = le16toh(word);
	}
}

/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */
/*
 * Write a bit to the MII bus.
 */
static void
dc_mii_writebit(struct dc_softc *sc, int bit)
{

	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
}

/*
 * Read a bit from the MII bus.
 */
static int
dc_mii_readbit(struct dc_softc *sc)
{

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return (1);

	return (0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
dc_mii_sync(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);
}

/*
 * Clock a series of bits through the MII.
 */
static void
dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
{
	int i;

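	/* Shift the bits out onto the MII management bus MSB first. */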
	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read a PHY register through the MII.
 */
static int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack. */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++)
			dc_mii_readbit(sc);
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	return (0);
}

static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc	 *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN985 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN985 uses a serial MII interface.
	 * The AN985's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (Ethernet),
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch (reg) {
			case MII_BMSR:
			/*
			 * Fake something to make the probe
			 * code think there's a PHY here.
			 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (DC_VENDORID_LO);
				return (DC_VENDORID_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (DC_DEVICEID_82C168);
				return (DC_DEVICEID_21143);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_read: bad phy register %x\n",
			    reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}

static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct dc_softc *sc;
	struct dc_mii_frame frame;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			device_printf(dev, "phy_write: bad phy register %x\n",
			    reg);
			return (0);
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return (0);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (0);
}

static void
dc_miibus_statchg(device_t dev)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	if (DC_IS_ADMTEK(sc))
		return;

	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}
}

/*
 * Special support for DM9102A cards with HomePNA PHYs. Note:
 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
 * to be impossible to talk to the management interface of the DM9801
 * PHY (its MDIO pin is not connected to anything). Consequently,
 * the driver has to just 'know' about the additional mode and deal
 * with it itself. *sigh*
 */
static void
dc_miibus_mediainit(device_t dev)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int rev;

	rev = pci_get_revid(dev);

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;

	if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
		ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL);
}

#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

static uint32_t
dc_mchash_le(struct dc_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4));
		else
			return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 +
			    (12 << 4));
	}

	return (crc & ((1 << DC_BITS_512) - 1));
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static uint32_t
dc_mchash_be(const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	/* Return the filter bit position. */
	return ((crc >> 26) & 0x0000003F);
}

/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = sc->dc_ifp;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

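	/*
	 * In hash-perfect mode the setup frame carries the multicast
	 * hash in its leading longwords (16 bits used per longword) and
	 * the station address in longwords 39-41 (per the 21143 setup
	 * frame layout), which is where the entries below land.
	 */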
	/* Set our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[39] = DC_SP_MAC(eaddr[0]);
	sp[40] = DC_SP_MAC(eaddr[1]);
	sp[41] = DC_SP_MAC(eaddr[2]);

	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	sc->dc_wdog_timer = 5;
}

static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[1]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (DC_IS_CENTAUR(sc))
			h = dc_mchash_le(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_mchash_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}

static void
dc_setfilt_asix(struct dc_softc *sc)
{
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}

static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	uint16_t eaddr[(ETHER_ADDR_LEN+1)/2];
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	ifp = sc->dc_ifp;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address. */
	bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN);
	sp[0] = DC_SP_MAC(eaddr[0]);
	sp[1] = DC_SP_MAC(eaddr[1]);
	sp[2] = DC_SP_MAC(eaddr[2]);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	sc->dc_wdog_timer = 5;
}

static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0, watchdogreg;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				device_printf(sc->dc_dev,
				    "%s: failed to force tx to idle state\n",
				    __func__);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !(DC_IS_CENTAUR(sc) || DC_IS_CONEXANT(sc) ||
			    (DC_IS_DAVICOM(sc) && pci_get_revid(sc->dc_dev) >=
			    DC_REVISION_DM9102A)))
				device_printf(sc->dc_dev,
				    "%s: failed to force rx to idle state\n",
				    __func__);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
			/* There's a write enable bit here that reads as 1. */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX | IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/* There's a write enable bit here that reads as 1. */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T | IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}

static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		device_printf(sc->dc_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}

static struct dc_type *
dc_devtype(device_t dev)
{
	struct dc_type *t;
	u_int32_t devid;
	u_int8_t rev;

	t = dc_devs;
	devid = pci_get_devid(dev);
	rev = pci_get_revid(dev);

	while (t->dc_name != NULL) {
		if (devid == t->dc_devid && rev >= t->dc_minrev)
			return (t);
		t++;
	}

	return (NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	struct dc_type *t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

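	/*
	 * The general-purpose port of the 21143 lives in the upper half
	 * of CSR15 (DC_WATCHDOG), which is presumably why the SROM reset
	 * and GPIO sequences below are shifted up by 16 bits before
	 * being written.
	 */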
	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}
}

static void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;
	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T | IFM_FDX;
		break;
	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;
	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	default:
		break;
	}

	/*
	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
	 * Things apparently already work for cards that do
	 * supply Media Specific Data.
	 */
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;
}

static void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX | IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;
}

static void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	struct dc_mediainfo *m;
	u_int8_t *p;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO);
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;
}

static void
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * See whether we have an MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
		    have_mii++;

		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch (hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
				dc_decode_leaf_sia(sc,
				    (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
				dc_decode_leaf_sym(sc,
				    (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}

static void
dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(device_t dev)
{
	int tmp = 0;
	uint32_t eaddr[(ETHER_ADDR_LEN+3)/4];
	u_int32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t revision;
	int error = 0, rid, mac_offset;
	int i;
	u_int8_t *mac;

	sc = device_get_softc(dev);
	sc->dc_dev = dev;

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_get_revid(dev);

	/* Get the EEPROM width, but the PNIC and Xircom have non-standard EEPROMs. */
	if (sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) &&
	    sc->dc_info->dc_devid !=
	    DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201))
		dc_eeprom_width(sc);

	switch (sc->dc_info->dc_devid) {
	case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143):
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100):
	case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102):
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD;
		sc->dc_flags |= DC_TX_ALIGN;
		sc->dc_pmode = DC_PMODE_MII;

		/* Increase the latency timer value. */
		pci_write_config(dev, PCIR_LATTIMER, 0x80, 1);
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981):
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513):
	case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD):
	case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500):
	case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX):
	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242):
	case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX):
	case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T):
	case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120):
	case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08):
	case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09):
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_64BIT_HASH;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
1913		/* Don't read the SROM; the address is auto-loaded on reset. */
1914		break;
1915	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713):
1916	case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP):
1917		if (revision < DC_REVISION_98713A) {
1918			sc->dc_type = DC_TYPE_98713;
1919		}
1920		if (revision >= DC_REVISION_98713A) {
1921			sc->dc_type = DC_TYPE_98713A;
1922			sc->dc_flags |= DC_21143_NWAY;
1923		}
1924		sc->dc_flags |= DC_REDUCED_MII_POLL;
1925		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1926		break;
1927	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5):
1928	case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217):
1929		/*
1930		 * Macronix MX98715AEC-C/D/E parts have only a
1931		 * 128-bit hash table. We need to deal with these
1932		 * in the same manner as the PNIC II so that we
1933		 * get the right number of bits out of the
1934		 * CRC routine.
1935		 */
1936		if (revision >= DC_REVISION_98715AEC_C &&
1937		    revision < DC_REVISION_98725)
1938			sc->dc_flags |= DC_128BIT_HASH;
1939		sc->dc_type = DC_TYPE_987x5;
1940		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1941		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1942		break;
1943	case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727):
1944		sc->dc_type = DC_TYPE_987x5;
1945		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR;
1946		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1947		break;
1948	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115):
1949		sc->dc_type = DC_TYPE_PNICII;
1950		sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH;
1951		sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY;
1952		break;
1953	case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168):
1954		sc->dc_type = DC_TYPE_PNIC;
1955		sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS;
1956		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
1957		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
1958		if (revision < DC_REVISION_82C169)
1959			sc->dc_pmode = DC_PMODE_SYM;
1960		break;
1961	case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A):
1962		sc->dc_type = DC_TYPE_ASIX;
1963		sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG;
1964		sc->dc_flags |= DC_REDUCED_MII_POLL;
1965		sc->dc_pmode = DC_PMODE_MII;
1966		break;
1967	case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201):
1968		sc->dc_type = DC_TYPE_XIRCOM;
1969		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
1970				DC_TX_ALIGN;
1971		/*
1972		 * We don't actually need to coalesce, but we're doing
1973		 * it to obtain a double word aligned buffer.
1974		 * The DC_TX_COALESCE flag is required.
1975		 */
1976		sc->dc_pmode = DC_PMODE_MII;
1977		break;
1978	case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112):
1979		sc->dc_type = DC_TYPE_CONEXANT;
1980		sc->dc_flags |= DC_TX_INTR_ALWAYS;
1981		sc->dc_flags |= DC_REDUCED_MII_POLL;
1982		sc->dc_pmode = DC_PMODE_MII;
1983		dc_read_srom(sc, sc->dc_romwidth);
1984		break;
1985	default:
1986		device_printf(dev, "unknown device: %x\n",
1987		    sc->dc_info->dc_devid);
1988		break;
1989	}
1990
1991	/* Save the cache line size. */
1992	if (DC_IS_DAVICOM(sc))
1993		sc->dc_cachesize = 0;
1994	else
1995		sc->dc_cachesize = pci_get_cachelnsz(dev);
1996
1997	/* Reset the adapter. */
1998	dc_reset(sc);
1999
2000	/* Take 21143 out of snooze mode */
2001	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
2002		command = pci_read_config(dev, DC_PCI_CFDD, 4);
2003		command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE);
2004		pci_write_config(dev, DC_PCI_CFDD, command, 4);
2005	}
2006
2007	/*
2008	 * Try to learn something about the supported media.
2009	 * We know that ASIX and ADMtek and Davicom devices
2010	 * will *always* be using MII media, so that's a no-brainer.
2011	 * The tricky ones are the Macronix/PNIC II and the
2012	 * Intel 21143.
2013	 */
2014	if (DC_IS_INTEL(sc))
2015		dc_parse_21143_srom(sc);
2016	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2017		if (sc->dc_type == DC_TYPE_98713)
2018			sc->dc_pmode = DC_PMODE_MII;
2019		else
2020			sc->dc_pmode = DC_PMODE_SYM;
2021	} else if (!sc->dc_pmode)
2022		sc->dc_pmode = DC_PMODE_MII;
2023
2024	/*
2025	 * Get station address from the EEPROM.
2026	 */
2027	switch (sc->dc_type) {
2028	case DC_TYPE_98713:
2029	case DC_TYPE_98713A:
2030	case DC_TYPE_987x5:
2031	case DC_TYPE_PNICII:
2032		dc_read_eeprom(sc, (caddr_t)&mac_offset,
2033		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
2034		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
2035		break;
2036	case DC_TYPE_PNIC:
2037		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
2038		break;
2039	case DC_TYPE_DM9102:
2040		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2041#ifdef __sparc64__
2042		/*
2043		 * If this is an onboard dc(4) the station address read from
2044		 * the EEPROM is all zero and we have to get it from the FCode.
2045		 */
2046		if (eaddr[0] == 0 && (eaddr[1] & ~0xffff) == 0)
2047			OF_getetheraddr(dev, (caddr_t)&eaddr);
2048#endif
2049		break;
2050	case DC_TYPE_21143:
2051	case DC_TYPE_ASIX:
2052		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2053		break;
2054	case DC_TYPE_AL981:
2055	case DC_TYPE_AN985:
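		/*
		 * The ADMtek parts load the station address from the
		 * EEPROM into the PAR registers on reset, so read it
		 * back from there instead of parsing the SROM.
		 */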
2056		eaddr[0] = CSR_READ_4(sc, DC_AL_PAR0);
2057		eaddr[1] = CSR_READ_4(sc, DC_AL_PAR1);
2058		break;
2059	case DC_TYPE_CONEXANT:
2060		bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr,
2061		    ETHER_ADDR_LEN);
2062		break;
2063	case DC_TYPE_XIRCOM:
2064		/* The MAC comes from the CIS. */
2065		mac = pci_get_ether(dev);
2066		if (!mac) {
2067			device_printf(dev, "No station address in CIS!\n");
2068			error = ENXIO;
2069			goto fail;
2070		}
2071		bcopy(mac, eaddr, ETHER_ADDR_LEN);
2072		break;
2073	default:
2074		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
2075		break;
2076	}
2077
2078	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
2079	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
2080	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2081	    sizeof(struct dc_list_data), 1, sizeof(struct dc_list_data),
2082	    0, NULL, NULL, &sc->dc_ltag);
2083	if (error) {
2084		device_printf(dev, "failed to allocate busdma tag\n");
2085		error = ENXIO;
2086		goto fail;
2087	}
2088	error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata,
2089	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap);
2090	if (error) {
2091		device_printf(dev, "failed to allocate DMA safe memory\n");
2092		error = ENXIO;
2093		goto fail;
2094	}
2095	error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata,
2096	    sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr,
2097	    BUS_DMA_NOWAIT);
2098	if (error) {
2099		device_printf(dev, "cannot get address of the descriptors\n");
2100		error = ENXIO;
2101		goto fail;
2102	}
2103
2104	/*
2105	 * Allocate a busdma tag and DMA safe memory for the multicast
2106	 * setup frame.
2107	 */
2108	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
2109	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2110	    DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN,
2111	    0, NULL, NULL, &sc->dc_stag);
2112	if (error) {
2113		device_printf(dev, "failed to allocate busdma tag\n");
2114		error = ENXIO;
2115		goto fail;
2116	}
2117	error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf,
2118	    BUS_DMA_NOWAIT, &sc->dc_smap);
2119	if (error) {
2120		device_printf(dev, "failed to allocate DMA safe memory\n");
2121		error = ENXIO;
2122		goto fail;
2123	}
2124	error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf,
2125	    DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT);
2126	if (error) {
2127		device_printf(dev, "cannot get address of the descriptors\n");
2128		error = ENXIO;
2129		goto fail;
2130	}
2131
2132	/* Allocate a busdma tag for mbufs. */
2133	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
2134	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2135	    MCLBYTES, DC_TX_LIST_CNT, MCLBYTES,
2136	    0, NULL, NULL, &sc->dc_mtag);
2137	if (error) {
2138		device_printf(dev, "failed to allocate busdma tag\n");
2139		error = ENXIO;
2140		goto fail;
2141	}
2142
2143	/* Create the TX/RX busdma maps. */
2144	for (i = 0; i < DC_TX_LIST_CNT; i++) {
2145		error = bus_dmamap_create(sc->dc_mtag, 0,
2146		    &sc->dc_cdata.dc_tx_map[i]);
2147		if (error) {
2148			device_printf(dev, "failed to init TX ring\n");
2149			error = ENXIO;
2150			goto fail;
2151		}
2152	}
2153	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2154		error = bus_dmamap_create(sc->dc_mtag, 0,
2155		    &sc->dc_cdata.dc_rx_map[i]);
2156		if (error) {
2157			device_printf(dev, "failed to init RX ring\n");
2158			error = ENXIO;
2159			goto fail;
2160		}
2161	}
2162	error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap);
2163	if (error) {
2164		device_printf(dev, "failed to create spare RX map\n");
2165		error = ENXIO;
2166		goto fail;
2167	}
2168
2169	ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
2170	if (ifp == NULL) {
2171		device_printf(dev, "can not if_alloc()\n");
2172		error = ENOSPC;
2173		goto fail;
2174	}
2175	ifp->if_softc = sc;
2176	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2177	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2178	ifp->if_ioctl = dc_ioctl;
2179	ifp->if_start = dc_start;
2180	ifp->if_init = dc_init;
2181	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
2182	ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1;
2183	IFQ_SET_READY(&ifp->if_snd);
2184
2185	/*
2186	 * Do MII setup. If this is a 21143, check for a PHY on the
2187	 * MII bus after applying any necessary fixups to twiddle the
2188	 * GPIO bits. If we don't end up finding a PHY, restore the
2189	 * old selection (SIA only or SIA/SYM) and attach the dcphy
2190	 * driver instead.
2191	 */
2192	if (DC_IS_INTEL(sc)) {
2193		dc_apply_fixup(sc, IFM_AUTO);
2194		tmp = sc->dc_pmode;
2195		sc->dc_pmode = DC_PMODE_MII;
2196	}
2197
2198	/*
2199	 * Set up the General Purpose port mode and data so the tulip can
2200	 * talk to the MII.  This needs to be done before mii_phy_probe so
2201	 * that we can actually see the PHYs.
2202	 */
2203	if (DC_IS_XIRCOM(sc)) {
2204		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2205		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2206		DELAY(10);
2207		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2208		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2209		DELAY(10);
2210	}
2211
2212	error = mii_phy_probe(dev, &sc->dc_miibus,
2213	    dc_ifmedia_upd, dc_ifmedia_sts);
2214
2215	if (error && DC_IS_INTEL(sc)) {
2216		sc->dc_pmode = tmp;
2217		if (sc->dc_pmode != DC_PMODE_SIA)
2218			sc->dc_pmode = DC_PMODE_SYM;
2219		sc->dc_flags |= DC_21143_NWAY;
2220		mii_phy_probe(dev, &sc->dc_miibus,
2221		    dc_ifmedia_upd, dc_ifmedia_sts);
2222		/*
2223		 * For non-MII cards, we need to have the 21143
2224		 * drive the LEDs. Except there are some systems
2225		 * like the NEC VersaPro NoteBook PC which have no
2226		 * LEDs, and twiddling these bits has adverse effects
2227		 * on them. (I.e. you suddenly can't get a link.)
2228		 */
2229		if (!(pci_get_subvendor(dev) == 0x1033 &&
2230		    pci_get_subdevice(dev) == 0x8028))
2231			sc->dc_flags |= DC_TULIP_LEDS;
2232		error = 0;
2233	}
2234
2235	if (error) {
2236		device_printf(dev, "MII without any PHY!\n");
2237		goto fail;
2238	}
2239
2240	if (DC_IS_ADMTEK(sc)) {
2241		/*
2242		 * Set automatic TX underrun recovery for the ADMtek chips
2243		 */
2244		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
2245	}
2246
2247	/*
2248	 * Tell the upper layer(s) we support long frames.
2249	 */
2250	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2251	ifp->if_capabilities |= IFCAP_VLAN_MTU;
2252	ifp->if_capenable = ifp->if_capabilities;
2253#ifdef DEVICE_POLLING
2254	ifp->if_capabilities |= IFCAP_POLLING;
2255#endif
2256
2257	callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0);
2258	callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0);
2259
2260	/*
2261	 * Call MI attach routine.
2262	 */
2263	ether_ifattach(ifp, (caddr_t)eaddr);
2264
2265	/* Hook interrupt last to avoid having to lock softc */
2266	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE,
2267	    NULL, dc_intr, sc, &sc->dc_intrhand);
2268
2269	if (error) {
2270		device_printf(dev, "couldn't set up irq\n");
2271		ether_ifdetach(ifp);
2272		goto fail;
2273	}
2274
2275fail:
2276	if (error)
2277		dc_detach(dev);
2278	return (error);
2279}
2280
2281/*
2282 * Shutdown hardware and free up resources. This can be called any
2283 * time after the mutex has been initialized. It is called in both
2284 * the error case in attach and the normal detach case so it needs
2285 * to be careful about only freeing resources that have actually been
2286 * allocated.
2287 */
2288static int
2289dc_detach(device_t dev)
2290{
2291	struct dc_softc *sc;
2292	struct ifnet *ifp;
2293	struct dc_mediainfo *m;
2294	int i;
2295
2296	sc = device_get_softc(dev);
2297	KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));
2298
2299	ifp = sc->dc_ifp;
2300
2301#ifdef DEVICE_POLLING
2302	if (ifp->if_capenable & IFCAP_POLLING)
2303		ether_poll_deregister(ifp);
2304#endif
2305
2306	/* These should only be active if attach succeeded */
2307	if (device_is_attached(dev)) {
2308		DC_LOCK(sc);
2309		dc_stop(sc);
2310		DC_UNLOCK(sc);
2311		callout_drain(&sc->dc_stat_ch);
2312		callout_drain(&sc->dc_wdog_ch);
2313		ether_ifdetach(ifp);
2314	}
2315	if (sc->dc_miibus)
2316		device_delete_child(dev, sc->dc_miibus);
2317	bus_generic_detach(dev);
2318
2319	if (sc->dc_intrhand)
2320		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
2321	if (sc->dc_irq)
2322		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
2323	if (sc->dc_res)
2324		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
2325
2326	if (ifp)
2327		if_free(ifp);
2328
2329	if (sc->dc_cdata.dc_sbuf != NULL)
2330		bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap);
2331	if (sc->dc_ldata != NULL)
2332		bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap);
2333	if (sc->dc_mtag) {
2334		for (i = 0; i < DC_TX_LIST_CNT; i++)
2335			if (sc->dc_cdata.dc_tx_map[i] != NULL)
2336				bus_dmamap_destroy(sc->dc_mtag,
2337				    sc->dc_cdata.dc_tx_map[i]);
2338		for (i = 0; i < DC_RX_LIST_CNT; i++)
2339			if (sc->dc_cdata.dc_rx_map[i] != NULL)
2340				bus_dmamap_destroy(sc->dc_mtag,
2341				    sc->dc_cdata.dc_rx_map[i]);
2342		bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap);
2343	}
2344	if (sc->dc_stag)
2345		bus_dma_tag_destroy(sc->dc_stag);
2346	if (sc->dc_mtag)
2347		bus_dma_tag_destroy(sc->dc_mtag);
2348	if (sc->dc_ltag)
2349		bus_dma_tag_destroy(sc->dc_ltag);
2350
2351	free(sc->dc_pnic_rx_buf, M_DEVBUF);
2352
2353	while (sc->dc_mi != NULL) {
2354		m = sc->dc_mi->dc_next;
2355		free(sc->dc_mi, M_DEVBUF);
2356		sc->dc_mi = m;
2357	}
2358	free(sc->dc_srom, M_DEVBUF);
2359
2360	mtx_destroy(&sc->dc_mtx);
2361
2362	return (0);
2363}
2364
2365/*
2366 * Initialize the transmit descriptors.
2367 */
2368static int
2369dc_list_tx_init(struct dc_softc *sc)
2370{
2371	struct dc_chain_data *cd;
2372	struct dc_list_data *ld;
2373	int i, nexti;
2374
2375	cd = &sc->dc_cdata;
2376	ld = sc->dc_ldata;
2377	for (i = 0; i < DC_TX_LIST_CNT; i++) {
2378		if (i == DC_TX_LIST_CNT - 1)
2379			nexti = 0;
2380		else
2381			nexti = i + 1;
2382		ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
2383		cd->dc_tx_chain[i] = NULL;
2384		ld->dc_tx_list[i].dc_data = 0;
2385		ld->dc_tx_list[i].dc_ctl = 0;
2386	}
2387
2388	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
2389	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2390	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2391	return (0);
2392}
2393
2395/*
2396 * Initialize the RX descriptors and allocate mbufs for them. Note that
2397 * we arrange the descriptors in a closed ring, so that the last descriptor
2398 * points back to the first.
2399 */
2400static int
2401dc_list_rx_init(struct dc_softc *sc)
2402{
2403	struct dc_chain_data *cd;
2404	struct dc_list_data *ld;
2405	int i, nexti;
2406
2407	cd = &sc->dc_cdata;
2408	ld = sc->dc_ldata;
2409
2410	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2411		if (dc_newbuf(sc, i, 1) != 0)
2412			return (ENOBUFS);
2413		if (i == DC_RX_LIST_CNT - 1)
2414			nexti = 0;
2415		else
2416			nexti = i + 1;
2417		ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
2418	}
2419
2420	cd->dc_rx_prod = 0;
2421	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2422	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2423	return (0);
2424}
2425
2426/*
2427 * Initialize an RX descriptor and attach an MBUF cluster.
2428 */
2429static int
2430dc_newbuf(struct dc_softc *sc, int i, int alloc)
2431{
2432	struct mbuf *m_new;
2433	bus_dmamap_t tmp;
2434	bus_dma_segment_t segs[1];
2435	int error, nseg;
2436
2437	if (alloc) {
2438		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2439		if (m_new == NULL)
2440			return (ENOBUFS);
2441	} else {
2442		m_new = sc->dc_cdata.dc_rx_chain[i];
2443		m_new->m_data = m_new->m_ext.ext_buf;
2444	}
2445	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
2446	m_adj(m_new, sizeof(u_int64_t));
2447
2448	/*
2449	 * If this is a PNIC chip, zero the buffer. This is part
2450	 * of the workaround for the receive bug in the 82c168 and
2451	 * 82c169 chips.
2452	 */
2453	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
2454		bzero(mtod(m_new, char *), m_new->m_len);
2455
2456	/* No need to remap the mbuf if we're reusing it. */
2457	if (alloc) {
2458		error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_sparemap,
2459		    m_new, segs, &nseg, 0);
2460		KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
2461		if (error) {
2462			m_freem(m_new);
2463			return (error);
2464		}
2465		sc->dc_ldata->dc_rx_list[i].dc_data = htole32(segs->ds_addr);
2466		bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
2467		tmp = sc->dc_cdata.dc_rx_map[i];
2468		sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
2469		sc->dc_sparemap = tmp;
2470		sc->dc_cdata.dc_rx_chain[i] = m_new;
2471	}
2472
2473	sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
2474	sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
2475	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
2476	    BUS_DMASYNC_PREREAD);
2477	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
2478	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2479	return (0);
2480}
2481
2482/*
2483 * Grrrrr.
2484 * The PNIC chip has a terrible bug in it that manifests itself during
2485 * periods of heavy activity. The exact mode of failure is difficult to
2486 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
2487 * will happen on slow machines. The bug is that sometimes instead of
2488 * uploading one complete frame during reception, it uploads what looks
2489 * like the entire contents of its FIFO memory. The frame we want is at
2490 * the end of the whole mess, but we never know exactly how much data has
2491 * been uploaded, so salvaging the frame is hard.
2492 *
2493 * There is only one way to do it reliably, and it's disgusting.
2494 * Here's what we know:
2495 *
2496 * - We know there will always be somewhere between one and three extra
2497 *   descriptors uploaded.
2498 *
2499 * - We know the desired received frame will always be at the end of the
2500 *   total data upload.
2501 *
2502 * - We know the size of the desired received frame because it will be
2503 *   provided in the length field of the status word in the last descriptor.
2504 *
2505 * Here's what we do:
2506 *
2507 * - When we allocate buffers for the receive ring, we bzero() them.
2508 *   This means that we know that the buffer contents should be all
2509 *   zeros, except for data uploaded by the chip.
2510 *
2511 * - We also force the PNIC chip to upload frames that include the
2512 *   ethernet CRC at the end.
2513 *
2514 * - We gather all of the bogus frame data into a single buffer.
2515 *
2516 * - We then position a pointer at the end of this buffer and scan
2517 *   backwards until we encounter the first non-zero byte of data.
2518 *   This is the end of the received frame. We know we will encounter
2519 *   some data at the end of the frame because the CRC will always be
2520 *   there, so even if the sender transmits a packet of all zeros,
2521 *   we won't be fooled.
2522 *
2523 * - We know the size of the actual received frame, so we subtract
2524 *   that value from the current pointer location. This brings us
2525 *   to the start of the actual received packet.
2526 *
2527 * - We copy this into an mbuf and pass it on, along with the actual
2528 *   frame length.
2529 *
2530 * The performance hit is tremendous, but it beats dropping frames all
2531 * the time.
2532 */
2533
2534#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
2535static void
2536dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
2537{
2538	struct dc_desc *cur_rx;
2539	struct dc_desc *c = NULL;
2540	struct mbuf *m = NULL;
2541	unsigned char *ptr;
2542	int i, total_len;
2543	u_int32_t rxstat = 0;
2544
2545	i = sc->dc_pnic_rx_bug_save;
2546	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
2547	ptr = sc->dc_pnic_rx_buf;
2548	bzero(ptr, DC_RXLEN * 5);
2549
2550	/* Copy all the bytes from the bogus buffers. */
2551	while (1) {
2552		c = &sc->dc_ldata->dc_rx_list[i];
2553		rxstat = le32toh(c->dc_status);
2554		m = sc->dc_cdata.dc_rx_chain[i];
2555		bcopy(mtod(m, char *), ptr, DC_RXLEN);
2556		ptr += DC_RXLEN;
2557		/* If this is the last buffer, break out. */
2558		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
2559			break;
2560		dc_newbuf(sc, i, 0);
2561		DC_INC(i, DC_RX_LIST_CNT);
2562	}
2563
2564	/* Find the length of the actual receive frame. */
2565	total_len = DC_RXBYTES(rxstat);
2566
2567	/* Scan backwards until we hit a non-zero byte. */
2568	while (*ptr == 0x00)
2569		ptr--;
2570
2571	/* Round off. */
2572	if ((uintptr_t)(ptr) & 0x3)
2573		ptr -= 1;
2574
2575	/* Now find the start of the frame. */
2576	ptr -= total_len;
2577	if (ptr < sc->dc_pnic_rx_buf)
2578		ptr = sc->dc_pnic_rx_buf;
2579
2580	/*
2581	 * Now copy the salvaged frame to the last mbuf and fake up
2582	 * the status word to make it look like a successful
2583	 * frame reception.
2584	 */
2585	dc_newbuf(sc, i, 0);
2586	bcopy(ptr, mtod(m, char *), total_len);
2587	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
2588}
2589
2590/*
2591 * This routine searches the RX ring for dirty descriptors in the
2592 * event that the rxeof routine falls out of sync with the chip's
2593 * current descriptor pointer. This may happen sometimes as a result
2594 * of a "no RX buffer available" condition that happens when the chip
2595 * consumes all of the RX buffers before the driver has a chance to
2596 * process the RX ring. This routine may need to be called more than
2597 * once to bring the driver back in sync with the chip; however, we
2598 * should still be getting RX DONE interrupts to drive the search
2599 * for new packets in the RX ring, so we should catch up eventually.
2600 */
2601static int
2602dc_rx_resync(struct dc_softc *sc)
2603{
2604	struct dc_desc *cur_rx;
2605	int i, pos;
2606
2607	pos = sc->dc_cdata.dc_rx_prod;
2608
2609	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2610		cur_rx = &sc->dc_ldata->dc_rx_list[pos];
2611		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
2612			break;
2613		DC_INC(pos, DC_RX_LIST_CNT);
2614	}
2615
2616	/* If the ring really is empty, then just return. */
2617	if (i == DC_RX_LIST_CNT)
2618		return (0);
2619
2620	/* We've fallen behind the chip: catch up with it. */
2621	sc->dc_cdata.dc_rx_prod = pos;
2622
2623	return (EAGAIN);
2624}
2625
2626/*
2627 * A frame has been uploaded: pass the resulting mbuf chain up to
2628 * the higher level protocols.
2629 */
2630static void
2631dc_rxeof(struct dc_softc *sc)
2632{
2633	struct mbuf *m, *m0;
2634	struct ifnet *ifp;
2635	struct dc_desc *cur_rx;
2636	int i, total_len = 0;
2637	u_int32_t rxstat;
2638
2639	DC_LOCK_ASSERT(sc);
2640
2641	ifp = sc->dc_ifp;
2642	i = sc->dc_cdata.dc_rx_prod;
2643
2644	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
2645	while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
2646	    DC_RXSTAT_OWN)) {
2647#ifdef DEVICE_POLLING
2648		if (ifp->if_capenable & IFCAP_POLLING) {
2649			if (sc->rxcycles <= 0)
2650				break;
2651			sc->rxcycles--;
2652		}
2653#endif
2654		cur_rx = &sc->dc_ldata->dc_rx_list[i];
2655		rxstat = le32toh(cur_rx->dc_status);
2656		m = sc->dc_cdata.dc_rx_chain[i];
2657		bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
2658		    BUS_DMASYNC_POSTREAD);
2659		total_len = DC_RXBYTES(rxstat);
2660
2661		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2662			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
2663				if (rxstat & DC_RXSTAT_FIRSTFRAG)
2664					sc->dc_pnic_rx_bug_save = i;
2665				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
2666					DC_INC(i, DC_RX_LIST_CNT);
2667					continue;
2668				}
2669				dc_pnic_rx_bug_war(sc, i);
2670				rxstat = le32toh(cur_rx->dc_status);
2671				total_len = DC_RXBYTES(rxstat);
2672			}
2673		}
2674
2675		/*
2676		 * If an error occurs, update stats, clear the
2677		 * status word and leave the mbuf cluster in place:
2678		 * it should simply get re-used next time this descriptor
2679		 * comes up in the ring.  However, don't report long
2680		 * frames as errors since they could be vlans.
2681		 */
2682		if (rxstat & DC_RXSTAT_RXERR) {
2683			if (!(rxstat & DC_RXSTAT_GIANT) ||
2684			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
2685				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
2686				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
2687				ifp->if_ierrors++;
2688				if (rxstat & DC_RXSTAT_COLLSEEN)
2689					ifp->if_collisions++;
2690				dc_newbuf(sc, i, 0);
2691				if (rxstat & DC_RXSTAT_CRCERR) {
2692					DC_INC(i, DC_RX_LIST_CNT);
2693					continue;
2694				} else {
2695					dc_init_locked(sc);
2696					return;
2697				}
2698			}
2699		}
2700
2701		/* No errors; receive the packet. */
2702		total_len -= ETHER_CRC_LEN;
2703#ifdef __NO_STRICT_ALIGNMENT
2704		/*
2705		 * On architectures without alignment problems we try to
2706		 * allocate a new buffer for the receive ring, and pass up
2707		 * the one where the packet is already, saving the expensive
2708		 * copy done in m_devget().
2709		 * If we are on an architecture with alignment problems, or
2710		 * if the allocation fails, then use m_devget and leave the
2711		 * existing buffer in the receive ring.
2712		 */
2713		if (dc_newbuf(sc, i, 1) == 0) {
2714			m->m_pkthdr.rcvif = ifp;
2715			m->m_pkthdr.len = m->m_len = total_len;
2716			DC_INC(i, DC_RX_LIST_CNT);
2717		} else
2718#endif
2719		{
2720			m0 = m_devget(mtod(m, char *), total_len,
2721				ETHER_ALIGN, ifp, NULL);
2722			dc_newbuf(sc, i, 0);
2723			DC_INC(i, DC_RX_LIST_CNT);
2724			if (m0 == NULL) {
2725				ifp->if_ierrors++;
2726				continue;
2727			}
2728			m = m0;
2729		}
2730
2731		ifp->if_ipackets++;
2732		DC_UNLOCK(sc);
2733		(*ifp->if_input)(ifp, m);
2734		DC_LOCK(sc);
2735	}
2736
2737	sc->dc_cdata.dc_rx_prod = i;
2738}
2739
2740/*
2741 * A frame was downloaded to the chip. It's safe for us to clean up
2742 * the list buffers.
2743 */
2744
2745static void
2746dc_txeof(struct dc_softc *sc)
2747{
2748	struct dc_desc *cur_tx = NULL;
2749	struct ifnet *ifp;
2750	int idx;
2751	u_int32_t ctl, txstat;
2752
2753	ifp = sc->dc_ifp;
2754
2755	/*
2756	 * Go through our tx list and free mbufs for those
2757	 * frames that have been transmitted.
2758	 */
2759	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
2760	idx = sc->dc_cdata.dc_tx_cons;
2761	while (idx != sc->dc_cdata.dc_tx_prod) {
2762
2763		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
2764		txstat = le32toh(cur_tx->dc_status);
2765		ctl = le32toh(cur_tx->dc_ctl);
2766
2767		if (txstat & DC_TXSTAT_OWN)
2768			break;
2769
2770		if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) {
2771			if (ctl & DC_TXCTL_SETUP) {
2772				/*
2773				 * Yes, the PNIC is so brain damaged
2774				 * that it will sometimes generate a TX
2775				 * underrun error while DMAing the RX
2776				 * filter setup frame. If we detect this,
2777				 * we have to send the setup frame again,
2778				 * or else the filter won't be programmed
2779				 * correctly.
2780				 */
2781				if (DC_IS_PNIC(sc)) {
2782					if (txstat & DC_TXSTAT_ERRSUM)
2783						dc_setfilt(sc);
2784				}
2785				sc->dc_cdata.dc_tx_chain[idx] = NULL;
2786			}
2787			sc->dc_cdata.dc_tx_cnt--;
2788			DC_INC(idx, DC_TX_LIST_CNT);
2789			continue;
2790		}
2791
2792		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
2793			/*
2794			 * XXX: Why does my Xircom taunt me so?
2795			 * For some reason it likes setting the CARRLOST flag
2796			 * even when the carrier is there. wtf?!?
2797			 * Who knows, but Conexant chips have the
2798			 * same problem. Maybe they took lessons
2799			 * from Xircom.
2800			 */
2801			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2802			    sc->dc_pmode == DC_PMODE_MII &&
2803			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
2804			    DC_TXSTAT_NOCARRIER)))
2805				txstat &= ~DC_TXSTAT_ERRSUM;
2806		} else {
2807			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2808			    sc->dc_pmode == DC_PMODE_MII &&
2809			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
2810			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
2811				txstat &= ~DC_TXSTAT_ERRSUM;
2812		}
2813
2814		if (txstat & DC_TXSTAT_ERRSUM) {
2815			ifp->if_oerrors++;
2816			if (txstat & DC_TXSTAT_EXCESSCOLL)
2817				ifp->if_collisions++;
2818			if (txstat & DC_TXSTAT_LATECOLL)
2819				ifp->if_collisions++;
2820			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
2821				dc_init_locked(sc);
2822				return;
2823			}
2824		}
2825
2826		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;
2827
2828		ifp->if_opackets++;
2829		if (sc->dc_cdata.dc_tx_chain[idx] != NULL) {
2830			bus_dmamap_sync(sc->dc_mtag,
2831			    sc->dc_cdata.dc_tx_map[idx],
2832			    BUS_DMASYNC_POSTWRITE);
2833			bus_dmamap_unload(sc->dc_mtag,
2834			    sc->dc_cdata.dc_tx_map[idx]);
2835			m_freem(sc->dc_cdata.dc_tx_chain[idx]);
2836			sc->dc_cdata.dc_tx_chain[idx] = NULL;
2837		}
2838
2839		sc->dc_cdata.dc_tx_cnt--;
2840		DC_INC(idx, DC_TX_LIST_CNT);
2841	}
2842	sc->dc_cdata.dc_tx_cons = idx;
2843
2844	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_RSVD)
2845		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2846
2847	if (sc->dc_cdata.dc_tx_cnt == 0)
2848		sc->dc_wdog_timer = 0;
2849}
2850
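/*
 * Periodic timer: monitor the MII/SIA for link changes, kick off any
 * packets that were queued while the link was down, and reschedule
 * ourselves (at 10Hz while a 21143-style NWAY negotiation is still in
 * progress, otherwise once a second).
 */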
2851static void
2852dc_tick(void *xsc)
2853{
2854	struct dc_softc *sc;
2855	struct mii_data *mii;
2856	struct ifnet *ifp;
2857	u_int32_t r;
2858
2859	sc = xsc;
2860	DC_LOCK_ASSERT(sc);
2861	ifp = sc->dc_ifp;
2862	mii = device_get_softc(sc->dc_miibus);
2863
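	/*
	 * On chips flagged DC_REDUCED_MII_POLL, avoid hammering the MII
	 * every tick: for 21143-style NWAY parts, force a media change
	 * when the SIA status bits show the current link has failed and
	 * only run mii_tick() while no link is up; for the others, only
	 * run mii_tick() while the receiver is idle and no transmissions
	 * are pending.
	 */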
2864	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
2865		if (sc->dc_flags & DC_21143_NWAY) {
2866			r = CSR_READ_4(sc, DC_10BTSTAT);
2867			if (IFM_SUBTYPE(mii->mii_media_active) ==
2868			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
2869				sc->dc_link = 0;
2870				mii_mediachg(mii);
2871			}
2872			if (IFM_SUBTYPE(mii->mii_media_active) ==
2873			    IFM_10_T && (r & DC_TSTAT_LS10)) {
2874				sc->dc_link = 0;
2875				mii_mediachg(mii);
2876			}
2877			if (sc->dc_link == 0)
2878				mii_tick(mii);
2879		} else {
2880			r = CSR_READ_4(sc, DC_ISR);
2881			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
2882			    sc->dc_cdata.dc_tx_cnt == 0) {
2883				mii_tick(mii);
2884				if (!(mii->mii_media_status & IFM_ACTIVE))
2885					sc->dc_link = 0;
2886			}
2887		}
2888	} else
2889		mii_tick(mii);
2890
2891	/*
2892	 * When the init routine completes, we expect to be able to send
2893	 * packets right away, and in fact the network code will send a
2894	 * gratuitous ARP the moment the init routine marks the interface
2895	 * as running. However, even though the MAC may have been initialized,
2896	 * there may be a delay of a few seconds before the PHY completes
2897	 * autonegotiation and the link is brought up. Any transmissions
2898	 * made during that delay will be lost. Dealing with this is tricky:
2899	 * we can't just pause in the init routine while waiting for the
2900	 * PHY to come ready since that would bring the whole system to
2901	 * a screeching halt for several seconds.
2902	 *
2903	 * What we do here is prevent the TX start routine from sending
2904	 * any packets until a link has been established. After the
2905	 * interface has been initialized, the tick routine will poll
2906	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
2907	 * that time, packets will stay in the send queue, and once the
2908	 * link comes up, they will be flushed out to the wire.
2909	 */
2910	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
2911	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2912		sc->dc_link++;
2913		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2914			dc_start_locked(ifp);
2915	}
2916
2917	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
2918		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
2919	else
2920		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
2921}
2922
2923/*
2924 * A transmit underrun has occurred.  Back off the transmit threshold,
2925 * or switch to store and forward mode if we have to.
2926 */
2927static void
2928dc_tx_underrun(struct dc_softc *sc)
2929{
2930	u_int32_t isr;
2931	int i;
2932
2933	if (DC_IS_DAVICOM(sc))
2934		dc_init_locked(sc);
2935
2936	if (DC_IS_INTEL(sc)) {
2937		/*
2938		 * The real 21143 requires that the transmitter be idle
2939		 * in order to change the transmit threshold or store
2940		 * and forward state.
2941		 */
2942		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2943
2944		for (i = 0; i < DC_TIMEOUT; i++) {
2945			isr = CSR_READ_4(sc, DC_ISR);
2946			if (isr & DC_ISR_TX_IDLE)
2947				break;
2948			DELAY(10);
2949		}
2950		if (i == DC_TIMEOUT) {
2951			device_printf(sc->dc_dev,
2952			    "%s: failed to force tx to idle state\n",
2953			    __func__);
2954			dc_init_locked(sc);
2955		}
2956	}
2957
2958	device_printf(sc->dc_dev, "TX underrun -- ");
2959	sc->dc_txthresh += DC_TXTHRESH_INC;
2960	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2961		printf("using store and forward mode\n");
2962		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2963	} else {
2964		printf("increasing TX threshold\n");
2965		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2966		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2967	}
2968
2969	if (DC_IS_INTEL(sc))
2970		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2971}
2972
2973#ifdef DEVICE_POLLING
2974static poll_handler_t dc_poll;
2975
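/*
 * Polling entry point: with interrupts masked, service up to 'count'
 * received frames, reclaim completed transmissions, and (for
 * POLL_AND_CHECK_STATUS) handle the error conditions that would
 * normally be reported through the interrupt handler.
 */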
2976static void
2977dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2978{
2979	struct dc_softc *sc = ifp->if_softc;
2980
2981	DC_LOCK(sc);
2982
2983	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2984		DC_UNLOCK(sc);
2985		return;
2986	}
2987
2988	sc->rxcycles = count;
2989	dc_rxeof(sc);
2990	dc_txeof(sc);
2991	if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
2992	    !(ifp->if_drv_flags & IFF_DRV_OACTIVE))
2993		dc_start_locked(ifp);
2994
2995	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2996		u_int32_t	status;
2997
2998		status = CSR_READ_4(sc, DC_ISR);
2999		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
3000			DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
3001			DC_ISR_BUS_ERR);
3002		if (!status) {
3003			DC_UNLOCK(sc);
3004			return;
3005		}
3006		/* ack what we have */
3007		CSR_WRITE_4(sc, DC_ISR, status);
3008
3009		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
3010			u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
3011			ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff);
3012
3013			if (dc_rx_resync(sc))
3014				dc_rxeof(sc);
3015		}
3016		/* restart transmit unit if necessary */
3017		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
3018			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
3019
3020		if (status & DC_ISR_TX_UNDERRUN)
3021			dc_tx_underrun(sc);
3022
3023		if (status & DC_ISR_BUS_ERR) {
3024			if_printf(ifp, "%s: bus error\n", __func__);
3025			dc_reset(sc);
3026			dc_init_locked(sc);
3027		}
3028	}
3029	DC_UNLOCK(sc);
3030}
3031#endif /* DEVICE_POLLING */
3032
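/*
 * Interrupt handler: mask further interrupts, service RX/TX completions
 * and error conditions until the ISR goes quiet, then unmask and kick
 * the transmitter if anything is still queued.  When polling is enabled
 * this routine simply returns.
 */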
3033static void
3034dc_intr(void *arg)
3035{
3036	struct dc_softc *sc;
3037	struct ifnet *ifp;
3038	u_int32_t status;
3039
3040	sc = arg;
3041
3042	if (sc->suspended)
3043		return;
3044
3045	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
3046		return;
3047
3048	DC_LOCK(sc);
3049	ifp = sc->dc_ifp;
3050#ifdef DEVICE_POLLING
3051	if (ifp->if_capenable & IFCAP_POLLING) {
3052		DC_UNLOCK(sc);
3053		return;
3054	}
3055#endif
3056
3057	/* Suppress unwanted interrupts */
3058	if (!(ifp->if_flags & IFF_UP)) {
3059		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
3060			dc_stop(sc);
3061		DC_UNLOCK(sc);
3062		return;
3063	}
3064
3065	/* Disable interrupts. */
3066	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3067
3068	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
3069	    status != 0xFFFFFFFF &&
3070	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3071
3072		CSR_WRITE_4(sc, DC_ISR, status);
3073
3074		if (status & DC_ISR_RX_OK) {
3075			int		curpkts;
3076			curpkts = ifp->if_ipackets;
3077			dc_rxeof(sc);
3078			if (curpkts == ifp->if_ipackets) {
3079				while (dc_rx_resync(sc))
3080					dc_rxeof(sc);
3081			}
3082		}
3083
3084		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
3085			dc_txeof(sc);
3086
3087		if (status & DC_ISR_TX_IDLE) {
3088			dc_txeof(sc);
3089			if (sc->dc_cdata.dc_tx_cnt) {
3090				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
3091				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
3092			}
3093		}
3094
3095		if (status & DC_ISR_TX_UNDERRUN)
3096			dc_tx_underrun(sc);
3097
3098		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
3100			int		curpkts;
3101			curpkts = ifp->if_ipackets;
3102			dc_rxeof(sc);
3103			if (curpkts == ifp->if_ipackets) {
3104				while (dc_rx_resync(sc))
3105					dc_rxeof(sc);
3106			}
3107		}
3108
3109		if (status & DC_ISR_BUS_ERR) {
3110			dc_reset(sc);
3111			dc_init_locked(sc);
3112		}
3113	}
3114
3115	/* Re-enable interrupts. */
3116	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
3117
3118	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3119		dc_start_locked(ifp);
3120
3121	DC_UNLOCK(sc);
3122}
3123
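/*
 * Bus DMA map-load callback for transmit: walk the DMA segments of the
 * mbuf chain and fill in one TX descriptor per segment.  Ownership of
 * the first descriptor is handed to the chip last so it never sees a
 * partially built chain.
 */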
3124static void
3125dc_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
3126    bus_size_t mapsize, int error)
3127{
3132	struct dc_softc *sc;
3133	struct dc_desc *f;
3134	int cur, first, frag, i;
3135
3136	sc = arg;
3137	if (error)
3138		return;
3139
3140	first = cur = frag = sc->dc_cdata.dc_tx_prod;
3141	for (i = 0; i < nseg; i++) {
3142		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
3143		    (frag == (DC_TX_LIST_CNT - 1)) &&
3144		    (first != sc->dc_cdata.dc_tx_first)) {
3145			bus_dmamap_unload(sc->dc_mtag,
3146			    sc->dc_cdata.dc_tx_map[first]);
3147			sc->dc_cdata.dc_tx_err = ENOBUFS;
3148			return;
3149		}
3150
3151		f = &sc->dc_ldata->dc_tx_list[frag];
3152		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
3153		if (i == 0) {
3154			f->dc_status = 0;
3155			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
3156		} else
3157			f->dc_status = htole32(DC_TXSTAT_OWN);
3158		f->dc_data = htole32(segs[i].ds_addr);
3159		cur = frag;
3160		DC_INC(frag, DC_TX_LIST_CNT);
3161	}
3162
3163	sc->dc_cdata.dc_tx_err = 0;
3164	sc->dc_cdata.dc_tx_prod = frag;
3165	sc->dc_cdata.dc_tx_cnt += nseg;
3166	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
3167	sc->dc_cdata.dc_tx_chain[cur] = sc->dc_cdata.dc_tx_mapping;
3168	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
3169		sc->dc_ldata->dc_tx_list[first].dc_ctl |=
3170		    htole32(DC_TXCTL_FINT);
3171	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
3172		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
3173	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
3174		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
3175	sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);
3176}
3177
3178/*
3179 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
3180 * pointers to the fragment pointers.
3181 */
3182static int
3183dc_encap(struct dc_softc *sc, struct mbuf **m_head)
3184{
3185	struct mbuf *m;
3186	int error, idx, chainlen = 0;
3187
3188	/*
3189	 * If there's no way we can send any packets, return now.
3190	 */
3191	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD)
3192		return (ENOBUFS);
3193
3194	/*
3195	 * Count the number of frags in this chain to see if
3196	 * we need to m_defrag.  Since the descriptor list is shared
3197	 * by all packets, we'll m_defrag long chains so that they
3198	 * do not use up the entire list, even if they would fit.
3199	 */
3200	for (m = *m_head; m != NULL; m = m->m_next)
3201		chainlen++;
3202
3203	m = NULL;
3204	if ((sc->dc_flags & DC_TX_COALESCE && ((*m_head)->m_next != NULL ||
3205	    sc->dc_flags & DC_TX_ALIGN)) || (chainlen > DC_TX_LIST_CNT / 4) ||
3206	    (DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt) <=
3207	    DC_TX_LIST_RSVD)) {
3208		m = m_defrag(*m_head, M_DONTWAIT);
3209		if (m == NULL) {
3210			m_freem(*m_head);
3211			*m_head = NULL;
3212			return (ENOBUFS);
3213		}
3214		*m_head = m;
3215	}
3216	idx = sc->dc_cdata.dc_tx_prod;
3217	sc->dc_cdata.dc_tx_mapping = *m_head;
3218	error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
3219	    *m_head, dc_dma_map_txbuf, sc, 0);
3220	if (error != 0 || sc->dc_cdata.dc_tx_err != 0) {
3221		if (m != NULL) {
3222			m_freem(m);
3223			*m_head = NULL;
3224		}
3225		return (error != 0 ? error : sc->dc_cdata.dc_tx_err);
3226	}
3227	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
3228	    BUS_DMASYNC_PREWRITE);
3229	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
3230	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3231	return (0);
3232}
3233
3234/*
3235 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3236 * to the mbuf data regions directly in the transmit lists. We also save a
3237 * copy of the pointers since the transmit list fragment pointers are
3238 * physical addresses.
3239 */
3240
3241static void
3242dc_start(struct ifnet *ifp)
3243{
3244	struct dc_softc *sc;
3245
3246	sc = ifp->if_softc;
3247	DC_LOCK(sc);
3248	dc_start_locked(ifp);
3249	DC_UNLOCK(sc);
3250}
3251
3252static void
3253dc_start_locked(struct ifnet *ifp)
3254{
3255	struct dc_softc *sc;
3256	struct mbuf *m_head = NULL;
3257	unsigned int queued = 0;
3258	int idx;
3259
3260	sc = ifp->if_softc;
3261
3262	DC_LOCK_ASSERT(sc);
3263
3264	if (!sc->dc_link && ifp->if_snd.ifq_len < 10)
3265		return;
3266
3267	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
3268		return;
3269
3270	idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;
3271
3272	while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
3273		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
3274		if (m_head == NULL)
3275			break;
3276
3277		if (dc_encap(sc, &m_head)) {
3278			if (m_head == NULL)
3279				break;
3280			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
3281			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3282			break;
3283		}
3284		idx = sc->dc_cdata.dc_tx_prod;
3285
3286		queued++;
3287		/*
3288		 * If there's a BPF listener, bounce a copy of this frame
3289		 * to him.
3290		 */
3291		BPF_MTAP(ifp, m_head);
3292
3293		if (sc->dc_flags & DC_TX_ONE) {
3294			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3295			break;
3296		}
3297	}
3298
3299	if (queued > 0) {
3300		/* Transmit */
3301		if (!(sc->dc_flags & DC_TX_POLL))
3302			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
3303
3304		/*
3305		 * Set a timeout in case the chip goes out to lunch.
3306		 */
3307		sc->dc_wdog_timer = 5;
3308	}
3309}
3310
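/*
 * Initialize the hardware: dc_init() is the if_init entry point and
 * just wraps dc_init_locked() with the driver mutex held.
 */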
3311static void
3312dc_init(void *xsc)
3313{
3314	struct dc_softc *sc = xsc;
3315
3316	DC_LOCK(sc);
3317	dc_init_locked(sc);
3318	DC_UNLOCK(sc);
3319}
3320
3321static void
3322dc_init_locked(struct dc_softc *sc)
3323{
3324	struct ifnet *ifp = sc->dc_ifp;
3325	struct mii_data *mii;
3326
3327	DC_LOCK_ASSERT(sc);
3328
3329	mii = device_get_softc(sc->dc_miibus);
3330
3331	/*
3332	 * Cancel pending I/O and free all RX/TX buffers.
3333	 */
3334	dc_stop(sc);
3335	dc_reset(sc);
3336
3337	/*
3338	 * Set cache alignment and burst length.
3339	 */
3340	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
3341		CSR_WRITE_4(sc, DC_BUSCTL, 0);
3342	else
3343		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
3344	/*
3345	 * Evenly share the bus between receive and transmit process.
3346	 */
3347	if (DC_IS_INTEL(sc))
3348		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
3349	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
3350		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
3351	} else {
3352		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
3353	}
3354	if (sc->dc_flags & DC_TX_POLL)
3355		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
3356	switch (sc->dc_cachesize) {
3357	case 32:
3358		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
3359		break;
3360	case 16:
3361		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
3362		break;
3363	case 8:
3364		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
3365		break;
3366	case 0:
3367	default:
3368		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
3369		break;
3370	}
3371
3372	if (sc->dc_flags & DC_TX_STORENFWD)
3373		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
3374	else {
3375		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
3376			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
3377		} else {
3378			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
3379			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
3380		}
3381	}
3382
3383	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
3384	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);
3385
3386	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
3387		/*
3388		 * The app notes for the 98713 and 98715A say that
3389		 * in order to have the chips operate properly, a magic
3390		 * number must be written to CSR16. Macronix does not
3391		 * document the meaning of these bits so there's no way
3392		 * to know exactly what they do. The 98713 has a magic
3393		 * number all its own; the rest all use a different one.
3394		 */
3395		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
3396		if (sc->dc_type == DC_TYPE_98713)
3397			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
3398		else
3399			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
3400	}
3401
3402	if (DC_IS_XIRCOM(sc)) {
3403		/*
3404		 * Set up the General Purpose port mode and data so the tulip
3405		 * can talk to the MII.
3406		 */
3407		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
3408			   DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
3409		DELAY(10);
3410		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
3411			   DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
3412		DELAY(10);
3413	}
3414
3415	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
3416	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);
3417
3418	/* Init circular RX list. */
3419	if (dc_list_rx_init(sc) == ENOBUFS) {
3420		device_printf(sc->dc_dev,
3421		    "initialization failed: no memory for rx buffers\n");
3422		dc_stop(sc);
3423		return;
3424	}
3425
3426	/*
3427	 * Init TX descriptors.
3428	 */
3429	dc_list_tx_init(sc);
3430
3431	/*
3432	 * Load the address of the RX list.
3433	 */
3434	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
3435	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));
3436
3437	/*
3438	 * Enable interrupts.
3439	 */
3440#ifdef DEVICE_POLLING
3441	/*
3442	 * ... but only if we are not polling, and make sure they are off in
3443	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
3444	 * after a reset.
3445	 */
3446	if (ifp->if_capenable & IFCAP_POLLING)
3447		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3448	else
3449#endif
3450	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
3451	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);
3452
3453	/* Enable transmitter. */
3454	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
3455
3456	/*
3457	 * If this is an Intel 21143 and we're not using the
3458	 * MII port, program the LED control pins so we get
3459	 * link and activity indications.
3460	 */
3461	if (sc->dc_flags & DC_TULIP_LEDS) {
3462		CSR_WRITE_4(sc, DC_WATCHDOG,
3463		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
3464		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
3465	}
3466
3467	/*
3468	 * Load the RX/multicast filter. We do this sort of late
3469	 * because the filter programming scheme on the 21143 and
3470	 * some clones requires DMAing a setup frame via the TX
3471	 * engine, and we need the transmitter enabled for that.
3472	 */
3473	dc_setfilt(sc);
3474
3475	/* Enable receiver. */
3476	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
3477	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);
3478
3479	mii_mediachg(mii);
3480	dc_setcfg(sc, sc->dc_if_media);
3481
3482	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3483	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3484
3485	/* Don't start the ticker if this is a homePNA link. */
3486	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
3487		sc->dc_link = 1;
3488	else {
3489		if (sc->dc_flags & DC_21143_NWAY)
3490			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
3491		else
3492			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
3493	}
3494
3495	sc->dc_wdog_timer = 0;
3496	callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
3497}
3498
3499/*
3500 * Set media options.
3501 */
3502static int
3503dc_ifmedia_upd(struct ifnet *ifp)
3504{
3505	struct dc_softc *sc;
3506	struct mii_data *mii;
3507	struct ifmedia *ifm;
3508
3509	sc = ifp->if_softc;
3510	mii = device_get_softc(sc->dc_miibus);
3511	DC_LOCK(sc);
3512	mii_mediachg(mii);
3513	ifm = &mii->mii_media;
3514
3515	if (DC_IS_DAVICOM(sc) &&
3516	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
3517		dc_setcfg(sc, ifm->ifm_media);
3518	else
3519		sc->dc_link = 0;
3520	DC_UNLOCK(sc);
3521
3522	return (0);
3523}
3524
3525/*
3526 * Report current media status.
3527 */
3528static void
3529dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3530{
3531	struct dc_softc *sc;
3532	struct mii_data *mii;
3533	struct ifmedia *ifm;
3534
3535	sc = ifp->if_softc;
3536	mii = device_get_softc(sc->dc_miibus);
3537	DC_LOCK(sc);
3538	mii_pollstat(mii);
3539	ifm = &mii->mii_media;
3540	if (DC_IS_DAVICOM(sc)) {
3541		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
3542			ifmr->ifm_active = ifm->ifm_media;
3543			ifmr->ifm_status = 0;
3544			DC_UNLOCK(sc);
3545			return;
3546		}
3547	}
3548	ifmr->ifm_active = mii->mii_media_active;
3549	ifmr->ifm_status = mii->mii_media_status;
3550	DC_UNLOCK(sc);
3551}
3552
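/*
 * Handle ioctl requests: interface flag changes, multicast filter
 * updates, media selection, and (when DEVICE_POLLING is compiled in)
 * switching between interrupt and polling operation.
 */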
3553static int
3554dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3555{
3556	struct dc_softc *sc = ifp->if_softc;
3557	struct ifreq *ifr = (struct ifreq *)data;
3558	struct mii_data *mii;
3559	int error = 0;
3560
3561	switch (command) {
3562	case SIOCSIFFLAGS:
3563		DC_LOCK(sc);
3564		if (ifp->if_flags & IFF_UP) {
3565			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
3566				(IFF_PROMISC | IFF_ALLMULTI);
3567
3568			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3569				if (need_setfilt)
3570					dc_setfilt(sc);
3571			} else {
3572				sc->dc_txthresh = 0;
3573				dc_init_locked(sc);
3574			}
3575		} else {
3576			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3577				dc_stop(sc);
3578		}
3579		sc->dc_if_flags = ifp->if_flags;
3580		DC_UNLOCK(sc);
3581		error = 0;
3582		break;
3583	case SIOCADDMULTI:
3584	case SIOCDELMULTI:
3585		DC_LOCK(sc);
3586		dc_setfilt(sc);
3587		DC_UNLOCK(sc);
3588		error = 0;
3589		break;
3590	case SIOCGIFMEDIA:
3591	case SIOCSIFMEDIA:
3592		mii = device_get_softc(sc->dc_miibus);
3593		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3594		break;
3595	case SIOCSIFCAP:
3596#ifdef DEVICE_POLLING
3597		if (ifr->ifr_reqcap & IFCAP_POLLING &&
3598		    !(ifp->if_capenable & IFCAP_POLLING)) {
3599			error = ether_poll_register(dc_poll, ifp);
3600			if (error)
3601				return (error);
3602			DC_LOCK(sc);
3603			/* Disable interrupts */
3604			CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3605			ifp->if_capenable |= IFCAP_POLLING;
3606			DC_UNLOCK(sc);
3607			return (error);
3608
3609		}
3610		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
3611		    ifp->if_capenable & IFCAP_POLLING) {
3612			error = ether_poll_deregister(ifp);
3613			/* Enable interrupts. */
3614			DC_LOCK(sc);
3615			CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
3616			ifp->if_capenable &= ~IFCAP_POLLING;
3617			DC_UNLOCK(sc);
3618			return (error);
3619		}
3620#endif /* DEVICE_POLLING */
3621		break;
3622	default:
3623		error = ether_ioctl(ifp, command, data);
3624		break;
3625	}
3626
3627	return (error);
3628}
3629
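/*
 * Watchdog callout: runs once a second and, if a transmission armed in
 * dc_start_locked() has not completed before the timer expires, assumes
 * the chip has wedged and resets and reinitializes it.
 */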
3630static void
3631dc_watchdog(void *xsc)
3632{
3633	struct dc_softc *sc = xsc;
3634	struct ifnet *ifp;
3635
3636	DC_LOCK_ASSERT(sc);
3637
3638	if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) {
3639		callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc);
3640		return;
3641	}
3642
3643	ifp = sc->dc_ifp;
3644	ifp->if_oerrors++;
3645	device_printf(sc->dc_dev, "watchdog timeout\n");
3646
3647	dc_stop(sc);
3648	dc_reset(sc);
3649	dc_init_locked(sc);
3650
3651	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3652		dc_start_locked(ifp);
3653}
3654
3655/*
3656 * Stop the adapter and free any mbufs allocated to the
3657 * RX and TX lists.
3658 */
3659static void
3660dc_stop(struct dc_softc *sc)
3661{
3662	struct ifnet *ifp;
3663	struct dc_list_data *ld;
3664	struct dc_chain_data *cd;
3665	int i;
3666	u_int32_t ctl;
3667
3668	DC_LOCK_ASSERT(sc);
3669
3670	ifp = sc->dc_ifp;
3671	ld = sc->dc_ldata;
3672	cd = &sc->dc_cdata;
3673
3674	callout_stop(&sc->dc_stat_ch);
3675	callout_stop(&sc->dc_wdog_ch);
3676	sc->dc_wdog_timer = 0;
3677
3678	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3679
3680	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
3681	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3682	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
3683	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
3684	sc->dc_link = 0;
3685
3686	/*
3687	 * Free data in the RX lists.
3688	 */
3689	for (i = 0; i < DC_RX_LIST_CNT; i++) {
3690		if (cd->dc_rx_chain[i] != NULL) {
3691			m_freem(cd->dc_rx_chain[i]);
3692			cd->dc_rx_chain[i] = NULL;
3693		}
3694	}
3695	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));
3696
3697	/*
3698	 * Free the TX list buffers.
3699	 */
3700	for (i = 0; i < DC_TX_LIST_CNT; i++) {
3701		if (cd->dc_tx_chain[i] != NULL) {
3702			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
3703			if ((ctl & DC_TXCTL_SETUP) ||
3704			    !(ctl & DC_TXCTL_LASTFRAG)) {
3705				cd->dc_tx_chain[i] = NULL;
3706				continue;
3707			}
3708			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
3709			m_freem(cd->dc_tx_chain[i]);
3710			cd->dc_tx_chain[i] = NULL;
3711		}
3712	}
3713	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));
3714}
3715
3716/*
3717 * Device suspend routine.  Stop the interface and mark the adapter
3718 * suspended so that the interrupt handler ignores any interrupts
3719 * that arrive before resume.
3720 */
3721static int
3722dc_suspend(device_t dev)
3723{
3724	struct dc_softc *sc;
3725
3726	sc = device_get_softc(dev);
3727	DC_LOCK(sc);
3728	dc_stop(sc);
3729	sc->suspended = 1;
3730	DC_UNLOCK(sc);
3731
3732	return (0);
3733}
3734
3735/*
3736 * Device resume routine.  Clear the suspended flag and reinitialize
3737 * the interface if it was marked up at the time of suspend so that
3738 * packet processing can continue.
3739 */
3740static int
3741dc_resume(device_t dev)
3742{
3743	struct dc_softc *sc;
3744	struct ifnet *ifp;
3745
3746	sc = device_get_softc(dev);
3747	ifp = sc->dc_ifp;
3748
3749	/* reinitialize interface if necessary */
3750	DC_LOCK(sc);
3751	if (ifp->if_flags & IFF_UP)
3752		dc_init_locked(sc);
3753
3754	sc->suspended = 0;
3755	DC_UNLOCK(sc);
3756
3757	return (0);
3758}
3759
3760/*
3761 * Stop all chip I/O so that the kernel's probe routines don't
3762 * get confused by errant DMAs when rebooting.
3763 */
3764static int
3765dc_shutdown(device_t dev)
3766{
3767	struct dc_softc *sc;
3768
3769	sc = device_get_softc(dev);
3770
3771	DC_LOCK(sc);
3772	dc_stop(sc);
3773	DC_UNLOCK(sc);
3774
3775	return (0);
3776}
3777