/*	$NetBSD: elinkxl.c,v 1.140 2024/06/29 12:11:11 riastradh Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.140 2024/06/29 12:11:11 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

static int ex_ifflags_cb(struct ethercom *);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int, uint16_t *);
int ex_mii_writereg(device_t, int, int, uint16_t);
void ex_mii_statchg(struct ifnet *);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
static const struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER | IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER | IFM_10_T | IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER | IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER | IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER | IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER | IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER | IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER | IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER | IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->ex_mii;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual);

	callout_init(&sc->ex_mii_callout, 0);
	callout_setfunc(&sc->ex_mii_callout, ex_tick, sc);

	ex_reset(sc);

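	/*
	 * The station address lives in the first three 16-bit words of
	 * the EEPROM, stored big-endian.
	 */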
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY | EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once. The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
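		/*
		 * Leave 2 bytes of headroom in each cluster so the payload
		 * behind the 14-byte Ethernet header is 4-byte aligned;
		 * see ex_add_rxbuf().
		 */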
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ex_mii_readreg;
	mii->mii_writereg = ex_mii_writereg;
	mii->mii_statchg = ex_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ex_media_chg, ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
			    0, NULL);
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		} else {
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, macaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
			  RND_TYPE_NET, RND_FLAG_DEFAULT);

	if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */

	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		/* FALLTHROUGH */

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	const struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_schedule(&sc->ex_mii_callout, hz);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

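/*
 * The 90xB's 256-bit multicast hash filter is indexed by the low 8 bits
 * of the big-endian CRC-32 of the multicast address.
 */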
#define	MCHASHSIZE		256
#define	ex_mchash(addr)		(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
				    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		goto nomulti;
	}

	if ((sc->ex_conf & EX_CONF_90XB) == 0) {
		/* No multicast hash filtering. */
		ETHER_UNLOCK(ec);
		goto allmulti;
	}

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	ETHER_UNLOCK(ec);
	mask |= FIL_MULTIHASH;

nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}

/*
 * Tx Complete interrupts occur only on errors; this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is the upper byte of the 16-bit word it
	 * shares with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while; do other work rather than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		if_statinc(ifp, if_oerrors);
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		aprint_error(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = uimin(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			if_statinc(ifp, if_collisions);

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			if_statinc(ifp, if_collisions);
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
			<< (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

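	/*
	 * Program full-duplex in the MAC to match the active media (MII)
	 * or the configured media (non-MII transceivers).
	 */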
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE | LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
						ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}

/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

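	/* Defer if a download is already in progress or no DPDs are free. */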
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			MCLAIM(mn, &sc->sc_ethercom.ec_tx_mowner);
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * Remove our tx desc from the freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 *     (36 bytes or less) packets might already
			 *     occupy EX_NTFRAG (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap(ifp, mb_head, BPF_D_OUT);
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}

int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				       "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases both the Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat(), so we must not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* Reset the watchdog timer; it was set in ex_start(). */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						if_statinc(ifp, if_ierrors);
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m_set_rcvif(m, ifp);
					m->m_pkthdr.len = m->m_len = total_len;
		/*
		 * Set the incoming checksum information for the packet.
		 */
		if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
		    (pktstat & EX_UPD_IPCHECKED) != 0) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (pktstat & EX_UPD_IPCKSUMERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (pktstat & EX_UPD_TCPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (pktstat & EX_UPD_TCPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (pktstat & EX_UPD_UDPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (pktstat & EX_UPD_UDPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}
					if_percpuq_enqueue(ifp->if_percpuq, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				       "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
				   & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				       "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
						  ELINK_UPUNSTALL);
			}
		}

		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
	}

	/* No more interrupts to service. */
	if (ret)
		if_schedule_deferred_start(ifp);
	return ret;
}

static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
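	/*
	 * The high nibble of UPPER_FRAMES_OK extends the 8-bit
	 * TX_FRAMES_OK counter by two more significant bits.
	 */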
	if_statadd_ref(ifp, nsr, if_opackets,
	    bus_space_read_1(iot, ioh, TX_FRAMES_OK));
	if_statadd_ref(ifp, nsr, if_opackets, (upperok & 0x30) << 4);
	if_statadd_ref(ifp, nsr, if_ierrors,
	    bus_space_read_1(iot, ioh, RX_OVERRUNS));
	if_statadd_ref(ifp, nsr, if_collisions,
	    bus_space_read_1(iot, ioh, TX_COLLISIONS));
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	if_statadd_ref(ifp, nsr, if_collisions,
	    2 * bus_space_read_1(iot, ioh, TX_AFTER_X_COLLISIONS));

	IF_STAT_PUTREF(ifp);

	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts.
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	(void)bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

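	/* Don't read the statistics registers while a command is pending. */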
	if (!(bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_schedule(&sc->ex_mii_callout, hz);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long. Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}

int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i, s;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	ex_stop(ifp, 1);
	splx(s);

	/* Destroy our callout. */
	callout_destroy(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->ex_mii.mii_media);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

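	/*
	 * Some adapters want the 8-bit EEPROM read command, and some keep
	 * the interesting words at an offset of 0x30 into the EEPROM.
	 */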
	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg, uint16_t *val)
{
	struct ex_softc *sc = device_private(v);
	int rv;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return -1;

	GO_WINDOW(4);

	rv = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg, val);

	GO_WINDOW(1);

	return rv;
}

int
ex_mii_writereg(device_t v, int phy, int reg, uint16_t val)
{
	struct ex_softc *sc = device_private(v);
	int rv;

	GO_WINDOW(4);

	rv = mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, val);

	GO_WINDOW(1);

	return rv;
}

void
ex_mii_statchg(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}