/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

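/*
 * Checksum work the chip can take over from the stack.  stge_attach()
 * advertises these only on revisions free of the hardware checksum bug
 * (see the revision check there).
 */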
#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static const struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_start_locked(struct ifnet *);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

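	/*
	 * The barrier keeps the PhyCtrl access from being reordered or
	 * combined with the neighboring accesses the bit-bang layer
	 * issues while clocking bits in and out.
	 */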
	val = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	int error, val;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow the ip1000phy driver to read STGE_PhyCtrl. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}

	STGE_MII_LOCK(sc);
	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
	STGE_MII_UNLOCK(sc);
	return (val);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
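	/*
	 * Defer the MAC update to stge_link_task(), which can take the
	 * driver lock itself rather than touching registers directly
	 * from the MII layer's callback context.
	 */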
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}
static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < nitems(stge_products); i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if (PCI_BAR_IO(val))
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if (!PCI_BAR_IO(val)) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}
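
	/*
	 * Both knobs may also be set as device.hints(5)-style tunables;
	 * the unit number and values below are only examples:
	 *
	 *	hint.stge.0.rxint_nframe="8"
	 *	hint.stge.0.rxint_dmawait="30"
	 */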

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_init = stge_init;
	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}
	bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

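	/*
	 * The DMA resources form a small tag hierarchy: a parent tag
	 * bounded by STGE_DMA_MAXADDR, ring tags that add STGE_RING_ALIGN
	 * alignment for the Tx/Rx descriptor rings, and per-buffer tags
	 * for the mbuf maps.
	 */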
	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_rdata.stge_tx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_rdata.stge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_rdata.stge_rx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_rdata.stge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

static void
stge_setwol(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets its link speed to 100Mbps when
	 * put to sleep, so there is no need to reset the link speed here.
	 */
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits, so special frames no longer interfere
	 * with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (ifp->if_flags & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

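	/*
	 * Build the transmit descriptor: each fragment entry packs the
	 * segment's bus address (FRAG_ADDR) and length (FRAG_LEN) into a
	 * single little-endian 64-bit word.
	 */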
	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++)
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;

	sc = ifp->if_softc;
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	STGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

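	/*
	 * The timer is armed in stge_start_locked() and decremented once
	 * per second from stge_tick(); the timeout fires only when it
	 * reaches zero.
	 */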
	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	stge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			STGE_LOCK(sc);
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				ifp->if_capenable &= ~IFCAP_POLLING;
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

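	/*
	 * Drain completed-frame statuses: keep reading STGE_TxStatus
	 * until TS_TxComplete is no longer set.
	 */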
	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from Tx underrun instead of a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/*
	 * Acknowledge the pending interrupt sources; interrupts remain
	 * disabled until IntEnable is rewritten below.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counters are updated from the statistics registers. */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

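	/*
	 * Clearing rfd_status (and with it RFD_RFDDone) hands the RFD
	 * back to the chip so its existing buffer is reused as-is.
	 */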
	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that the TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the remaining segments should be aligned
 * on a 4(?)-byte boundary; otherwise it corrupts random memory.  Since
 * we don't know in advance which one is used for the first segment, we
 * simply don't align at all.
 * To avoid copying an entire frame just to align it, we allocate a new
 * mbuf and copy the Ethernet header into it.  The new mbuf is then
 * prepended to the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

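	/*
	 * A frame larger than one cluster spans several RFDs; the pieces
	 * are chained on stge_rxhead/stge_rxtail until an RFD with
	 * RFD_FrameEnd set completes the packet.
	 */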
	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

#ifdef DEVICE_POLLING
static int
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = ifp->if_softc;
	STGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame. This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

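	/*
	 * The statistics registers appear to clear on read (the values
	 * below are accumulated as deltas via if_inc_counter()); the
	 * octet counters are read only to keep them from accumulating.
	 */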
1876	CSR_READ_4(sc,STGE_OctetRcvOk);
1877
1878	if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));
1879
1880	if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));
1881
1882	CSR_READ_4(sc, STGE_OctetXmtdOk);
1883
1884	if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));
1885
1886	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1887	    CSR_READ_4(sc, STGE_LateCollisions) +
1888	    CSR_READ_4(sc, STGE_MultiColFrames) +
1889	    CSR_READ_4(sc, STGE_SingleColFrames));
1890
1891	if_inc_counter(ifp, IFCOUNTER_OERRORS,
1892	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1893	    CSR_READ_2(sc, STGE_FramesWEXDeferal));
1894}
1895
1896/*
1897 * stge_reset:
1898 *
1899 *	Perform a soft reset on the TC9021.
1900 */
1901static void
1902stge_reset(struct stge_softc *sc, uint32_t how)
1903{
1904	uint32_t ac;
1905	uint8_t v;
1906	int i, dv;
1907
1908	STGE_LOCK_ASSERT(sc);
1909
1910	dv = 5000;
1911	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1912	switch (how) {
1913	case STGE_RESET_TX:
1914		ac |= AC_TxReset | AC_FIFO;
1915		dv = 100;
1916		break;
1917	case STGE_RESET_RX:
1918		ac |= AC_RxReset | AC_FIFO;
1919		dv = 100;
1920		break;
1921	case STGE_RESET_FULL:
1922	default:
1923		/*
1924		 * Only assert RstOut if we're fiber.  We need GMII clocks
1925		 * to be present in order for the reset to complete on fiber
1926		 * cards.
1927		 */
1928		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1929		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1930		    (sc->sc_usefiber ? AC_RstOut : 0);
1931		break;
1932	}
1933
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver. */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}

/*
 * stge_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc;

	sc = (struct stge_softc *)xsc;
	STGE_LOCK(sc);
	stge_init_locked(sc);
	STGE_UNLOCK(sc);
}

static void
stge_init_locked(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
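	/*
	 * The 6-byte address is programmed as three 16-bit words; the
	 * htole16() conversions are meant to deliver the bytes to the
	 * chip in the same order regardless of host endianness.
	 */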

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us. When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
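	/*
	 * Worked example of the moderation scheme (illustrative values;
	 * the driver's actual defaults live in if_stgereg.h): with
	 * sc_rxint_nframe = 8 and sc_rxint_dmawait = 30, a burst of
	 * back-to-back frames raises the interrupt either 30us after
	 * the first frame arrives or as soon as 8 frames are pending,
	 * whichever happens first.
	 */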

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
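	/*
	 * The /16 scaling reflects that the flow-control threshold
	 * registers appear to be expressed in 16-byte units:
	 *
	 *	29696 / 16 = 1856	(Flow-On register value)
	 *	 3056 / 16 =  191	(Flow-Off register value)
	 */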

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * Transmitting frames without first checking the state of the
	 * Rx/Tx MAC seems to wedge the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	sc->sc_link = 0;
	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

 out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}

static void
stge_vlan_setup(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t v;

	ifp = sc->sc_ifp;
	/*
	 * The NIC always copies a VLAN tag regardless of the
	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
	 * The MC_AutoVLANtagging bit selects the VLAN source to use
	 * between STGE_VLANTag and the TFC, but the TFC
	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
	 * so we always use the TFC instead of the STGE_VLANTag
	 * register.
	 */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		v |= MC_AutoVLANuntagging;
	else
		v &= ~MC_AutoVLANuntagging;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
}

/*
 *	Stop transmission on the interface.
 */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);
	sc->sc_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_link = 0;
}
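
/*
 * The MAC enable/disable helpers below share one pattern: request the
 * state change through MACCtrl, then poll the corresponding
 * MC_*Enabled status bit for up to STGE_TIMEOUT iterations of
 * DELAY(10), i.e. roughly STGE_TIMEOUT * 10 microseconds, before
 * reporting a timeout.
 */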

static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) != 0)
		return;
	v |= MC_TxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}

static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) != 0)
		return;
	v |= MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}

static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) == 0)
		return;
	v |= MC_TxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}

static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) == 0)
		return;
	v |= MC_RxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}

static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}
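	/*
	 * The loop above links tfd_next of the last descriptor back to
	 * entry 0, so the hardware sees a single circular list;
	 * presetting TFD_TFDDone appears to mark each slot as complete
	 * (not owned by the hardware) until a frame is actually queued.
	 */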

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires a 4-byte aligned DMA address when a
	 * jumbo frame is used, so the two-byte ETHER_ALIGN adjustment
	 * (which 4-byte aligns the IP header rather than the buffer
	 * start) is applied only when the configured frame size fits
	 * in a standard cluster.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
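	/*
	 * Swap in the spare DMA map: the new mbuf was loaded into the
	 * spare above, so a load failure leaves the old mbuf and its
	 * mapping intact, and on success the old map becomes the new
	 * spare.
	 */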
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}

/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t mode;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	mode |= RM_ReceiveUnicast;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mode |= RM_ReceiveBroadcast;
	else
		mode &= ~RM_ReceiveBroadcast;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode |= RM_ReceiveAllFrames;
	else
		mode &= ~RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static void
stge_set_multi(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* Clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
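	/*
	 * Worked example with an illustrative CRC value: if
	 * ether_crc32_be() yields 0x2a (42) in its low 6 bits, then
	 * crc >> 5 = 1 selects mchash[1] (STGE_HashTable1) and
	 * 1 << (42 & 0x1f) sets bit 10 within that register.
	 */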

	bzero(mchash, sizeof(mchash));

	count = 0;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		count++;
	}
	if_maddr_runlock(ifp);

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
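
/*
 * sysctl_int_range() is a bounds-checked wrapper around
 * sysctl_handle_int(): reads report the current value, and writes are
 * rejected with EINVAL unless the new value lies within [low, high].
 * The handlers below specialize it for the Rx interrupt moderation
 * knobs; they are presumably attached with SYSCTL_ADD_PROC() during
 * device attach.
 */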

static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}

static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
