/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/stge/if_stge.c 271837 2014-09-18 21:16:05Z glebius $");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

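/*
 * Checksum offload features advertised via if_hwassist; stge_encap()
 * sets the matching TFD_*ChecksumEnable bits per frame.
 */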
#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static const struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_start_locked(struct ifnet *);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

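/*
 * The TC9021 exposes its MII management pins through bits in the
 * PhyCtrl register.  This table maps the generic MII bit-bang module's
 * signal roles onto those PhyCtrl bits; the read/write callbacks below
 * simply access STGE_PhyCtrl with a bus-space barrier.
 */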
static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

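/*
 * The chip decodes its registers through either an I/O BAR or a memory
 * BAR; stge_attach() picks one of these resource specs after probing
 * PCIR_BAR(1) and PCIR_BAR(0).
 */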
static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	int error, val;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow ip1000phy to read the STGE_PhyCtrl register. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}

	STGE_MII_LOCK(sc);
	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
	STGE_MII_UNLOCK(sc);
	return (val);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
	    i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if (PCI_BAR_IO(val))
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if (!PCI_BAR_IO(val)) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");

	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_init = stge_init;
	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}
	bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

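/*
 * Allocate the DMA resources: a parent tag bounded by STGE_DMA_MAXADDR,
 * single-segment tags for the Tx/Rx descriptor rings (STGE_RING_ALIGN
 * aligned), and multi-segment tags for Tx/Rx mbufs.  The ring bus
 * addresses are captured through stge_dmamap_cb().
 */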
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_rdata.stge_tx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_rdata.stge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_rdata.stge_rx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_rdata.stge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

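/*
 * Program the WakeEvent register for suspend: all wake sources are
 * disabled except magic-packet wakeup when IFCAP_WOL_MAGIC is enabled,
 * and the transmitter is held in reset so no frames go out while the
 * chip is asleep.
 */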
static void
stge_setwol(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets its link speed to 100Mbps
	 * when it is put to sleep, so there is no need to reset the
	 * link speed here.
	 */
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits, so special frames no longer interfere
	 * with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (ifp->if_flags & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

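/*
 * Load an outgoing mbuf chain into a Tx DMA map (collapsing it with
 * m_collapse() if it needs more than STGE_MAXTXSEGS segments), then
 * build one transmit frame descriptor: a 64-bit fragment word per
 * segment (FRAG_ADDR | FRAG_LEN) plus a control word carrying the
 * frame id, fragment count, and checksum-offload and VLAN-tag bits.
 */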
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++)
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;

	sc = ifp->if_softc;
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	STGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	stge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			STGE_LOCK(sc);
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				ifp->if_capenable &= ~IFCAP_POLLING;
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

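/*
 * Deferred link-state handler.  stge_miibus_statchg() only queues this
 * task; the actual MACCtrl reprogramming (duplex, Rx/Tx flow control)
 * happens here under the driver lock, with a Tx/Rx reset when the
 * duplex setting changed.
 */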
static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

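/*
 * Drain the TxStatus FIFO.  A Tx underrun is handled by a full
 * reinitialization (returns -1 to the caller); maximum/late collisions
 * only require re-enabling the Tx MAC.
 */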
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/*
	 * Process and acknowledge interrupts.  Reading IntStatusAck
	 * also disables further interrupts, so IntEnable is rewritten
	 * after the loop below.
	 */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

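/*
 * Recycle the Rx descriptor in place: clearing rfd_status hands the
 * already-loaded Rx buffer back to the chip.
 */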
static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that the TC9021's DMA engine has alignment restrictions on
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the remaining segments should be aligned
 * on a 4(?)-byte boundary; otherwise it corrupts random memory.  Since
 * we don't know in advance which mbuf will supply the first segment,
 * we simply don't align at all.
 * To avoid copying an entire frame just to align it, we allocate a new
 * mbuf and copy the Ethernet header into it, then prepend the new mbuf
 * to the existing chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

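	/*
	 * A received frame may span several RFDs.  Completed buffers
	 * are chained onto stge_rxhead/stge_rxtail until a descriptor
	 * with RFD_FrameEnd arrives; the final length is then taken
	 * from RFD_RxDMAFrameLen() and the chain is passed up.
	 */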
	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

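/*
 * Polling entry point.  While IFCAP_POLLING is enabled stge_ioctl()
 * keeps chip interrupts masked, so Rx/Tx completion processing is
 * driven from here, bounded by the stge_rxcycles budget.
 */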
#ifdef DEVICE_POLLING
static int
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = ifp->if_softc;
	STGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner, as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

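	/*
	 * Some counters are read only for their side effect: the octet
	 * counters below are read and discarded, which (assuming the
	 * statistics registers are clear-on-read, as the per-tick
	 * accumulation in this function relies on) keeps them from
	 * saturating.
	 */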
1875	CSR_READ_4(sc,STGE_OctetRcvOk);
1876
1877	if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));
1878
1879	if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));
1880
1881	CSR_READ_4(sc, STGE_OctetXmtdOk);
1882
1883	if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));
1884
1885	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1886	    CSR_READ_4(sc, STGE_LateCollisions) +
1887	    CSR_READ_4(sc, STGE_MultiColFrames) +
1888	    CSR_READ_4(sc, STGE_SingleColFrames));
1889
1890	if_inc_counter(ifp, IFCOUNTER_OERRORS,
1891	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1892	    CSR_READ_2(sc, STGE_FramesWEXDeferal));
1893}
1894
1895/*
1896 * stge_reset:
1897 *
1898 *	Perform a soft reset on the TC9021.
1899 */
1900static void
1901stge_reset(struct stge_softc *sc, uint32_t how)
1902{
1903	uint32_t ac;
1904	uint8_t v;
1905	int i, dv;
1906
1907	STGE_LOCK_ASSERT(sc);
1908
1909	dv = 5000;
1910	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1911	switch (how) {
1912	case STGE_RESET_TX:
1913		ac |= AC_TxReset | AC_FIFO;
1914		dv = 100;
1915		break;
1916	case STGE_RESET_RX:
1917		ac |= AC_RxReset | AC_FIFO;
1918		dv = 100;
1919		break;
1920	case STGE_RESET_FULL:
1921	default:
1922		/*
1923		 * Only assert RstOut if we're fiber.  We need GMII clocks
1924		 * to be present in order for the reset to complete on fiber
1925		 * cards.
1926		 */
1927		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1928		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1929		    (sc->sc_usefiber ? AC_RstOut : 0);
1930		break;
1931	}
1932
1933	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1934
1935	/* Account for reset problem at 10Mbps. */
1936	DELAY(dv);
1937
1938	for (i = 0; i < STGE_TIMEOUT; i++) {
1939		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1940			break;
1941		DELAY(dv);
1942	}
1943
1944	if (i == STGE_TIMEOUT)
1945		device_printf(sc->sc_dev, "reset failed to complete\n");
1946
1947	/* Set LED, from Linux IPG driver. */
1948	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1949	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1950	if ((sc->sc_led & 0x01) != 0)
1951		ac |= AC_LEDMode;
1952	if ((sc->sc_led & 0x03) != 0)
1953		ac |= AC_LEDModeBit1;
1954	if ((sc->sc_led & 0x08) != 0)
1955		ac |= AC_LEDSpeed;
1956	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1957
1958	/* Set PHY, from Linux IPG driver */
1959	v = CSR_READ_1(sc, STGE_PhySet);
1960	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1961	v |= ((sc->sc_led & 0x70) >> 4);
1962	CSR_WRITE_1(sc, STGE_PhySet, v);
1963}
1964
1965/*
1966 * stge_init:		[ ifnet interface function ]
1967 *
1968 *	Initialize the interface.
1969 */
1970static void
1971stge_init(void *xsc)
1972{
1973	struct stge_softc *sc;
1974
1975	sc = (struct stge_softc *)xsc;
1976	STGE_LOCK(sc);
1977	stge_init_locked(sc);
1978	STGE_UNLOCK(sc);
1979}
1980
1981static void
1982stge_init_locked(struct stge_softc *sc)
1983{
1984	struct ifnet *ifp;
1985	struct mii_data *mii;
1986	uint16_t eaddr[3];
1987	uint32_t v;
1988	int error;
1989
1990	STGE_LOCK_ASSERT(sc);
1991
1992	ifp = sc->sc_ifp;
1993	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1994		return;
1995	mii = device_get_softc(sc->sc_miibus);
1996
1997	/*
1998	 * Cancel any pending I/O.
1999	 */
2000	stge_stop(sc);
2001
2002	/*
2003	 * Reset the chip to a known state.
2004	 */
2005	stge_reset(sc, STGE_RESET_FULL);
2006
2007	/* Init descriptors. */
2008	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
2015	stge_init_tx_ring(sc);
2016
2017	/* Set the station address. */
2018	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2019	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2020	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2021	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
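	/*
	 * The htole16() conversions above keep the address bytes in
	 * memory order in the registers regardless of host endianness.
	 */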
2022
2023	/*
2024	 * Set the statistics masks.  Disable all the RMON stats,
2025	 * and disable selected stats in the non-RMON stats registers.
2026	 */
2027	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2028	CSR_WRITE_4(sc, STGE_StatisticsMask,
2029	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2030	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2031	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2032	    (1U << 21));
2033
2034	/* Set up the receive filter. */
2035	stge_set_filter(sc);
2036	/* Program multicast filter. */
2037	stge_set_multi(sc);
2038
2039	/*
2040	 * Give the transmit and receive ring to the chip.
2041	 */
2042	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2043	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2044	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2045	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2046
2047	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2048	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2049	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2050	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2051
2052	/*
2053	 * Initialize the Tx auto-poll period.  It's OK to make this number
2054	 * large (255 is the max, but we use 127) -- we explicitly kick the
2055	 * transmit engine when there's actually a packet.
2056	 */
2057	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2058
2059	/* ..and the Rx auto-poll period. */
2060	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2061
2062	/* Initialize the Tx start threshold. */
2063	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2064
2065	/* Rx DMA thresholds, from Linux */
2066	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2067	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2068
	/* Rx early threshold, from Linux */
2070	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2071
2072	/* Tx DMA thresholds, from Linux */
2073	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2074	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2075
	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but defer
	 * it for sc_rxint_dmawait microseconds.  When the number of
	 * pending frames reaches sc_rxint_nframe, we stop deferring
	 * the interrupt and signal it immediately.
	 */
2083	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2084	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2085	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
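	/*
	 * STGE_RXINT_USECS2TICK() converts the microsecond wait time
	 * into the tick units the RxDMAWaitTime field expects.
	 */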
2086
2087	/*
2088	 * Initialize the interrupt mask.
2089	 */
2090	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2091	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2092#ifdef DEVICE_POLLING
2093	/* Disable interrupts if we are polling. */
2094	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2095		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2096	else
2097#endif
2098	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2099
2100	/*
2101	 * Configure the DMA engine.
2102	 * XXX Should auto-tune TxBurstLimit.
2103	 */
2104	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2105
2106	/*
2107	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2108	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2109	 * in the Rx FIFO.
2110	 */
2111	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2112	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
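	/*
	 * The flow-control thresholds are apparently programmed in
	 * 16-byte units, hence the division: 29696 / 16 == 1856 and
	 * 3056 / 16 == 191.
	 */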
2113
2114	/*
2115	 * Set the maximum frame size.
2116	 */
2117	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2118	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2119
2120	/*
2121	 * Initialize MacCtrl -- do it before setting the media,
2122	 * as setting the media will actually program the register.
2123	 *
2124	 * Note: We have to poke the IFS value before poking
2125	 * anything else.
2126	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
2128	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
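	/*
	 * MC_IFS96bit selects the standard Ethernet inter-frame
	 * spacing of 96 bit times.
	 */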
2129
2130	stge_vlan_setup(sc);
2131
2132	if (sc->sc_rev >= 6) {		/* >= B.2 */
2133		/* Multi-frag frame bug work-around. */
2134		CSR_WRITE_2(sc, STGE_DebugCtrl,
2135		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2136
2137		/* Tx Poll Now bug work-around. */
2138		CSR_WRITE_2(sc, STGE_DebugCtrl,
2139		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2140		/* Tx Poll Now bug work-around. */
2141		CSR_WRITE_2(sc, STGE_DebugCtrl,
2142		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2143	}
2144
2145	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2146	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2147	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state
	 * of the Rx/Tx MAC wedges the hardware.
	 */
2152	stge_start_tx(sc);
2153	stge_start_rx(sc);
2154
2155	sc->sc_link = 0;
2156	/*
2157	 * Set the current media.
2158	 */
2159	mii_mediachg(mii);
2160
2161	/*
2162	 * Start the one second MII clock.
2163	 */
2164	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2165
2166	/*
2167	 * ...all done!
2168	 */
2169	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2170	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2171
2172 out:
2173	if (error != 0)
2174		device_printf(sc->sc_dev, "interface not running\n");
2175}
2176
2177static void
2178stge_vlan_setup(struct stge_softc *sc)
2179{
2180	struct ifnet *ifp;
2181	uint32_t v;
2182
2183	ifp = sc->sc_ifp;
	/*
	 * The NIC always copies a VLAN tag regardless of the
	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
	 * The MC_AutoVLANtagging bit selects the VLAN source to use
	 * between STGE_VLANTag and the TFC.  However, the TFC
	 * TFD_VLANTagInsert bit has priority over MC_AutoVLANtagging,
	 * so we always use the TFC instead of the STGE_VLANTag
	 * register.
	 */
2192	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2193	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2194		v |= MC_AutoVLANuntagging;
2195	else
2196		v &= ~MC_AutoVLANuntagging;
2197	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2198}
2199
2200/*
2201 *	Stop transmission on the interface.
2202 */
2203static void
2204stge_stop(struct stge_softc *sc)
2205{
2206	struct ifnet *ifp;
2207	struct stge_txdesc *txd;
2208	struct stge_rxdesc *rxd;
2209	uint32_t v;
2210	int i;
2211
2212	STGE_LOCK_ASSERT(sc);
2213	/*
2214	 * Stop the one second clock.
2215	 */
2216	callout_stop(&sc->sc_tick_ch);
2217	sc->sc_watchdog_timer = 0;
2218
2219	/*
2220	 * Disable interrupts.
2221	 */
2222	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2223
2224	/*
2225	 * Stop receiver, transmitter, and stats update.
2226	 */
2227	stge_stop_rx(sc);
2228	stge_stop_tx(sc);
2229	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2230	v |= MC_StatisticsDisable;
2231	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2232
2233	/*
2234	 * Stop the transmit and receive DMA.
2235	 */
2236	stge_dma_wait(sc);
2237	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2238	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2239	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2240	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2241
2242	/*
2243	 * Free RX and TX mbufs still in the queues.
2244	 */
2245	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2246		rxd = &sc->sc_cdata.stge_rxdesc[i];
2247		if (rxd->rx_m != NULL) {
2248			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2249			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2250			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2251			    rxd->rx_dmamap);
2252			m_freem(rxd->rx_m);
2253			rxd->rx_m = NULL;
2254		}
	}
2256	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2257		txd = &sc->sc_cdata.stge_txdesc[i];
2258		if (txd->tx_m != NULL) {
2259			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2260			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2261			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2262			    txd->tx_dmamap);
2263			m_freem(txd->tx_m);
2264			txd->tx_m = NULL;
2265		}
	}
2267
2268	/*
2269	 * Mark the interface down and cancel the watchdog timer.
2270	 */
2271	ifp = sc->sc_ifp;
2272	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2273	sc->sc_link = 0;
2274}
2275
2276static void
2277stge_start_tx(struct stge_softc *sc)
2278{
2279	uint32_t v;
2280	int i;
2281
2282	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2283	if ((v & MC_TxEnabled) != 0)
2284		return;
2285	v |= MC_TxEnable;
2286	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2287	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
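	/* Wait for the MAC to report the transmitter enabled. */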
2288	for (i = STGE_TIMEOUT; i > 0; i--) {
2289		DELAY(10);
2290		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2291		if ((v & MC_TxEnabled) != 0)
2292			break;
2293	}
2294	if (i == 0)
2295		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2296}
2297
2298static void
2299stge_start_rx(struct stge_softc *sc)
2300{
2301	uint32_t v;
2302	int i;
2303
2304	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2305	if ((v & MC_RxEnabled) != 0)
2306		return;
2307	v |= MC_RxEnable;
2308	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2309	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2310	for (i = STGE_TIMEOUT; i > 0; i--) {
2311		DELAY(10);
2312		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2313		if ((v & MC_RxEnabled) != 0)
2314			break;
2315	}
2316	if (i == 0)
2317		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2318}
2319
2320static void
2321stge_stop_tx(struct stge_softc *sc)
2322{
2323	uint32_t v;
2324	int i;
2325
2326	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2327	if ((v & MC_TxEnabled) == 0)
2328		return;
2329	v |= MC_TxDisable;
2330	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2331	for (i = STGE_TIMEOUT; i > 0; i--) {
2332		DELAY(10);
2333		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2334		if ((v & MC_TxEnabled) == 0)
2335			break;
2336	}
2337	if (i == 0)
2338		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2339}
2340
2341static void
2342stge_stop_rx(struct stge_softc *sc)
2343{
2344	uint32_t v;
2345	int i;
2346
2347	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2348	if ((v & MC_RxEnabled) == 0)
2349		return;
2350	v |= MC_RxDisable;
2351	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2352	for (i = STGE_TIMEOUT; i > 0; i--) {
2353		DELAY(10);
2354		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2355		if ((v & MC_RxEnabled) == 0)
2356			break;
2357	}
2358	if (i == 0)
2359		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2360}
2361
2362static void
2363stge_init_tx_ring(struct stge_softc *sc)
2364{
2365	struct stge_ring_data *rd;
2366	struct stge_txdesc *txd;
2367	bus_addr_t addr;
2368	int i;
2369
2370	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2371	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2372
2373	sc->sc_cdata.stge_tx_prod = 0;
2374	sc->sc_cdata.stge_tx_cons = 0;
2375	sc->sc_cdata.stge_tx_cnt = 0;
2376
2377	rd = &sc->sc_rdata;
2378	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
2379	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2380		if (i == (STGE_TX_RING_CNT - 1))
2381			addr = STGE_TX_RING_ADDR(sc, 0);
2382		else
2383			addr = STGE_TX_RING_ADDR(sc, i + 1);
2384		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2385		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2386		txd = &sc->sc_cdata.stge_txdesc[i];
2387		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2388	}
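	/*
	 * The tfd_next links above make the ring circular (the last
	 * descriptor points back to the first), and TFD_TFDDone marks
	 * every descriptor as free for the driver to use.
	 */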
2389
2390	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2391	    sc->sc_cdata.stge_tx_ring_map,
2392	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2394}
2395
2396static int
2397stge_init_rx_ring(struct stge_softc *sc)
2398{
2399	struct stge_ring_data *rd;
2400	bus_addr_t addr;
2401	int i;
2402
2403	sc->sc_cdata.stge_rx_cons = 0;
2404	STGE_RXCHAIN_RESET(sc);
2405
2406	rd = &sc->sc_rdata;
2407	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2408	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2409		if (stge_newbuf(sc, i) != 0)
2410			return (ENOBUFS);
2411		if (i == (STGE_RX_RING_CNT - 1))
2412			addr = STGE_RX_RING_ADDR(sc, 0);
2413		else
2414			addr = STGE_RX_RING_ADDR(sc, i + 1);
2415		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2416		rd->stge_rx_ring[i].rfd_status = 0;
2417	}
2418
2419	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2420	    sc->sc_cdata.stge_rx_ring_map,
2421	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2422
2423	return (0);
2424}
2425
2426/*
2427 * stge_newbuf:
2428 *
2429 *	Add a receive buffer to the indicated descriptor.
2430 */
2431static int
2432stge_newbuf(struct stge_softc *sc, int idx)
2433{
2434	struct stge_rxdesc *rxd;
2435	struct stge_rfd *rfd;
2436	struct mbuf *m;
2437	bus_dma_segment_t segs[1];
2438	bus_dmamap_t map;
2439	int nsegs;
2440
2441	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2442	if (m == NULL)
2443		return (ENOBUFS);
2444	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires a 4-byte aligned DMA address when a
	 * jumbo frame is used.
	 */
2449	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2450		m_adj(m, ETHER_ALIGN);
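	/*
	 * The 2-byte ETHER_ALIGN offset above leaves the IP header
	 * 32-bit aligned behind the 14-byte Ethernet header; it is
	 * skipped for jumbo frames to preserve the DMA alignment
	 * noted above.
	 */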
2451
2452	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2453	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2454		m_freem(m);
2455		return (ENOBUFS);
2456	}
2457	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2458
2459	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2460	if (rxd->rx_m != NULL) {
2461		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2462		    BUS_DMASYNC_POSTREAD);
2463		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2464	}
2465	map = rxd->rx_dmamap;
2466	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2467	sc->sc_cdata.stge_rx_sparemap = map;
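	/* The descriptor's old map becomes the spare for the next replenish. */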
2468	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2469	    BUS_DMASYNC_PREREAD);
2470	rxd->rx_m = m;
2471
2472	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2473	rfd->rfd_frag.frag_word0 =
2474	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2475	rfd->rfd_status = 0;
2476
2477	return (0);
2478}
2479
2480/*
2481 * stge_set_filter:
2482 *
2483 *	Set up the receive filter.
2484 */
2485static void
2486stge_set_filter(struct stge_softc *sc)
2487{
2488	struct ifnet *ifp;
2489	uint16_t mode;
2490
2491	STGE_LOCK_ASSERT(sc);
2492
2493	ifp = sc->sc_ifp;
2494
2495	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2496	mode |= RM_ReceiveUnicast;
2497	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2498		mode |= RM_ReceiveBroadcast;
2499	else
2500		mode &= ~RM_ReceiveBroadcast;
2501	if ((ifp->if_flags & IFF_PROMISC) != 0)
2502		mode |= RM_ReceiveAllFrames;
2503	else
2504		mode &= ~RM_ReceiveAllFrames;
2505
2506	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2507}
2508
2509static void
2510stge_set_multi(struct stge_softc *sc)
2511{
2512	struct ifnet *ifp;
2513	struct ifmultiaddr *ifma;
2514	uint32_t crc;
2515	uint32_t mchash[2];
2516	uint16_t mode;
2517	int count;
2518
2519	STGE_LOCK_ASSERT(sc);
2520
2521	ifp = sc->sc_ifp;
2522
2523	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2524	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2525		if ((ifp->if_flags & IFF_PROMISC) != 0)
2526			mode |= RM_ReceiveAllFrames;
2527		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2528			mode |= RM_ReceiveMulticast;
2529		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2530		return;
2531	}
2532
	/* Clear the existing filters. */
2534	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2535	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2536
	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table.  The
	 * high-order bit selects the register, while the low-order 5 bits
	 * select the bit within the register.
	 */
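	/*
	 * For example, a masked CRC of 0x25 (100101b) sets bit 5
	 * (0x25 & 0x1f) of mchash[1] (0x25 >> 5).
	 */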
2544
2545	bzero(mchash, sizeof(mchash));
2546
2547	count = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2550		if (ifma->ifma_addr->sa_family != AF_LINK)
2551			continue;
2552		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2553		    ifma->ifma_addr), ETHER_ADDR_LEN);
2554
2555		/* Just want the 6 least significant bits. */
2556		crc &= 0x3f;
2557
2558		/* Set the corresponding bit in the hash table. */
2559		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2560		count++;
2561	}
2562	if_maddr_runlock(ifp);
2563
2564	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2565	if (count > 0)
2566		mode |= RM_ReceiveMulticastHash;
2567	else
2568		mode &= ~RM_ReceiveMulticastHash;
2569
2570	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2571	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2572	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2573}
2574
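/*
 * Helper for the sysctl handlers below: accept a new integer value
 * only if it lies within [low, high].
 */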
2575static int
2576sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2577{
2578	int error, value;
2579
2580	if (!arg1)
2581		return (EINVAL);
2582	value = *(int *)arg1;
2583	error = sysctl_handle_int(oidp, &value, 0, req);
2584	if (error || !req->newptr)
2585		return (error);
2586	if (value < low || value > high)
2587		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
2591}
2592
2593static int
2594sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2595{
2596	return (sysctl_int_range(oidp, arg1, arg2, req,
2597	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2598}
2599
2600static int
2601sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2602{
2603	return (sysctl_int_range(oidp, arg1, arg2, req,
2604	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2605}
2606