/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static const struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(if_t);
static void	stge_start_locked(if_t);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(if_t, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(if_t);
static void	stge_mediastatus(if_t, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(if_t, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

DRIVER_MODULE(stge, pci, stge_driver, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, 0, 0);

static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * stge_miibus_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	int error, val;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow ip1000phy to read the STGE_PhyCtrl register. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}

	STGE_MII_LOCK(sc);
	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
	STGE_MII_UNLOCK(sc);
	return (val);
}

/*
 * stge_miibus_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}

/*
 * stge_miibus_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}

/*
 * stge_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(if_t ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

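/*
 * stge_eeprom_wait:
 *
 *	Wait for the EEPROM to become idle.  Returns non-zero on
 *	timeout.
 */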
static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

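/*
 * stge_probe:
 *
 *	Match the PCI vendor/device ID against the product table.
 */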
static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < nitems(stge_products); i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

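/*
 * stge_attach:
 *
 *	Attach the device: map registers, allocate DMA resources,
 *	determine the station address, and hook the driver into the
 *	network stack.
 */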
static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if (PCI_BAR_IO(val))
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if (!PCI_BAR_IO(val)) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
	    "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I",
	    "stge rx interrupt dmawait");

	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}
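
	/*
	 * Note: resource_int_value(9) reads these knobs from the
	 * kernel environment, so both tunables may also be set from
	 * loader device hints, e.g. in /boot/device.hints (the values
	 * below are examples, not recommendations):
	 *
	 *	hint.stge.0.rxint_nframe="8"
	 *	hint.stge.0.rxint_dmawait="30"
	 */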

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For the Sundance ST-1023 it can only
	 * be read from the EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "failed to if_alloc()\n");
		error = ENXIO;
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, stge_ioctl);
	if_setstartfn(ifp, stge_start);
	if_setinitfn(ifp, stge_init);
	if_setsendqlen(ifp, STGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		if_sethwassist(ifp, STGE_CSUM_FEATURES);
		if_setcapabilities(ifp, IFCAP_HWCSUM);
	} else {
		if_sethwassist(ifp, 0);
		if_setcapabilities(ifp, 0);
	}
	if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
	if (sc->sc_rev >= 0x0c)
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hookup IRQ
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

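/*
 * stge_detach:
 *
 *	Detach the device: stop the interface, drain deferred work,
 *	and release all bus, DMA, and interrupt resources.
 */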
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if (sc->sc_spec)
		bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

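/*
 * stge_dma_alloc:
 *
 *	Create the DMA tags and maps for the Tx/Rx rings and buffers,
 *	then allocate and load DMA'able memory for both rings.
 */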
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
		    1, 0,			/* algnmnt, boundary */
		    STGE_DMA_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    STGE_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    STGE_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
		    STGE_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

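/*
 * stge_dma_free:
 *
 *	Tear down everything allocated by stge_dma_alloc().
 */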
static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_rdata.stge_tx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_rdata.stge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_rdata.stge_rx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_rdata.stge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

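/*
 * stge_setwol:
 *
 *	Program the WakeEvent register; only magic packet wakeup is
 *	enabled (when IFCAP_WOL_MAGIC is set).
 */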
static void
stge_setwol(struct stge_softc *sc)
{
	if_t ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets its link speed to 100Mbps
	 * when it is put to sleep, so there is no need to reset the
	 * link speed here.
	 */
}

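/*
 * stge_suspend:
 *
 *	Stop the interface and arm Wake On LAN before power-down.
 */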
static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

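/*
 * stge_resume:
 *
 *	Clear the WOL state and restart the interface if it was up.
 */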
static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits, so special frames no longer interfere
	 * with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (if_getflags(ifp) & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

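/*
 * stge_dma_wait:
 *
 *	Wait for any in-progress Tx DMA to finish.
 */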
static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

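/*
 * stge_encap:
 *
 *	Encapsulate an mbuf chain in a transmit descriptor (TFD):
 *	load it onto the Tx DMA map (collapsing the chain if it has
 *	too many segments) and fill in the fragment list and the
 *	checksum/VLAN control bits.
 */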
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++)
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(if_t ifp)
{
	struct stge_softc *sc;

	sc = if_getsoftc(ifp);
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(if_t ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	STGE_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	if_t ifp;

	STGE_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	stge_init_locked(sc);
	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			STGE_LOCK(sc);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = if_getflags(ifp);
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_HWCSUM);
			if ((IFCAP_HWCSUM & if_getcapenable(ifp)) != 0 &&
			    (IFCAP_HWCSUM & if_getcapabilities(ifp)) != 0)
				if_sethwassist(ifp, STGE_CSUM_FEATURES);
			else
				if_sethwassist(ifp, 0);
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

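/*
 * stge_link_task:
 *
 *	Taskqueue callback for link state changes; updates the
 *	MACCtrl duplex and flow-control bits and resets Tx/Rx when
 *	the duplex setting changes.
 */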
static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

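/*
 * stge_tx_error:
 *
 *	Drain TxStatus and handle transmit errors.  Returns -1 if a
 *	Tx underrun requires a full reinitialization, otherwise 0.
 */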
static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	if_t ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors.*/
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

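/*
 * stge_discard_rxbuf:
 *
 *	Recycle an Rx descriptor in place.  Clearing the status word
 *	clears RFD_RFDDone, handing the RFD back to the chip so the
 *	previously loaded data buffer is reused.
 */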
static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that the TC9021's DMA engine has alignment restrictions
 * in DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the rest should be aligned on a 4(?)
 * byte boundary; otherwise it would corrupt random memory.  Since
 * we don't know in advance which one is used for the first segment,
 * we simply don't align at all.
 * To avoid copying an entire frame just to align it, we allocate a
 * new mbuf and copy the Ethernet header into it.  The new mbuf is
 * then prepended to the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			if_input(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

#ifdef DEVICE_POLLING
static int
stge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = if_getsoftc(ifp);
	STGE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
1851	if (sc->sc_cdata.stge_tx_cnt != 0)
1852		stge_txeof(sc);
1853
1854	stge_watchdog(sc);
1855
1856	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1857}
1858
1859/*
1860 * stge_stats_update:
1861 *
1862 *	Read the TC9021 statistics counters.
1863 */
1864static void
1865stge_stats_update(struct stge_softc *sc)
1866{
1867	if_t ifp;
1868
1869	STGE_LOCK_ASSERT(sc);
1870
1871	ifp = sc->sc_ifp;
1872
1873	CSR_READ_4(sc,STGE_OctetRcvOk);
1874
1875	if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));
1876
1877	if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));
1878
1879	CSR_READ_4(sc, STGE_OctetXmtdOk);
1880
1881	if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));
1882
1883	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
1884	    CSR_READ_4(sc, STGE_LateCollisions) +
1885	    CSR_READ_4(sc, STGE_MultiColFrames) +
1886	    CSR_READ_4(sc, STGE_SingleColFrames));
1887
1888	if_inc_counter(ifp, IFCOUNTER_OERRORS,
1889	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1890	    CSR_READ_2(sc, STGE_FramesWEXDeferal));
1891}
1892
1893/*
1894 * stge_reset:
1895 *
1896 *	Perform a soft reset on the TC9021.
1897 */
1898static void
1899stge_reset(struct stge_softc *sc, uint32_t how)
1900{
1901	uint32_t ac;
1902	uint8_t v;
1903	int i, dv;
1904
1905	STGE_LOCK_ASSERT(sc);
1906
1907	dv = 5000;
1908	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1909	switch (how) {
1910	case STGE_RESET_TX:
1911		ac |= AC_TxReset | AC_FIFO;
1912		dv = 100;
1913		break;
1914	case STGE_RESET_RX:
1915		ac |= AC_RxReset | AC_FIFO;
1916		dv = 100;
1917		break;
1918	case STGE_RESET_FULL:
1919	default:
1920		/*
1921		 * Only assert RstOut if we're fiber.  We need GMII clocks
1922		 * to be present in order for the reset to complete on fiber
1923		 * cards.
1924		 */
1925		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
1926		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1927		    (sc->sc_usefiber ? AC_RstOut : 0);
1928		break;
1929	}
1930
1931	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1932
1933	/* Account for reset problem at 10Mbps. */
1934	DELAY(dv);
1935
1936	for (i = 0; i < STGE_TIMEOUT; i++) {
1937		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1938			break;
1939		DELAY(dv);
1940	}
1941
1942	if (i == STGE_TIMEOUT)
1943		device_printf(sc->sc_dev, "reset failed to complete\n");
1944
1945	/* Set LED, from Linux IPG driver. */
1946	ac = CSR_READ_4(sc, STGE_AsicCtrl);
1947	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
1948	if ((sc->sc_led & 0x01) != 0)
1949		ac |= AC_LEDMode;
1950	if ((sc->sc_led & 0x03) != 0)
1951		ac |= AC_LEDModeBit1;
1952	if ((sc->sc_led & 0x08) != 0)
1953		ac |= AC_LEDSpeed;
1954	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1955
1956	/* Set PHY, from Linux IPG driver */
1957	v = CSR_READ_1(sc, STGE_PhySet);
1958	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
1959	v |= ((sc->sc_led & 0x70) >> 4);
1960	CSR_WRITE_1(sc, STGE_PhySet, v);
1961}
1962
1963/*
1964 * stge_init:		[ ifnet interface function ]
1965 *
1966 *	Initialize the interface.
1967 */
1968static void
1969stge_init(void *xsc)
1970{
1971	struct stge_softc *sc;
1972
1973	sc = (struct stge_softc *)xsc;
1974	STGE_LOCK(sc);
1975	stge_init_locked(sc);
1976	STGE_UNLOCK(sc);
1977}
1978
1979static void
1980stge_init_locked(struct stge_softc *sc)
1981{
1982	if_t ifp;
1983	struct mii_data *mii;
1984	uint16_t eaddr[3];
1985	uint32_t v;
1986	int error;
1987
1988	STGE_LOCK_ASSERT(sc);
1989
1990	ifp = sc->sc_ifp;
1991	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
1992		return;
1993	mii = device_get_softc(sc->sc_miibus);
1994
1995	/*
1996	 * Cancel any pending I/O.
1997	 */
1998	stge_stop(sc);
1999
2000	/*
2001	 * Reset the chip to a known state.
2002	 */
2003	stge_reset(sc, STGE_RESET_FULL);
2004
2005	/* Init descriptors. */
2006	error = stge_init_rx_ring(sc);
2007	if (error != 0) {
2008		device_printf(sc->sc_dev,
2009		    "initialization failed: no memory for rx buffers\n");
2010		stge_stop(sc);
2011		goto out;
2012	}
2013	stge_init_tx_ring(sc);
2014
2015	/* Set the station address. */
2016	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
2017	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2018	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2019	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2020
2021	/*
2022	 * Set the statistics masks.  Disable all the RMON stats,
2023	 * and disable selected stats in the non-RMON stats registers.
2024	 */
2025	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2026	CSR_WRITE_4(sc, STGE_StatisticsMask,
2027	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2028	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2029	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2030	    (1U << 21));
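	/*
	 * A set bit in StatisticsMask appears to disable updates to the
	 * corresponding non-RMON counter (all RMON counters were masked
	 * above); the unmasked counters include those read back each
	 * second by stge_stats_update().
	 */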
2031
2032	/* Set up the receive filter. */
2033	stge_set_filter(sc);
2034	/* Program multicast filter. */
2035	stge_set_multi(sc);
2036
2037	/*
2038	 * Give the transmit and receive ring to the chip.
2039	 */
2040	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2041	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2042	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2043	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2044
2045	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2046	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2047	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2048	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2049
2050	/*
2051	 * Initialize the Tx auto-poll period.  It's OK to make this number
2052	 * large (255 is the max, but we use 127) -- we explicitly kick the
2053	 * transmit engine when there's actually a packet.
2054	 */
2055	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2056
2057	/* ..and the Rx auto-poll period. */
2058	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2059
2060	/* Initialize the Tx start threshold. */
2061	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2062
2063	/* Rx DMA thresholds, from Linux */
2064	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2065	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2066
2067	/* Rx early threshold, from Linux */
2068	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2069
2070	/* Tx DMA thresholds, from Linux */
2071	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2072	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2073
2074	/*
2075	 * Initialize the Rx DMA interrupt control register.  We
2076	 * request an interrupt after every incoming packet, but
2077	 * defer it for sc_rxint_dmawait us.  When the number of
2078	 * frames pending reaches sc_rxint_nframe, we stop deferring
2079	 * the interrupt and signal it immediately.
2080	 */
2081	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2082	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2083	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
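	/*
	 * Worked example (the values here are illustrative, not the driver
	 * defaults): with sc_rxint_nframe = 8 and sc_rxint_dmawait = 30,
	 * the write above is equivalent to
	 *
	 *	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	 *	    RDIC_RxFrameCount(8) |
	 *	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(30)));
	 *
	 * i.e. the Rx interrupt fires 30us after a frame arrives, or
	 * immediately once 8 frames are pending, whichever comes first.
	 */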
2084
2085	/*
2086	 * Initialize the interrupt mask.
2087	 */
2088	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2089	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2090#ifdef DEVICE_POLLING
2091	/* Disable interrupts if we are polling. */
2092	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
2093		CSR_WRITE_2(sc, STGE_IntEnable, 0);
2094	else
2095#endif
2096	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2097
2098	/*
2099	 * Configure the DMA engine.
2100	 * XXX Should auto-tune TxBurstLimit.
2101	 */
2102	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));
2103
2104	/*
2105	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
2106	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
2107	 * in the Rx FIFO.
2108	 */
2109	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
2110	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
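	/*
	 * The flow control thresholds are programmed in 16-byte units,
	 * hence the division: 29696 / 16 == 1856 and 3056 / 16 == 191.
	 */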
2111
2112	/*
2113	 * Set the maximum frame size.
2114	 */
2115	sc->sc_if_framesize = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2116	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);
2117
2118	/*
2119	 * Initialize MacCtrl -- do it before setting the media,
2120	 * as setting the media will actually program the register.
2121	 *
2122	 * Note: We have to poke the IFS value before poking
2123	 * anything else.
2124	 */
2125	/* Tx/Rx MAC should be disabled before programming IFS. */
2126	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));
2127
2128	stge_vlan_setup(sc);
2129
2130	if (sc->sc_rev >= 6) {		/* >= B.2 */
2131		/* Multi-frag frame bug work-around. */
2132		CSR_WRITE_2(sc, STGE_DebugCtrl,
2133		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
2134
2135		/* Tx Poll Now bug work-around. */
2136		CSR_WRITE_2(sc, STGE_DebugCtrl,
2137		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
2138		/* Tx Poll Now bug work-around. */
2139		CSR_WRITE_2(sc, STGE_DebugCtrl,
2140		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
2141	}
2142
2143	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2144	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
2145	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2146	/*
2147	 * It seems that transmitting frames without checking the state of
2148	 * the Rx/Tx MAC wedges the hardware.
2149	 */
2150	stge_start_tx(sc);
2151	stge_start_rx(sc);
2152
2153	sc->sc_link = 0;
2154	/*
2155	 * Set the current media.
2156	 */
2157	mii_mediachg(mii);
2158
2159	/*
2160	 * Start the one second MII clock.
2161	 */
2162	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
2163
2164	/*
2165	 * ...all done!
2166	 */
2167	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2168	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2169
2170 out:
2171	if (error != 0)
2172		device_printf(sc->sc_dev, "interface not running\n");
2173}
2174
2175static void
2176stge_vlan_setup(struct stge_softc *sc)
2177{
2178	if_t ifp;
2179	uint32_t v;
2180
2181	ifp = sc->sc_ifp;
2182	/*
2183	 * The NIC always copies a VLAN tag regardless of the
2184	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
2185	 * The MC_AutoVLANtagging bit selects which VLAN source to use,
2186	 * STGE_VLANTag or the TFC; however, the TFC's TFD_VLANTagInsert
2187	 * bit has priority over MC_AutoVLANtagging, so we always use
2188	 * the TFC instead of the STGE_VLANTag register.
2189	 */
2190	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2191	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2192		v |= MC_AutoVLANuntagging;
2193	else
2194		v &= ~MC_AutoVLANuntagging;
2195	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2196}
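/*
 * A minimal sketch of the per-frame insertion path described above,
 * assuming the TFD_VID() field macro from if_stgereg.h; the actual work
 * is done when a frame is encapsulated for transmit:
 *
 *	if ((m->m_flags & M_VLANTAG) != 0)
 *		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag);
 */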
2197
2198/*
2199 *	Stop transmission on the interface.
2200 */
2201static void
2202stge_stop(struct stge_softc *sc)
2203{
2204	if_t ifp;
2205	struct stge_txdesc *txd;
2206	struct stge_rxdesc *rxd;
2207	uint32_t v;
2208	int i;
2209
2210	STGE_LOCK_ASSERT(sc);
2211	/*
2212	 * Stop the one second clock.
2213	 */
2214	callout_stop(&sc->sc_tick_ch);
2215	sc->sc_watchdog_timer = 0;
2216
2217	/*
2218	 * Disable interrupts.
2219	 */
2220	CSR_WRITE_2(sc, STGE_IntEnable, 0);
2221
2222	/*
2223	 * Stop receiver, transmitter, and stats update.
2224	 */
2225	stge_stop_rx(sc);
2226	stge_stop_tx(sc);
2227	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2228	v |= MC_StatisticsDisable;
2229	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2230
2231	/*
2232	 * Stop the transmit and receive DMA.
2233	 */
2234	stge_dma_wait(sc);
2235	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
2236	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
2237	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
2238	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
2239
2240	/*
2241	 * Free RX and TX mbufs still in the queues.
2242	 */
2243	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2244		rxd = &sc->sc_cdata.stge_rxdesc[i];
2245		if (rxd->rx_m != NULL) {
2246			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
2247			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2248			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
2249			    rxd->rx_dmamap);
2250			m_freem(rxd->rx_m);
2251			rxd->rx_m = NULL;
2252		}
2253	}
2254	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2255		txd = &sc->sc_cdata.stge_txdesc[i];
2256		if (txd->tx_m != NULL) {
2257			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
2258			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2259			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
2260			    txd->tx_dmamap);
2261			m_freem(txd->tx_m);
2262			txd->tx_m = NULL;
2263		}
2264	}
2265
2266	/*
2267	 * Mark the interface down and cancel the watchdog timer.
2268	 */
2269	ifp = sc->sc_ifp;
2270	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2271	sc->sc_link = 0;
2272}
2273
2274static void
2275stge_start_tx(struct stge_softc *sc)
2276{
2277	uint32_t v;
2278	int i;
2279
2280	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2281	if ((v & MC_TxEnabled) != 0)
2282		return;
2283	v |= MC_TxEnable;
2284	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2285	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2286	for (i = STGE_TIMEOUT; i > 0; i--) {
2287		DELAY(10);
2288		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2289		if ((v & MC_TxEnabled) != 0)
2290			break;
2291	}
2292	if (i == 0)
2293		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
2294}
2295
2296static void
2297stge_start_rx(struct stge_softc *sc)
2298{
2299	uint32_t v;
2300	int i;
2301
2302	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2303	if ((v & MC_RxEnabled) != 0)
2304		return;
2305	v |= MC_RxEnable;
2306	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2307	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2308	for (i = STGE_TIMEOUT; i > 0; i--) {
2309		DELAY(10);
2310		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2311		if ((v & MC_RxEnabled) != 0)
2312			break;
2313	}
2314	if (i == 0)
2315		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
2316}
2317
2318static void
2319stge_stop_tx(struct stge_softc *sc)
2320{
2321	uint32_t v;
2322	int i;
2323
2324	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2325	if ((v & MC_TxEnabled) == 0)
2326		return;
2327	v |= MC_TxDisable;
2328	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2329	for (i = STGE_TIMEOUT; i > 0; i--) {
2330		DELAY(10);
2331		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2332		if ((v & MC_TxEnabled) == 0)
2333			break;
2334	}
2335	if (i == 0)
2336		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
2337}
2338
2339static void
2340stge_stop_rx(struct stge_softc *sc)
2341{
2342	uint32_t v;
2343	int i;
2344
2345	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2346	if ((v & MC_RxEnabled) == 0)
2347		return;
2348	v |= MC_RxDisable;
2349	CSR_WRITE_4(sc, STGE_MACCtrl, v);
2350	for (i = STGE_TIMEOUT; i > 0; i--) {
2351		DELAY(10);
2352		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
2353		if ((v & MC_RxEnabled) == 0)
2354			break;
2355	}
2356	if (i == 0)
2357		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
2358}
2359
2360static void
2361stge_init_tx_ring(struct stge_softc *sc)
2362{
2363	struct stge_ring_data *rd;
2364	struct stge_txdesc *txd;
2365	bus_addr_t addr;
2366	int i;
2367
2368	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
2369	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);
2370
2371	sc->sc_cdata.stge_tx_prod = 0;
2372	sc->sc_cdata.stge_tx_cons = 0;
2373	sc->sc_cdata.stge_tx_cnt = 0;
2374
2375	rd = &sc->sc_rdata;
2376	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
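	/*
	 * Chain the TFDs into a ring: each descriptor's next pointer holds
	 * the bus address of its successor, the last one wraps back to the
	 * first, and every slot starts out marked TFDDone so the chip
	 * treats it as empty until a frame is queued.
	 */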
2377	for (i = 0; i < STGE_TX_RING_CNT; i++) {
2378		if (i == (STGE_TX_RING_CNT - 1))
2379			addr = STGE_TX_RING_ADDR(sc, 0);
2380		else
2381			addr = STGE_TX_RING_ADDR(sc, i + 1);
2382		rd->stge_tx_ring[i].tfd_next = htole64(addr);
2383		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
2384		txd = &sc->sc_cdata.stge_txdesc[i];
2385		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
2386	}
2387
2388	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
2389	    sc->sc_cdata.stge_tx_ring_map,
2390	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2392}
2393
2394static int
2395stge_init_rx_ring(struct stge_softc *sc)
2396{
2397	struct stge_ring_data *rd;
2398	bus_addr_t addr;
2399	int i;
2400
2401	sc->sc_cdata.stge_rx_cons = 0;
2402	STGE_RXCHAIN_RESET(sc);
2403
2404	rd = &sc->sc_rdata;
2405	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
2406	for (i = 0; i < STGE_RX_RING_CNT; i++) {
2407		if (stge_newbuf(sc, i) != 0)
2408			return (ENOBUFS);
2409		if (i == (STGE_RX_RING_CNT - 1))
2410			addr = STGE_RX_RING_ADDR(sc, 0);
2411		else
2412			addr = STGE_RX_RING_ADDR(sc, i + 1);
2413		rd->stge_rx_ring[i].rfd_next = htole64(addr);
2414		rd->stge_rx_ring[i].rfd_status = 0;
2415	}
2416
2417	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
2418	    sc->sc_cdata.stge_rx_ring_map,
2419	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2420
2421	return (0);
2422}
2423
2424/*
2425 * stge_newbuf:
2426 *
2427 *	Add a receive buffer to the indicated descriptor.
2428 */
2429static int
2430stge_newbuf(struct stge_softc *sc, int idx)
2431{
2432	struct stge_rxdesc *rxd;
2433	struct stge_rfd *rfd;
2434	struct mbuf *m;
2435	bus_dma_segment_t segs[1];
2436	bus_dmamap_t map;
2437	int nsegs;
2438
2439	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2440	if (m == NULL)
2441		return (ENOBUFS);
2442	m->m_len = m->m_pkthdr.len = MCLBYTES;
2443	/*
2444	 * The hardware requires a 4-byte aligned DMA address when a
2445	 * jumbo frame is used.
2446	 */
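	/*
	 * With a standard MTU, shifting the payload by ETHER_ALIGN (2)
	 * bytes instead leaves the IP header 32-bit aligned behind the
	 * 14-byte Ethernet header, which is why the adjustment below is
	 * applied only to non-jumbo configurations.
	 */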
2447	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
2448		m_adj(m, ETHER_ALIGN);
2449
2450	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
2451	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2452		m_freem(m);
2453		return (ENOBUFS);
2454	}
2455	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2456
2457	rxd = &sc->sc_cdata.stge_rxdesc[idx];
2458	if (rxd->rx_m != NULL) {
2459		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2460		    BUS_DMASYNC_POSTREAD);
2461		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
2462	}
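	/*
	 * The mbuf was loaded into the spare map, so swap it with this
	 * slot's map and keep the old one as the new spare; a failed load
	 * above therefore never disturbs the buffer already in the ring.
	 */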
2463	map = rxd->rx_dmamap;
2464	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
2465	sc->sc_cdata.stge_rx_sparemap = map;
2466	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
2467	    BUS_DMASYNC_PREREAD);
2468	rxd->rx_m = m;
2469
2470	rfd = &sc->sc_rdata.stge_rx_ring[idx];
2471	rfd->rfd_frag.frag_word0 =
2472	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
2473	rfd->rfd_status = 0;
2474
2475	return (0);
2476}
2477
2478/*
2479 * stge_set_filter:
2480 *
2481 *	Set up the receive filter.
2482 */
2483static void
2484stge_set_filter(struct stge_softc *sc)
2485{
2486	if_t ifp;
2487	uint16_t mode;
2488
2489	STGE_LOCK_ASSERT(sc);
2490
2491	ifp = sc->sc_ifp;
2492
2493	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2494	mode |= RM_ReceiveUnicast;
2495	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
2496		mode |= RM_ReceiveBroadcast;
2497	else
2498		mode &= ~RM_ReceiveBroadcast;
2499	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
2500		mode |= RM_ReceiveAllFrames;
2501	else
2502		mode &= ~RM_ReceiveAllFrames;
2503
2504	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2505}
2506
2507static u_int
2508stge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2509{
2510	uint32_t crc, *mchash = arg;
2511
2512	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
2513	/* Just want the 6 least significant bits. */
2514	crc &= 0x3f;
2515	/* Set the corresponding bit in the hash table. */
2516	mchash[crc >> 5] |= 1 << (crc & 0x1f);
2517
2518	return (1);
2519}
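/*
 * Worked example with a hypothetical CRC value: if the big-endian CRC of
 * a multicast address has 0x2a in its six low-order bits, then
 * 0x2a >> 5 == 1 selects mchash[1] (HashTable1) and 0x2a & 0x1f == 10
 * selects bit 10 within that word.
 */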
2520
2521static void
2522stge_set_multi(struct stge_softc *sc)
2523{
2524	if_t ifp;
2525	uint32_t mchash[2];
2526	uint16_t mode;
2527	int count;
2528
2529	STGE_LOCK_ASSERT(sc);
2530
2531	ifp = sc->sc_ifp;
2532
2533	mode = CSR_READ_2(sc, STGE_ReceiveMode);
2534	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2535		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
2536			mode |= RM_ReceiveAllFrames;
2537		else if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
2538			mode |= RM_ReceiveMulticast;
2539		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2540		return;
2541	}
2542
2543	/* Clear existing filters. */
2544	CSR_WRITE_4(sc, STGE_HashTable0, 0);
2545	CSR_WRITE_4(sc, STGE_HashTable1, 0);
2546
2547	/*
2548	 * Set up the multicast address filter by passing all multicast
2549	 * addresses through a CRC generator, and then using the low-order
2550	 * 6 bits as an index into the 64-bit multicast hash table.  The
2551	 * high-order bit of the index selects the register, while the
2552	 * remaining five bits select the bit within the register.
2553	 */
2554	bzero(mchash, sizeof(mchash));
2555	count = if_foreach_llmaddr(ifp, stge_hash_maddr, mchash);
2556
2557	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
2558	if (count > 0)
2559		mode |= RM_ReceiveMulticastHash;
2560	else
2561		mode &= ~RM_ReceiveMulticastHash;
2562
2563	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
2564	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
2565	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
2566}
2567
2568static int
2569sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2570{
2571	int error, value;
2572
2573	if (!arg1)
2574		return (EINVAL);
2575	value = *(int *)arg1;
2576	error = sysctl_handle_int(oidp, &value, 0, req);
2577	if (error || !req->newptr)
2578		return (error);
2579	if (value < low || value > high)
2580		return (EINVAL);
2581	*(int *)arg1 = value;
2582
2583	return (0);
2584}
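/*
 * A sketch of how these handlers are typically wired up at attach time;
 * the OID name and description string here are illustrative assumptions,
 * not necessarily what stge_attach() uses:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)), OID_AUTO,
 *	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
 *	    "stge rx interrupt nframe");
 */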
2585
2586static int
2587sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
2588{
2589	return (sysctl_int_range(oidp, arg1, arg2, req,
2590	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
2591}
2592
2593static int
2594sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
2595{
2596	return (sysctl_int_range(oidp, arg1, arg2, req,
2597	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
2598}
2599