1/*-
2 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
3 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
4 * Copyright (c) 1997, 1998, 1999
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
25 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
26 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
33 * OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/dev/sge/if_sge.c 207380 2010-04-29 18:14:14Z yongari $");
38
39/*
40 * SiS 190/191 PCI Ethernet NIC driver.
41 *
 * Adapted to the SiS 190 NIC by Alexander Pohoyda, based on the
 * original SiS 900 driver by Bill Paul and using the SiS 190/191
 * Solaris driver by Masayuki Murayama and the SiS 190/191 GNU/Linux
 * driver by K.M. Liu <kmliu@sis.com>.  Thanks to Pyun YongHyeon
 * <pyunyh@gmail.com> for review and very useful comments.
 *
 * Adapted to the SiS 191 NIC by Nikolay Denev, with further ideas
 * from the Linux and Solaris drivers.
50 */
51
52#include <sys/param.h>
53#include <sys/systm.h>
54#include <sys/bus.h>
55#include <sys/endian.h>
56#include <sys/kernel.h>
57#include <sys/lock.h>
58#include <sys/malloc.h>
59#include <sys/mbuf.h>
60#include <sys/module.h>
61#include <sys/mutex.h>
62#include <sys/rman.h>
63#include <sys/socket.h>
64#include <sys/sockio.h>
65
66#include <net/bpf.h>
67#include <net/if.h>
68#include <net/if_arp.h>
69#include <net/ethernet.h>
70#include <net/if_dl.h>
71#include <net/if_media.h>
72#include <net/if_types.h>
73#include <net/if_vlan_var.h>
74
75#include <machine/bus.h>
76#include <machine/resource.h>
77
78#include <dev/mii/mii.h>
79#include <dev/mii/miivar.h>
80
81#include <dev/pci/pcireg.h>
82#include <dev/pci/pcivar.h>
83
84#include <dev/sge/if_sgereg.h>
85
86MODULE_DEPEND(sge, pci, 1, 1, 1);
87MODULE_DEPEND(sge, ether, 1, 1, 1);
88MODULE_DEPEND(sge, miibus, 1, 1, 1);
89
90/* "device miibus0" required.  See GENERIC if you get errors here. */
91#include "miibus_if.h"
92
93/*
94 * Various supported device vendors/types and their names.
95 */
96static struct sge_type sge_devs[] = {
97	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
98	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
99	{ 0, 0, NULL }
100};
101
102static int	sge_probe(device_t);
103static int	sge_attach(device_t);
104static int	sge_detach(device_t);
105static int	sge_shutdown(device_t);
106static int	sge_suspend(device_t);
107static int	sge_resume(device_t);
108
109static int	sge_miibus_readreg(device_t, int, int);
110static int	sge_miibus_writereg(device_t, int, int, int);
111static void	sge_miibus_statchg(device_t);
112
113static int	sge_newbuf(struct sge_softc *, int);
114static int	sge_encap(struct sge_softc *, struct mbuf **);
115#ifndef __NO_STRICT_ALIGNMENT
116static __inline void
117		sge_fixup_rx(struct mbuf *);
118#endif
119static __inline void
120		sge_discard_rxbuf(struct sge_softc *, int);
121static void	sge_rxeof(struct sge_softc *);
122static void	sge_txeof(struct sge_softc *);
123static void	sge_intr(void *);
124static void	sge_tick(void *);
125static void	sge_start(struct ifnet *);
126static void	sge_start_locked(struct ifnet *);
127static int	sge_ioctl(struct ifnet *, u_long, caddr_t);
128static void	sge_init(void *);
129static void	sge_init_locked(struct sge_softc *);
130static void	sge_stop(struct sge_softc *);
131static void	sge_watchdog(struct sge_softc *);
132static int	sge_ifmedia_upd(struct ifnet *);
133static void	sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
134
135static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
136static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
137static uint16_t	sge_read_eeprom(struct sge_softc *, int);
138
139static void	sge_rxfilter(struct sge_softc *);
140static void	sge_setvlan(struct sge_softc *);
141static void	sge_reset(struct sge_softc *);
142static int	sge_list_rx_init(struct sge_softc *);
143static int	sge_list_rx_free(struct sge_softc *);
144static int	sge_list_tx_init(struct sge_softc *);
145static int	sge_list_tx_free(struct sge_softc *);
146
147static int	sge_dma_alloc(struct sge_softc *);
148static void	sge_dma_free(struct sge_softc *);
149static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
150
151static device_method_t sge_methods[] = {
152	/* Device interface */
153	DEVMETHOD(device_probe,		sge_probe),
154	DEVMETHOD(device_attach,	sge_attach),
155	DEVMETHOD(device_detach,	sge_detach),
156	DEVMETHOD(device_suspend,	sge_suspend),
157	DEVMETHOD(device_resume,	sge_resume),
158	DEVMETHOD(device_shutdown,	sge_shutdown),
159
160	/* Bus interface */
161	DEVMETHOD(bus_print_child,	bus_generic_print_child),
162	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
163
164	/* MII interface */
165	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
166	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
167	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),
168
169	KOBJMETHOD_END
170};
171
172static driver_t sge_driver = {
173	"sge", sge_methods, sizeof(struct sge_softc)
174};
175
176static devclass_t sge_devclass;
177
178DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0);
179DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0);
180
181/*
182 * Register space access macros.
183 */
184#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sge_res, reg, val)
185#define	CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->sge_res, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->sge_res, reg, val)
187
188#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sge_res, reg)
189#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sge_res, reg)
190#define	CSR_READ_1(sc, reg)		bus_read_1(sc->sge_res, reg)
191
192/* Define to show Tx/Rx error status. */
193#undef SGE_SHOW_ERRORS
194
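/* Checksum offload features advertised to the stack via if_hwassist. */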
195#define	SGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
196
197static void
198sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
199{
200	bus_addr_t *p;
201
202	if (error != 0)
203		return;
204	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
205	p  = arg;
206	*p = segs->ds_addr;
207}
208
/*
 * Read a single 16-bit word from the EEPROM.
 */
212static uint16_t
213sge_read_eeprom(struct sge_softc *sc, int offset)
214{
215	uint32_t val;
216	int i;
217
218	KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
219	CSR_WRITE_4(sc, ROMInterface,
220	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
221	DELAY(500);
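	/* Poll until the controller clears EI_REQ, indicating completion. */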
222	for (i = 0; i < SGE_TIMEOUT; i++) {
223		val = CSR_READ_4(sc, ROMInterface);
224		if ((val & EI_REQ) == 0)
225			break;
226		DELAY(100);
227	}
228	if (i == SGE_TIMEOUT) {
229		device_printf(sc->sge_dev,
230		    "EEPROM read timeout : 0x%08x\n", val);
231		return (0xffff);
232	}
233
234	return ((val & EI_DATA) >> EI_DATA_SHIFT);
235}
236
237static int
238sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
239{
240	uint16_t val;
241	int i;
242
243	val = sge_read_eeprom(sc, EEPROMSignature);
244	if (val == 0xffff || val == 0) {
245		device_printf(sc->sge_dev,
246		    "invalid EEPROM signature : 0x%04x\n", val);
247		return (EINVAL);
248	}
249
250	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
251		val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
252		dest[i + 0] = (uint8_t)val;
253		dest[i + 1] = (uint8_t)(val >> 8);
254	}
255
256	if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
257		sc->sge_flags |= SGE_FLAG_RGMII;
258	return (0);
259}
260
/*
 * On SiS96x chipsets, the ethernet address is stored in APC CMOS RAM,
 * which is accessed through the ISA bridge.
 */
265static int
266sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
267{
268#if defined(__amd64__) || defined(__i386__)
269	devclass_t pci;
270	device_t bus, dev = NULL;
271	device_t *kids;
272	struct apc_tbl {
273		uint16_t vid;
274		uint16_t did;
275	} *tp, apc_tbls[] = {
276		{ SIS_VENDORID, 0x0965 },
277		{ SIS_VENDORID, 0x0966 },
278		{ SIS_VENDORID, 0x0968 }
279	};
280	uint8_t reg;
281	int busnum, cnt, i, j, numkids;
282
283	cnt = sizeof(apc_tbls) / sizeof(apc_tbls[0]);
284	pci = devclass_find("pci");
285	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
286		bus = devclass_get_device(pci, busnum);
287		if (!bus)
288			continue;
289		if (device_get_children(bus, &kids, &numkids) != 0)
290			continue;
291		for (i = 0; i < numkids; i++) {
292			dev = kids[i];
293			if (pci_get_class(dev) == PCIC_BRIDGE &&
294			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
295				tp = apc_tbls;
296				for (j = 0; j < cnt; j++) {
297					if (pci_get_vendor(dev) == tp->vid &&
298					    pci_get_device(dev) == tp->did) {
299						free(kids, M_TEMP);
300						goto apc_found;
301					}
302					tp++;
303				}
304			}
		}
306		free(kids, M_TEMP);
307	}
308	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
309	return (EINVAL);
310apc_found:
311	/* Enable port 0x78 and 0x79 to access APC registers. */
312	reg = pci_read_config(dev, 0x48, 1);
313	pci_write_config(dev, 0x48, reg & ~0x02, 1);
314	DELAY(50);
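	/* Read back, presumably to let the configuration change settle. */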
315	pci_read_config(dev, 0x48, 1);
316	/* Read stored ethernet address. */
317	for (i = 0; i < ETHER_ADDR_LEN; i++) {
318		outb(0x78, 0x09 + i);
319		dest[i] = inb(0x79);
320	}
321	outb(0x78, 0x12);
322	if ((inb(0x79) & 0x80) != 0)
323		sc->sge_flags |= SGE_FLAG_RGMII;
324	/* Restore access to APC registers. */
325	pci_write_config(dev, 0x48, reg, 1);
326
327	return (0);
328#else
329	return (EINVAL);
330#endif
331}
332
333static int
334sge_miibus_readreg(device_t dev, int phy, int reg)
335{
336	struct sge_softc *sc;
337	uint32_t val;
338	int i;
339
340	sc = device_get_softc(dev);
341	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
342	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
343	DELAY(10);
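	/* Poll until the controller clears GMI_REQ, indicating completion. */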
344	for (i = 0; i < SGE_TIMEOUT; i++) {
345		val = CSR_READ_4(sc, GMIIControl);
346		if ((val & GMI_REQ) == 0)
347			break;
348		DELAY(10);
349	}
350	if (i == SGE_TIMEOUT) {
351		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
352		return (0);
353	}
354	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
355}
356
357static int
358sge_miibus_writereg(device_t dev, int phy, int reg, int data)
359{
360	struct sge_softc *sc;
361	uint32_t val;
362	int i;
363
364	sc = device_get_softc(dev);
365	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
366	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
367	    GMI_OP_WR | GMI_REQ);
368	DELAY(10);
369	for (i = 0; i < SGE_TIMEOUT; i++) {
370		val = CSR_READ_4(sc, GMIIControl);
371		if ((val & GMI_REQ) == 0)
372			break;
373		DELAY(10);
374	}
375	if (i == SGE_TIMEOUT)
376		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
377	return (0);
378}
379
380static void
381sge_miibus_statchg(device_t dev)
382{
383	struct sge_softc *sc;
384	struct mii_data *mii;
385	struct ifnet *ifp;
386	uint32_t ctl, speed;
387
388	sc = device_get_softc(dev);
389	mii = device_get_softc(sc->sge_miibus);
390	ifp = sc->sge_ifp;
391	if (mii == NULL || ifp == NULL ||
392	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
393		return;
394	speed = 0;
395	sc->sge_flags &= ~SGE_FLAG_LINK;
396	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
397	    (IFM_ACTIVE | IFM_AVALID)) {
398		switch (IFM_SUBTYPE(mii->mii_media_active)) {
399		case IFM_10_T:
400			sc->sge_flags |= SGE_FLAG_LINK;
401			speed = SC_SPEED_10;
402			break;
403		case IFM_100_TX:
404			sc->sge_flags |= SGE_FLAG_LINK;
405			speed = SC_SPEED_100;
406			break;
407		case IFM_1000_T:
408			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
409				sc->sge_flags |= SGE_FLAG_LINK;
410				speed = SC_SPEED_1000;
411			}
412			break;
413		default:
414			break;
		}
	}
417	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
418		return;
419	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
420	ctl = CSR_READ_4(sc, StationControl);
421	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
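	/*
	 * The 0x0X000000 bits below are undocumented; the values were
	 * taken from the vendor's Linux/Solaris drivers and presumably
	 * select MAC clocking/timing for the negotiated link speed.
	 */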
422	if (speed == SC_SPEED_1000) {
423		ctl |= 0x07000000;
424		sc->sge_flags |= SGE_FLAG_SPEED_1000;
425	} else {
426		ctl |= 0x04000000;
427		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
428	}
429#ifdef notyet
430	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
431		ctl |= 0x03000000;
432#endif
433	ctl |= speed;
434	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
435		ctl |= SC_FDX;
436		sc->sge_flags |= SGE_FLAG_FDX;
437	} else
438		sc->sge_flags &= ~SGE_FLAG_FDX;
439	CSR_WRITE_4(sc, StationControl, ctl);
440	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
441		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
442		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
443	}
444}
445
446static void
447sge_rxfilter(struct sge_softc *sc)
448{
449	struct ifnet *ifp;
450	struct ifmultiaddr *ifma;
451	uint32_t crc, hashes[2];
452	uint16_t rxfilt;
453
454	SGE_LOCK_ASSERT(sc);
455
456	ifp = sc->sge_ifp;
457	rxfilt = CSR_READ_2(sc, RxMacControl);
458	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
459	rxfilt |= AcceptMyPhys;
460	if ((ifp->if_flags & IFF_BROADCAST) != 0)
461		rxfilt |= AcceptBroadcast;
462	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
463		if ((ifp->if_flags & IFF_PROMISC) != 0)
464			rxfilt |= AcceptAllPhys;
465		rxfilt |= AcceptMulticast;
466		hashes[0] = 0xFFFFFFFF;
467		hashes[1] = 0xFFFFFFFF;
468	} else {
469		rxfilt |= AcceptMulticast;
470		hashes[0] = hashes[1] = 0;
471		/* Now program new ones. */
472		if_maddr_rlock(ifp);
473		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
474			if (ifma->ifma_addr->sa_family != AF_LINK)
475				continue;
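			/*
			 * The top 6 bits of the big-endian CRC select
			 * one of 64 hash bits, split across the two
			 * 32-bit hash table registers.
			 */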
476			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
477			    ifma->ifma_addr), ETHER_ADDR_LEN);
478			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
479		}
480		if_maddr_runlock(ifp);
481	}
482	CSR_WRITE_2(sc, RxMacControl, rxfilt | 0x02);
483	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
484	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
485}
486
487static void
488sge_setvlan(struct sge_softc *sc)
489{
490	struct ifnet *ifp;
491	uint16_t rxfilt;
492
493	SGE_LOCK_ASSERT(sc);
494
495	ifp = sc->sge_ifp;
496	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
497		return;
498	rxfilt = CSR_READ_2(sc, RxMacControl);
499	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
500		rxfilt |= RXMAC_STRIP_VLAN;
501	else
502		rxfilt &= ~RXMAC_STRIP_VLAN;
503	CSR_WRITE_2(sc, RxMacControl, rxfilt);
504}
505
506static void
507sge_reset(struct sge_softc *sc)
508{
509
510	CSR_WRITE_4(sc, IntrMask, 0);
511	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
512
513	/* Soft reset. */
514	CSR_WRITE_4(sc, IntrControl, 0x8000);
515	CSR_READ_4(sc, IntrControl);
516	DELAY(100);
517	CSR_WRITE_4(sc, IntrControl, 0);
518	/* Stop MAC. */
519	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
520	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
521
522	CSR_WRITE_4(sc, IntrMask, 0);
523	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
524
525	CSR_WRITE_4(sc, GMIIControl, 0);
526}
527
528/*
529 * Probe for an SiS chip. Check the PCI vendor and device
530 * IDs against our list and return a device name if we find a match.
531 */
532static int
533sge_probe(device_t dev)
534{
535	struct sge_type *t;
536
537	t = sge_devs;
538	while (t->sge_name != NULL) {
539		if ((pci_get_vendor(dev) == t->sge_vid) &&
540		    (pci_get_device(dev) == t->sge_did)) {
541			device_set_desc(dev, t->sge_name);
542			return (BUS_PROBE_DEFAULT);
543		}
544		t++;
545	}
546
547	return (ENXIO);
548}
549
550/*
551 * Attach the interface.  Allocate softc structures, do ifmedia
552 * setup and ethernet/BPF attach.
553 */
554static int
555sge_attach(device_t dev)
556{
557	struct sge_softc *sc;
558	struct ifnet *ifp;
559	uint8_t eaddr[ETHER_ADDR_LEN];
560	int error = 0, rid;
561
562	sc = device_get_softc(dev);
563	sc->sge_dev = dev;
564
565	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
566	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);
568
569	/*
570	 * Map control/status registers.
571	 */
572	pci_enable_busmaster(dev);
573
574	/* Allocate resources. */
575	sc->sge_res_id = PCIR_BAR(0);
576	sc->sge_res_type = SYS_RES_MEMORY;
577	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
578	    &sc->sge_res_id, RF_ACTIVE);
579	if (sc->sge_res == NULL) {
580		device_printf(dev, "couldn't allocate resource\n");
581		error = ENXIO;
582		goto fail;
583	}
584
585	rid = 0;
586	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
587	    RF_SHAREABLE | RF_ACTIVE);
588	if (sc->sge_irq == NULL) {
589		device_printf(dev, "couldn't allocate IRQ resources\n");
590		error = ENXIO;
591		goto fail;
592	}
593	sc->sge_rev = pci_get_revid(dev);
594	if (pci_get_device(dev) == SIS_DEVICEID_190)
595		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
596	/* Reset the adapter. */
597	sge_reset(sc);
598
	/* Get the MAC address from APC CMOS RAM or the EEPROM. */
600	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
601		sge_get_mac_addr_apc(sc, eaddr);
602	else
603		sge_get_mac_addr_eeprom(sc, eaddr);
604
605	if ((error = sge_dma_alloc(sc)) != 0)
606		goto fail;
607
608	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
609	if (ifp == NULL) {
610		device_printf(dev, "cannot allocate ifnet structure.\n");
611		error = ENOSPC;
612		goto fail;
613	}
614	ifp->if_softc = sc;
615	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
616	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
617	ifp->if_ioctl = sge_ioctl;
618	ifp->if_start = sge_start;
619	ifp->if_init = sge_init;
620	ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
621	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
622	IFQ_SET_READY(&ifp->if_snd);
623	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
624	ifp->if_hwassist = SGE_CSUM_FEATURES;
625	ifp->if_capenable = ifp->if_capabilities;
626	/*
627	 * Do MII setup.
628	 */
629	if (mii_phy_probe(dev, &sc->sge_miibus, sge_ifmedia_upd,
630	    sge_ifmedia_sts)) {
631		device_printf(dev, "no PHY found!\n");
632		error = ENXIO;
633		goto fail;
634	}
635
636	/*
637	 * Call MI attach routine.
638	 */
639	ether_ifattach(ifp, eaddr);
640
641	/* VLAN setup. */
642	if ((sc->sge_flags & SGE_FLAG_SIS190) == 0)
643		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING |
644		    IFCAP_VLAN_HWCSUM;
645	ifp->if_capabilities |= IFCAP_VLAN_MTU;
646	ifp->if_capenable = ifp->if_capabilities;
647	/* Tell the upper layer(s) we support long frames. */
648	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
649
650	/* Hook interrupt last to avoid having to lock softc */
651	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
652	    NULL, sge_intr, sc, &sc->sge_intrhand);
653	if (error) {
654		device_printf(dev, "couldn't set up irq\n");
655		ether_ifdetach(ifp);
656		goto fail;
657	}
658
659fail:
660	if (error)
661		sge_detach(dev);
662
663	return (error);
664}
665
666/*
667 * Shutdown hardware and free up resources.  This can be called any
668 * time after the mutex has been initialized.  It is called in both
669 * the error case in attach and the normal detach case so it needs
670 * to be careful about only freeing resources that have actually been
671 * allocated.
672 */
673static int
674sge_detach(device_t dev)
675{
676	struct sge_softc *sc;
677	struct ifnet *ifp;
678
679	sc = device_get_softc(dev);
680	ifp = sc->sge_ifp;
681	/* These should only be active if attach succeeded. */
682	if (device_is_attached(dev)) {
683		ether_ifdetach(ifp);
684		SGE_LOCK(sc);
685		sge_stop(sc);
686		SGE_UNLOCK(sc);
687		callout_drain(&sc->sge_stat_ch);
688	}
689	if (sc->sge_miibus)
690		device_delete_child(dev, sc->sge_miibus);
691	bus_generic_detach(dev);
692
693	if (sc->sge_intrhand)
694		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
695	if (sc->sge_irq)
696		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
697	if (sc->sge_res)
698		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
699		    sc->sge_res);
700	if (ifp)
701		if_free(ifp);
702	sge_dma_free(sc);
703	mtx_destroy(&sc->sge_mtx);
704
705	return (0);
706}
707
708/*
709 * Stop all chip I/O so that the kernel's probe routines don't
710 * get confused by errant DMAs when rebooting.
711 */
712static int
713sge_shutdown(device_t dev)
714{
715	struct sge_softc *sc;
716
717	sc = device_get_softc(dev);
718	SGE_LOCK(sc);
719	sge_stop(sc);
720	SGE_UNLOCK(sc);
721	return (0);
722}
723
724static int
725sge_suspend(device_t dev)
726{
727	struct sge_softc *sc;
728	struct ifnet *ifp;
729
730	sc = device_get_softc(dev);
731	SGE_LOCK(sc);
732	ifp = sc->sge_ifp;
733	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
734		sge_stop(sc);
735	SGE_UNLOCK(sc);
736	return (0);
737}
738
739static int
740sge_resume(device_t dev)
741{
742	struct sge_softc *sc;
743	struct ifnet *ifp;
744
745	sc = device_get_softc(dev);
746	SGE_LOCK(sc);
747	ifp = sc->sge_ifp;
748	if ((ifp->if_flags & IFF_UP) != 0)
749		sge_init_locked(sc);
750	SGE_UNLOCK(sc);
751	return (0);
752}
753
754static int
755sge_dma_alloc(struct sge_softc *sc)
756{
757	struct sge_chain_data *cd;
758	struct sge_list_data *ld;
759	int error, i;
760
761	cd = &sc->sge_cdata;
762	ld = &sc->sge_ldata;
763	error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
764	    1, 0,			/* alignment, boundary */
765	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
766	    BUS_SPACE_MAXADDR,		/* highaddr */
767	    NULL, NULL,			/* filter, filterarg */
768	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
769	    1,				/* nsegments */
770	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
771	    0,				/* flags */
772	    NULL,			/* lockfunc */
773	    NULL,			/* lockarg */
774	    &cd->sge_tag);
775	if (error != 0) {
776		device_printf(sc->sge_dev,
777		    "could not create parent DMA tag.\n");
778		goto fail;
779	}
780
781	/* RX descriptor ring */
782	error = bus_dma_tag_create(cd->sge_tag,
783	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
784	    BUS_SPACE_MAXADDR,		/* lowaddr */
785	    BUS_SPACE_MAXADDR,		/* highaddr */
786	    NULL, NULL,			/* filter, filterarg */
787	    SGE_RX_RING_SZ, 1,		/* maxsize,nsegments */
788	    SGE_RX_RING_SZ,		/* maxsegsize */
789	    0,				/* flags */
790	    NULL,			/* lockfunc */
791	    NULL,			/* lockarg */
792	    &cd->sge_rx_tag);
793	if (error != 0) {
794		device_printf(sc->sge_dev,
795		    "could not create Rx ring DMA tag.\n");
796		goto fail;
797	}
798	/* Allocate DMA'able memory and load DMA map for RX ring. */
799	error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
800	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
801	    &cd->sge_rx_dmamap);
802	if (error != 0) {
803		device_printf(sc->sge_dev,
804		    "could not allocate DMA'able memory for Rx ring.\n");
805		goto fail;
806	}
807	error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
808	    ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
809	    &ld->sge_rx_paddr, BUS_DMA_NOWAIT);
810	if (error != 0) {
811		device_printf(sc->sge_dev,
812		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
814
815	/* TX descriptor ring */
816	error = bus_dma_tag_create(cd->sge_tag,
817	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
818	    BUS_SPACE_MAXADDR,		/* lowaddr */
819	    BUS_SPACE_MAXADDR,		/* highaddr */
820	    NULL, NULL,			/* filter, filterarg */
821	    SGE_TX_RING_SZ, 1,		/* maxsize,nsegments */
822	    SGE_TX_RING_SZ,		/* maxsegsize */
823	    0,				/* flags */
824	    NULL,			/* lockfunc */
825	    NULL,			/* lockarg */
826	    &cd->sge_tx_tag);
827	if (error != 0) {
828		device_printf(sc->sge_dev,
829		    "could not create Rx ring DMA tag.\n");
830		goto fail;
831	}
832	/* Allocate DMA'able memory and load DMA map for TX ring. */
833	error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
834	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
835	    &cd->sge_tx_dmamap);
836	if (error != 0) {
837		device_printf(sc->sge_dev,
838		    "could not allocate DMA'able memory for Tx ring.\n");
839		goto fail;
840	}
841	error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
842	    ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
843	    &ld->sge_tx_paddr, BUS_DMA_NOWAIT);
844	if (error != 0) {
845		device_printf(sc->sge_dev,
846		    "could not load DMA'able memory for Rx ring.\n");
847		goto fail;
848	}
849
850	/* Create DMA tag for Tx buffers. */
851	error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
852	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * SGE_MAXTXSEGS,
853	    SGE_MAXTXSEGS, MCLBYTES, 0, NULL, NULL, &cd->sge_txmbuf_tag);
854	if (error != 0) {
855		device_printf(sc->sge_dev,
856		    "could not create Tx mbuf DMA tag.\n");
857		goto fail;
858	}
859
860	/* Create DMA tag for Rx buffers. */
861	error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
862	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
863	    MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
864	if (error != 0) {
865		device_printf(sc->sge_dev,
866		    "could not create Rx mbuf DMA tag.\n");
867		goto fail;
868	}
869
870	/* Create DMA maps for Tx buffers. */
871	for (i = 0; i < SGE_TX_RING_CNT; i++) {
872		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
873		    &cd->sge_tx_map[i]);
874		if (error != 0) {
875			device_printf(sc->sge_dev,
876			    "could not create Tx DMA map.\n");
877			goto fail;
878		}
879	}
880	/* Create spare DMA map for Rx buffer. */
881	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
882	if (error != 0) {
883		device_printf(sc->sge_dev,
884		    "could not create spare Rx DMA map.\n");
885		goto fail;
886	}
887	/* Create DMA maps for Rx buffers. */
888	for (i = 0; i < SGE_RX_RING_CNT; i++) {
889		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
890		    &cd->sge_rx_map[i]);
891		if (error) {
892			device_printf(sc->sge_dev,
893			    "could not create Rx DMA map.\n");
894			goto fail;
895		}
896	}
897fail:
898	return (error);
899}
900
901static void
902sge_dma_free(struct sge_softc *sc)
903{
904	struct sge_chain_data *cd;
905	struct sge_list_data *ld;
906	int i;
907
908	cd = &sc->sge_cdata;
909	ld = &sc->sge_ldata;
910	/* Rx ring. */
911	if (cd->sge_rx_tag != NULL) {
912		if (cd->sge_rx_dmamap != NULL)
913			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
914		if (cd->sge_rx_dmamap != NULL && ld->sge_rx_ring != NULL)
915			bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
916			    cd->sge_rx_dmamap);
917		ld->sge_rx_ring = NULL;
918		cd->sge_rx_dmamap = NULL;
919		bus_dma_tag_destroy(cd->sge_rx_tag);
920		cd->sge_rx_tag = NULL;
921	}
922	/* Tx ring. */
923	if (cd->sge_tx_tag != NULL) {
924		if (cd->sge_tx_dmamap != NULL)
925			bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
926		if (cd->sge_tx_dmamap != NULL && ld->sge_tx_ring != NULL)
927			bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
928			    cd->sge_tx_dmamap);
929		ld->sge_tx_ring = NULL;
930		cd->sge_tx_dmamap = NULL;
931		bus_dma_tag_destroy(cd->sge_tx_tag);
932		cd->sge_tx_tag = NULL;
933	}
934	/* Rx buffers. */
935	if (cd->sge_rxmbuf_tag != NULL) {
936		for (i = 0; i < SGE_RX_RING_CNT; i++) {
937			if (cd->sge_rx_map[i] != NULL) {
938				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
939				    cd->sge_rx_map[i]);
940				cd->sge_rx_map[i] = NULL;
941			}
942		}
943		if (cd->sge_rx_spare_map != NULL) {
944			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
945			    cd->sge_rx_spare_map);
946			cd->sge_rx_spare_map = NULL;
947		}
948		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
949		cd->sge_rxmbuf_tag = NULL;
950	}
951	/* Tx buffers. */
952	if (cd->sge_txmbuf_tag != NULL) {
953		for (i = 0; i < SGE_TX_RING_CNT; i++) {
954			if (cd->sge_tx_map[i] != NULL) {
955				bus_dmamap_destroy(cd->sge_txmbuf_tag,
956				    cd->sge_tx_map[i]);
957				cd->sge_tx_map[i] = NULL;
958			}
959		}
960		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
961		cd->sge_txmbuf_tag = NULL;
962	}
963	if (cd->sge_tag != NULL)
964		bus_dma_tag_destroy(cd->sge_tag);
965	cd->sge_tag = NULL;
966}
967
968/*
969 * Initialize the TX descriptors.
970 */
971static int
972sge_list_tx_init(struct sge_softc *sc)
973{
974	struct sge_list_data *ld;
975	struct sge_chain_data *cd;
976
977	SGE_LOCK_ASSERT(sc);
978	ld = &sc->sge_ldata;
979	cd = &sc->sge_cdata;
980	bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
981	ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
982	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
983	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
984	cd->sge_tx_prod = 0;
985	cd->sge_tx_cons = 0;
986	cd->sge_tx_cnt = 0;
987	return (0);
988}
989
990static int
991sge_list_tx_free(struct sge_softc *sc)
992{
993	struct sge_chain_data *cd;
994	int i;
995
996	SGE_LOCK_ASSERT(sc);
997	cd = &sc->sge_cdata;
998	for (i = 0; i < SGE_TX_RING_CNT; i++) {
999		if (cd->sge_tx_mbuf[i] != NULL) {
1000			bus_dmamap_sync(cd->sge_txmbuf_tag,
1001			    cd->sge_tx_map[i], BUS_DMASYNC_POSTWRITE);
1002			bus_dmamap_unload(cd->sge_txmbuf_tag,
1003			    cd->sge_tx_map[i]);
1004			m_free(cd->sge_tx_mbuf[i]);
1005			cd->sge_tx_mbuf[i] = NULL;
1006		}
1007	}
1008
1009	return (0);
1010}
1011
1012/*
1013 * Initialize the RX descriptors and allocate mbufs for them.  Note that
1014 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
1016 */
1017static int
1018sge_list_rx_init(struct sge_softc *sc)
1019{
1020	struct sge_chain_data *cd;
1021	int i;
1022
1023	SGE_LOCK_ASSERT(sc);
1024	cd = &sc->sge_cdata;
1025	cd->sge_rx_cons = 0;
1026	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
1027	for (i = 0; i < SGE_RX_RING_CNT; i++) {
1028		if (sge_newbuf(sc, i) != 0)
1029			return (ENOBUFS);
1030	}
1031	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
1032	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1033	return (0);
1034}
1035
1036static int
1037sge_list_rx_free(struct sge_softc *sc)
1038{
1039	struct sge_chain_data *cd;
1040	int i;
1041
1042	SGE_LOCK_ASSERT(sc);
1043	cd = &sc->sge_cdata;
1044	for (i = 0; i < SGE_RX_RING_CNT; i++) {
1045		if (cd->sge_rx_mbuf[i] != NULL) {
1046			bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[i],
1047			    BUS_DMASYNC_POSTREAD);
1048			bus_dmamap_unload(cd->sge_rxmbuf_tag,
1049			    cd->sge_rx_map[i]);
1050			m_free(cd->sge_rx_mbuf[i]);
1051			cd->sge_rx_mbuf[i] = NULL;
1052		}
1053	}
1054	return (0);
1055}
1056
1057/*
1058 * Initialize an RX descriptor and attach an MBUF cluster.
1059 */
1060static int
1061sge_newbuf(struct sge_softc *sc, int prod)
1062{
1063	struct mbuf *m;
1064	struct sge_desc *desc;
1065	struct sge_chain_data *cd;
1066	bus_dma_segment_t segs[1];
1067	bus_dmamap_t map;
1068	int error, nsegs;
1069
1070	SGE_LOCK_ASSERT(sc);
1071
1072	cd = &sc->sge_cdata;
1073	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1074	if (m == NULL)
1075		return (ENOBUFS);
1076	m->m_len = m->m_pkthdr.len = MCLBYTES;
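	/*
	 * Keep the load address SGE_RX_BUF_ALIGN aligned while leaving
	 * headroom in front of the frame; on strict-alignment machines
	 * sge_fixup_rx() later shifts the data back into that headroom
	 * so the IP header ends up 32-bit aligned.
	 */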
1077	m_adj(m, SGE_RX_BUF_ALIGN);
1078	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
1079	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
1080	if (error != 0) {
1081		m_freem(m);
1082		return (error);
1083	}
1084	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1085	if (cd->sge_rx_mbuf[prod] != NULL) {
1086		bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
1087		    BUS_DMASYNC_POSTREAD);
1088		bus_dmamap_unload(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod]);
1089	}
1090	map = cd->sge_rx_map[prod];
	cd->sge_rx_map[prod] = cd->sge_rx_spare_map;
1092	cd->sge_rx_spare_map = map;
1093	bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
1094	    BUS_DMASYNC_PREREAD);
1095	cd->sge_rx_mbuf[prod] = m;
1096
1097	desc = &sc->sge_ldata.sge_rx_ring[prod];
1098	desc->sge_sts_size = 0;
1099	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
1100	desc->sge_flags = htole32(segs[0].ds_len);
1101	if (prod == SGE_RX_RING_CNT - 1)
1102		desc->sge_flags |= htole32(RING_END);
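	/*
	 * Pass ownership to the controller; the RDC_*_CSUM bits
	 * apparently request IP/TCP/UDP checksum validation, whose
	 * results are examined in sge_rxeof().
	 */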
1103	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
1104	    RDC_TCP_CSUM | RDC_UDP_CSUM);
1105	return (0);
1106}
1107
1108#ifndef __NO_STRICT_ALIGNMENT
1109static __inline void
1110sge_fixup_rx(struct mbuf *m)
1111{
	int i;
	uint16_t *src, *dst;
1114
1115	src = mtod(m, uint16_t *);
1116	dst = src - 3;
1117
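	/*
	 * Move the frame three 16-bit words towards the head of the
	 * buffer so the 14-byte Ethernet header no longer leaves the
	 * IP header misaligned.
	 */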
1118	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1119		*dst++ = *src++;
1120
1121	m->m_data -= (SGE_RX_BUF_ALIGN - ETHER_ALIGN);
1122}
1123#endif
1124
1125static __inline void
1126sge_discard_rxbuf(struct sge_softc *sc, int index)
1127{
1128	struct sge_desc *desc;
1129
1130	desc = &sc->sge_ldata.sge_rx_ring[index];
1131	desc->sge_sts_size = 0;
1132	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
1133	if (index == SGE_RX_RING_CNT - 1)
1134		desc->sge_flags |= htole32(RING_END);
1135	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
1136	    RDC_TCP_CSUM | RDC_UDP_CSUM);
1137}
1138
1139/*
1140 * A frame has been uploaded: pass the resulting mbuf chain up to
1141 * the higher level protocols.
1142 */
1143static void
1144sge_rxeof(struct sge_softc *sc)
1145{
	struct ifnet *ifp;
	struct mbuf *m;
1148	struct sge_chain_data *cd;
1149	struct sge_desc	*cur_rx;
1150	uint32_t rxinfo, rxstat;
1151	int cons, prog;
1152
1153	SGE_LOCK_ASSERT(sc);
1154
1155	ifp = sc->sge_ifp;
1156	cd = &sc->sge_cdata;
1157
1158	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
1159	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1160	cons = cd->sge_rx_cons;
1161	for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
1162	    SGE_INC(cons, SGE_RX_RING_CNT)) {
1163		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1164			break;
1165		cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
1166		rxinfo = le32toh(cur_rx->sge_cmdsts);
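		/* Stop at the first descriptor still owned by the hardware. */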
1167		if ((rxinfo & RDC_OWN) != 0)
1168			break;
1169		rxstat = le32toh(cur_rx->sge_sts_size);
1170		if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
1171		    SGE_RX_NSEGS(rxstat) != 1) {
1172			/* XXX We don't support multi-segment frames yet. */
1173#ifdef SGE_SHOW_ERRORS
1174			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
1175			    RX_ERR_BITS);
1176#endif
1177			sge_discard_rxbuf(sc, cons);
1178			ifp->if_ierrors++;
1179			continue;
1180		}
1181		m = cd->sge_rx_mbuf[cons];
1182		if (sge_newbuf(sc, cons) != 0) {
1183			sge_discard_rxbuf(sc, cons);
1184			ifp->if_iqdrops++;
1185			continue;
1186		}
1187		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1188			if ((rxinfo & RDC_IP_CSUM) != 0 &&
1189			    (rxinfo & RDC_IP_CSUM_OK) != 0)
1190				m->m_pkthdr.csum_flags |=
1191				    CSUM_IP_CHECKED | CSUM_IP_VALID;
1192			if (((rxinfo & RDC_TCP_CSUM) != 0 &&
1193			    (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
1194			    ((rxinfo & RDC_UDP_CSUM) != 0 &&
1195			    (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
1196				m->m_pkthdr.csum_flags |=
1197				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1198				m->m_pkthdr.csum_data = 0xffff;
1199			}
1200		}
1201		/* Check for VLAN tagged frame. */
1202		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1203		    (rxstat & RDS_VLAN) != 0) {
1204			m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
1205			m->m_flags |= M_VLANTAG;
1206		}
1207		if ((sc->sge_flags & SGE_FLAG_SIS190) == 0) {
			/*
			 * Account for the 10 bytes of automatic padding
			 * used to align the IP header on a 32-bit
			 * boundary.  Also note that the CRC bytes are
			 * automatically removed by the hardware.
			 */
1214			m->m_data += SGE_RX_PAD_BYTES;
1215			m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
1216			    SGE_RX_PAD_BYTES;
1217		} else {
1218			m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
1219			    ETHER_CRC_LEN;
1220#ifndef __NO_STRICT_ALIGNMENT
1221			sge_fixup_rx(m);
1222#endif
1223		}
1224		m->m_pkthdr.rcvif = ifp;
1225		ifp->if_ipackets++;
1226		SGE_UNLOCK(sc);
1227		(*ifp->if_input)(ifp, m);
1228		SGE_LOCK(sc);
1229	}
1230
1231	if (prog > 0) {
1232		bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
1233		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1234		cd->sge_rx_cons = cons;
1235	}
1236}
1237
1238/*
1239 * A frame was downloaded to the chip.  It's safe for us to clean up
1240 * the list buffers.
1241 */
1242static void
1243sge_txeof(struct sge_softc *sc)
1244{
1245	struct ifnet *ifp;
1246	struct sge_list_data *ld;
1247	struct sge_chain_data *cd;
1248	uint32_t txstat;
1249	int cons, prod;
1250
1251	SGE_LOCK_ASSERT(sc);
1252
1253	ifp = sc->sge_ifp;
1254	ld = &sc->sge_ldata;
1255	cd = &sc->sge_cdata;
1256
1257	if (cd->sge_tx_cnt == 0)
1258		return;
1259	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
1260	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1261	cons = cd->sge_tx_cons;
1262	prod = cd->sge_tx_prod;
1263	for (; cons != prod; SGE_INC(cons, SGE_TX_RING_CNT)) {
1264		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
1265		if ((txstat & TDC_OWN) != 0)
1266			break;
1267		cd->sge_tx_cnt--;
1268		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1269		if (cd->sge_tx_mbuf[cons] != NULL) {
1270			bus_dmamap_sync(cd->sge_txmbuf_tag,
1271			    cd->sge_tx_map[cons], BUS_DMASYNC_POSTWRITE);
1272			bus_dmamap_unload(cd->sge_txmbuf_tag,
1273			    cd->sge_tx_map[cons]);
1274			m_freem(cd->sge_tx_mbuf[cons]);
1275			cd->sge_tx_mbuf[cons] = NULL;
1276			if (SGE_TX_ERROR(txstat) != 0) {
1277#ifdef SGE_SHOW_ERRORS
1278				device_printf(sc->sge_dev, "Tx error : 0x%b\n",
1279				    txstat, TX_ERR_BITS);
1280#endif
1281				ifp->if_oerrors++;
1282			} else {
1283#ifdef notyet
1284				ifp->if_collisions += (txstat & 0xFFFF) - 1;
1285#endif
1286				ifp->if_opackets++;
1287			}
1288		}
1289
1290	}
1291	cd->sge_tx_cons = cons;
1292	if (cd->sge_tx_cnt == 0)
1293		sc->sge_timer = 0;
1294}
1295
1296static void
1297sge_tick(void *arg)
1298{
1299	struct sge_softc *sc;
1300	struct mii_data *mii;
1301	struct ifnet *ifp;
1302
1303	sc = arg;
1304	SGE_LOCK_ASSERT(sc);
1305
1306	ifp = sc->sge_ifp;
1307	mii = device_get_softc(sc->sge_miibus);
1308	mii_tick(mii);
1309	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
1310		sge_miibus_statchg(sc->sge_dev);
1311		if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
1312		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1313			sge_start_locked(ifp);
1314	}
	/*
	 * Reclaim transmitted frames here as we do not request a Tx
	 * completion interrupt for every queued frame, in order to
	 * reduce excessive interrupts.
	 */
1320	sge_txeof(sc);
1321	sge_watchdog(sc);
1322	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
1323}
1324
1325static void
1326sge_intr(void *arg)
1327{
1328	struct sge_softc *sc;
1329	struct ifnet *ifp;
1330	uint32_t status;
1331
1332	sc = arg;
1333	SGE_LOCK(sc);
1334	ifp = sc->sge_ifp;
1335
1336	status = CSR_READ_4(sc, IntrStatus);
1337	if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
1338		/* Not ours. */
1339		SGE_UNLOCK(sc);
1340		return;
1341	}
1342	/* Acknowledge interrupts. */
1343	CSR_WRITE_4(sc, IntrStatus, status);
1344	/* Disable further interrupts. */
1345	CSR_WRITE_4(sc, IntrMask, 0);
	/*
	 * The controller seems to support some kind of interrupt
	 * moderation mechanism, but we still don't know how to
	 * enable it.  To reduce the number of interrupts generated
	 * under load, we check for pending interrupts in a loop.
	 * This increases the number of register accesses and is not
	 * the correct way to handle interrupt moderation, but there
	 * seems to be no other way at this time.
	 */
1355	for (;;) {
1356		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1357			break;
1358		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
1359			sge_rxeof(sc);
1360			/* Wakeup Rx MAC. */
1361			if ((status & INTR_RX_IDLE) != 0)
1362				CSR_WRITE_4(sc, RX_CTL,
1363				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
1364		}
1365		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
1366			sge_txeof(sc);
1367		status = CSR_READ_4(sc, IntrStatus);
1368		if ((status & SGE_INTRS) == 0)
1369			break;
1370		/* Acknowledge interrupts. */
1371		CSR_WRITE_4(sc, IntrStatus, status);
1372	}
1373	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1374		/* Re-enable interrupts */
1375		CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
1376		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1377			sge_start_locked(ifp);
1378	}
1379	SGE_UNLOCK(sc);
1380}
1381
1382/*
1383 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1384 * pointers to the fragment pointers.
1385 */
1386static int
1387sge_encap(struct sge_softc *sc, struct mbuf **m_head)
1388{
1389	struct mbuf *m;
1390	struct sge_desc *desc;
1391	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
1392	bus_dmamap_t map;
1393	uint32_t cflags;
1394	int error, nsegs, prod;
1395
1396	SGE_LOCK_ASSERT(sc);
1397
1398	prod = sc->sge_cdata.sge_tx_prod;
1399	map = sc->sge_cdata.sge_tx_map[prod];
	/*
	 * The Windows inf file indicates that the SiS controller
	 * supports TSO, VLAN hardware tag insertion/stripping,
	 * interrupt moderation and Tx/Rx checksum offloading.
	 * Unfortunately the vendor did not release this information,
	 * so we are guessing at descriptor usage by trial and error.
	 *
	 * The controller seems to support multi-fragment buffers, but
	 * we don't know how to enable that feature, so limit the
	 * number of fragments per Tx buffer to one until we understand
	 * the controller internals.
	 * I assume the controller can pad with zero bytes if the frame
	 * length is less than 60 bytes, and I also think it has no Tx
	 * buffer alignment limitation. - Needs testing!
	 */
1415	if ((*m_head)->m_next != NULL) {
1416		m = m_defrag(*m_head, M_DONTWAIT);
1417		if (m == NULL) {
1418			m_freem(*m_head);
1419			*m_head = NULL;
1420			return (ENOBUFS);
1421		}
1422		*m_head = m;
1423	}
1424	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_tx_tag, map,
1425	    *m_head, txsegs, &nsegs, 0);
1426	if (error != 0) {
1427		m_freem(*m_head);
1428		*m_head = NULL;
1429		return (error);
1430	}
1431	/* Check descriptor overrun. */
1432	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
1433		bus_dmamap_unload(sc->sge_cdata.sge_tx_tag, map);
1434		return (ENOBUFS);
1435	}
1436	bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, map, BUS_DMASYNC_PREWRITE);
1437
1438	cflags = 0;
1439	if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
1440		cflags |= TDC_IP_CSUM;
1441	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
1442		cflags |= TDC_TCP_CSUM;
1443	if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
1444		cflags |= TDC_UDP_CSUM;
1445	desc = &sc->sge_ldata.sge_tx_ring[prod];
1446	desc->sge_sts_size = htole32((*m_head)->m_pkthdr.len);
1447	desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[0].ds_addr));
1448	desc->sge_flags = htole32(txsegs[0].ds_len);
1449	if (prod == SGE_TX_RING_CNT - 1)
1450		desc->sge_flags |= htole32(RING_END);
1451	/* Configure VLAN. */
	if (((*m_head)->m_flags & M_VLANTAG) != 0) {
1453		cflags |= (*m_head)->m_pkthdr.ether_vtag;
1454		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
1455	}
1456	desc->sge_cmdsts = htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
1457#if 1
1458	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
1459		desc->sge_cmdsts |= htole32(TDC_BST);
1460#else
1461	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
1462		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
1463		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
1464			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
1465	}
1466#endif
	/*
	 * Give ownership to the controller, requesting a Tx completion
	 * interrupt only for every SGE_TX_INTR_FRAMES-th descriptor;
	 * the remaining descriptors are reclaimed from sge_tick().
	 */
1468	if ((prod % SGE_TX_INTR_FRAMES) == 0)
1469		desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
1470	else
1471		desc->sge_cmdsts |= htole32(TDC_OWN);
1472	sc->sge_cdata.sge_tx_mbuf[prod] = *m_head;
1473	sc->sge_cdata.sge_tx_cnt++;
1474	SGE_INC(sc->sge_cdata.sge_tx_prod, SGE_TX_RING_CNT);
1475	return (0);
1476}
1477
1478static void
1479sge_start(struct ifnet *ifp)
1480{
1481	struct sge_softc *sc;
1482
1483	sc = ifp->if_softc;
1484	SGE_LOCK(sc);
1485	sge_start_locked(ifp);
1486	SGE_UNLOCK(sc);
1487}
1488
1489static void
1490sge_start_locked(struct ifnet *ifp)
1491{
1492	struct sge_softc *sc;
1493	struct mbuf *m_head;
1494	int queued = 0;
1495
1496	sc = ifp->if_softc;
1497	SGE_LOCK_ASSERT(sc);
1498
1499	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
1500	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1501	    IFF_DRV_RUNNING)
1502		return;
1503
1504	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1505		if (sc->sge_cdata.sge_tx_cnt == SGE_TX_RING_CNT - 1) {
1506			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1507			break;
1508		}
1509		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1510		if (m_head == NULL)
1511			break;
1512		if (sge_encap(sc, &m_head)) {
1513			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1514			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1515			break;
1516		}
1517		queued++;
1518		/*
1519		 * If there's a BPF listener, bounce a copy of this frame
1520		 * to him.
1521		 */
1522		BPF_MTAP(ifp, m_head);
1523	}
1524
1525	if (queued > 0) {
1526		bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
1527		    sc->sge_cdata.sge_tx_dmamap,
1528		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1529		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
1530		sc->sge_timer = 5;
1531	}
1532}
1533
1534static void
1535sge_init(void *arg)
1536{
1537	struct sge_softc *sc;
1538
1539	sc = arg;
1540	SGE_LOCK(sc);
1541	sge_init_locked(sc);
1542	SGE_UNLOCK(sc);
1543}
1544
1545static void
1546sge_init_locked(struct sge_softc *sc)
1547{
1548	struct ifnet *ifp;
1549	struct mii_data *mii;
1550	uint16_t rxfilt;
1551	int i;
1552
1553	SGE_LOCK_ASSERT(sc);
1554	ifp = sc->sge_ifp;
1555	mii = device_get_softc(sc->sge_miibus);
1556	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1557		return;
1558	/*
1559	 * Cancel pending I/O and free all RX/TX buffers.
1560	 */
1561	sge_stop(sc);
1562	sge_reset(sc);
1563
1564	/* Init circular RX list. */
1565	if (sge_list_rx_init(sc) == ENOBUFS) {
1566		device_printf(sc->sge_dev, "no memory for Rx buffers\n");
1567		sge_stop(sc);
1568		return;
1569	}
1570	/* Init TX descriptors. */
1571	sge_list_tx_init(sc);
1572	/*
1573	 * Load the address of the RX and TX lists.
1574	 */
1575	CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
1576	CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));
1577
1578	CSR_WRITE_4(sc, TxMacControl, 0x60);
1579	CSR_WRITE_4(sc, 0x6c, 0);
1580	CSR_WRITE_4(sc, RxWakeOnLan, 0);
1581	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
1582	/* Allow receiving VLAN frames. */
1583	if ((sc->sge_flags & SGE_FLAG_SIS190) == 0)
1584		CSR_WRITE_2(sc, RxMPSControl,
1585		    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + SGE_RX_PAD_BYTES);
1586	else
		CSR_WRITE_2(sc, RxMPSControl,
		    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1588
1589	for (i = 0; i < ETHER_ADDR_LEN; i++)
1590		CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
1591	/* Configure RX MAC. */
1592	rxfilt = 0;
1593	if ((sc->sge_flags & SGE_FLAG_SIS190) == 0)
1594		rxfilt |= RXMAC_STRIP_FCS | RXMAC_PAD_ENB;
1595	CSR_WRITE_2(sc, RxMacControl, rxfilt);
1596	sge_rxfilter(sc);
1597	sge_setvlan(sc);
1598
1599	/* Initialize default speed/duplex information. */
1600	if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
1601		sc->sge_flags |= SGE_FLAG_SPEED_1000;
1602	sc->sge_flags |= SGE_FLAG_FDX;
1603	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
1604		CSR_WRITE_4(sc, StationControl, 0x04008001);
1605	else
1606		CSR_WRITE_4(sc, StationControl, 0x04000001);
1607	/*
1608	 * XXX Try to mitigate interrupts.
1609	 */
1610	CSR_WRITE_4(sc, IntrControl, 0x08880000);
1611#ifdef notyet
1612	if (sc->sge_intrcontrol != 0)
1613		CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
1614	if (sc->sge_intrtimer != 0)
1615		CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
1616#endif
1617
1618	/*
1619	 * Clear and enable interrupts.
1620	 */
1621	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
1622	CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
1623
1624	/* Enable receiver and transmitter. */
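	/*
	 * The 0x1a00 value written to TX_CTL/RX_CTL throughout this
	 * driver is undocumented and apparently carried over from the
	 * vendor drivers.
	 */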
1625	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
1626	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
1627
1628	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1629	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1630
1631	sc->sge_flags &= ~SGE_FLAG_LINK;
1632	mii_mediachg(mii);
1633	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
1634}
1635
1636/*
1637 * Set media options.
1638 */
1639static int
1640sge_ifmedia_upd(struct ifnet *ifp)
1641{
1642	struct sge_softc *sc;
1643	struct mii_data *mii;
1644	int error;
1645
1646	sc = ifp->if_softc;
1647	SGE_LOCK(sc);
1648	mii = device_get_softc(sc->sge_miibus);
1649	if (mii->mii_instance) {
1650		struct mii_softc *miisc;
1651		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1652			mii_phy_reset(miisc);
1653	}
1654	error = mii_mediachg(mii);
1655	SGE_UNLOCK(sc);
1656
1657	return (error);
1658}
1659
1660/*
1661 * Report current media status.
1662 */
1663static void
1664sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1665{
1666	struct sge_softc *sc;
1667	struct mii_data *mii;
1668
1669	sc = ifp->if_softc;
1670	SGE_LOCK(sc);
1671	mii = device_get_softc(sc->sge_miibus);
1672	if ((ifp->if_flags & IFF_UP) == 0) {
1673		SGE_UNLOCK(sc);
1674		return;
1675	}
1676	mii_pollstat(mii);
1677	SGE_UNLOCK(sc);
1678	ifmr->ifm_active = mii->mii_media_active;
1679	ifmr->ifm_status = mii->mii_media_status;
1680}
1681
1682static int
1683sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1684{
1685	struct sge_softc *sc;
1686	struct ifreq *ifr;
1687	struct mii_data *mii;
1688	int error = 0, mask, reinit;
1689
1690	sc = ifp->if_softc;
1691	ifr = (struct ifreq *)data;
1692
1693	switch(command) {
1694	case SIOCSIFFLAGS:
1695		SGE_LOCK(sc);
1696		if ((ifp->if_flags & IFF_UP) != 0) {
1697			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1698			    ((ifp->if_flags ^ sc->sge_if_flags) &
1699			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1700				sge_rxfilter(sc);
1701			else
1702				sge_init_locked(sc);
1703		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1704			sge_stop(sc);
1705		sc->sge_if_flags = ifp->if_flags;
1706		SGE_UNLOCK(sc);
1707		break;
1708	case SIOCSIFCAP:
1709		SGE_LOCK(sc);
1710		reinit = 0;
1711		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1712		if ((mask & IFCAP_TXCSUM) != 0 &&
1713		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1714			ifp->if_capenable ^= IFCAP_TXCSUM;
1715			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1716				ifp->if_hwassist |= SGE_CSUM_FEATURES;
1717			else
1718				ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
1719		}
1720		if ((mask & IFCAP_RXCSUM) != 0 &&
1721		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1722			ifp->if_capenable ^= IFCAP_RXCSUM;
1723		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1724		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1725			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1726		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1727		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			/*
			 * For some unknown reason, toggling VLAN
			 * hardware tagging requires an interface
			 * reinitialization.
			 */
1732			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1733			reinit = 1;
1734		}
1735		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1736			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1737			sge_init_locked(sc);
1738		}
1739		SGE_UNLOCK(sc);
1740		VLAN_CAPABILITIES(ifp);
1741		break;
1742	case SIOCADDMULTI:
1743	case SIOCDELMULTI:
1744		SGE_LOCK(sc);
1745		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1746			sge_rxfilter(sc);
1747		SGE_UNLOCK(sc);
1748		break;
1749	case SIOCGIFMEDIA:
1750	case SIOCSIFMEDIA:
1751		mii = device_get_softc(sc->sge_miibus);
1752		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1753		break;
1754	default:
1755		error = ether_ioctl(ifp, command, data);
1756		break;
1757	}
1758
1759	return (error);
1760}
1761
1762static void
1763sge_watchdog(struct sge_softc *sc)
1764{
1765	struct ifnet *ifp;
1766
1767	SGE_LOCK_ASSERT(sc);
1768	if (sc->sge_timer == 0 || --sc->sge_timer > 0)
1769		return;
1770
1771	ifp = sc->sge_ifp;
1772	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
1773		if (1 || bootverbose)
1774			device_printf(sc->sge_dev,
1775			    "watchdog timeout (lost link)\n");
1776		ifp->if_oerrors++;
1777		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1778		sge_init_locked(sc);
1779		return;
1780	}
1781	device_printf(sc->sge_dev, "watchdog timeout\n");
1782	ifp->if_oerrors++;
1783
1784	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1785	sge_init_locked(sc);
1786	if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
1787		sge_start_locked(ifp);
1788}
1789
1790/*
1791 * Stop the adapter and free any mbufs allocated to the
1792 * RX and TX lists.
1793 */
1794static void
1795sge_stop(struct sge_softc *sc)
1796{
1797	struct ifnet *ifp;
1798
1799	ifp = sc->sge_ifp;
1800
1801	SGE_LOCK_ASSERT(sc);
1802
1803	sc->sge_timer = 0;
1804	callout_stop(&sc->sge_stat_ch);
1805	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1806
1807	CSR_WRITE_4(sc, IntrMask, 0);
1808	CSR_READ_4(sc, IntrMask);
1809	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
1810	/* Stop TX/RX MAC. */
1811	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
1812	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
1813	/* XXX Can we assume active DMA cycles gone? */
1814	DELAY(2000);
1815	CSR_WRITE_4(sc, IntrMask, 0);
1816	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
1817
1818	sc->sge_flags &= ~SGE_FLAG_LINK;
1819	sge_list_rx_free(sc);
1820	sge_list_tx_free(sc);
1821}
1822