if_sf.c revision 175531
/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 175531 2008-01-21 09:51:28Z yongari $");

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and low CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types. Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements. This driver makes use of type 2 transmit frame
 * descriptors to take full advantage of fragmented packet buffers
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else). The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries. The receive buffer alignment
 * causes problems on strict-alignment architectures, where the
 * packet payload should be longword aligned. There is no simple way
 * around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
 *
 * The Starfire has no internal transceiver, relying instead on an
 * external MII-based transceiver. Accessing registers on external
 * PHYs is done through a special register map rather than with the
 * usual bitbang MDIO method.
 *
 * Accessing the registers on the Starfire is a little tricky. The
 * Starfire has a 512K internal register space. When programmed for
 * PCI memory mapped mode, the entire register space can be accessed
 * directly. However in I/O space mode, only 256 bytes are directly
 * mapped into PCI I/O space. The other registers can be accessed
 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
 * registers inside the 256-byte I/O window.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/sf/if_sfreg.h>
#include <dev/sf/starfire_rx.h>
#include <dev/sf/starfire_tx.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(sf, pci, 1, 1, 1);
MODULE_DEPEND(sf, ether, 1, 1, 1);
MODULE_DEPEND(sf, miibus, 1, 1, 1);

#undef	SF_GFP_DEBUG
#define	SF_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/* Define this to activate partial TCP/UDP checksum offload. */
#undef	SF_PARTIAL_CSUM_SUPPORT

static struct sf_type sf_devs[] = {
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
};

static int sf_probe(device_t);
static int sf_attach(device_t);
static int sf_detach(device_t);
static int sf_shutdown(device_t);
static int sf_suspend(device_t);
static int sf_resume(device_t);
static void sf_intr(void *);
static void sf_tick(void *);
static void sf_stats_update(struct sf_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void sf_fixup_rx(struct mbuf *);
#endif
static void sf_rxeof(struct sf_softc *);
static void sf_txeof(struct sf_softc *);
static int sf_encap(struct sf_softc *, struct mbuf **);
static void sf_start(struct ifnet *);
static void sf_start_locked(struct ifnet *);
static int sf_ioctl(struct ifnet *, u_long, caddr_t);
static void sf_download_fw(struct sf_softc *);
static void sf_init(void *);
static void sf_init_locked(struct sf_softc *);
static void sf_stop(struct sf_softc *);
static void sf_watchdog(struct sf_softc *);
static int sf_ifmedia_upd(struct ifnet *);
static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sf_reset(struct sf_softc *);
static int sf_dma_alloc(struct sf_softc *);
static void sf_dma_free(struct sf_softc *);
static int sf_init_rx_ring(struct sf_softc *);
static void sf_init_tx_ring(struct sf_softc *);
static int sf_newbuf(struct sf_softc *, int);
static void sf_rxfilter(struct sf_softc *);
static int sf_setperf(struct sf_softc *, int, uint8_t *);
static int sf_sethash(struct sf_softc *, caddr_t, int);
#ifdef notdef
static int sf_setvlan(struct sf_softc *, int, uint32_t);
#endif

static uint8_t sf_read_eeprom(struct sf_softc *, int);

static int sf_miibus_readreg(device_t, int, int);
static int sf_miibus_writereg(device_t, int, int, int);
static void sf_miibus_statchg(device_t);
static void sf_link_task(void *, int);
#ifdef DEVICE_POLLING
static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

static uint32_t csr_read_4(struct sf_softc *, int);
static void csr_write_4(struct sf_softc *, int, uint32_t);
static void sf_txthresh_adjust(struct sf_softc *);
static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t sf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sf_probe),
	DEVMETHOD(device_attach,	sf_attach),
	DEVMETHOD(device_detach,	sf_detach),
	DEVMETHOD(device_shutdown,	sf_shutdown),
	DEVMETHOD(device_suspend,	sf_suspend),
	DEVMETHOD(device_resume,	sf_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sf_miibus_statchg),

	{ NULL, NULL }
};

static driver_t sf_driver = {
	"sf",
	sf_methods,
	sizeof(struct sf_softc),
};

static devclass_t sf_devclass;

DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);

#define SF_SETBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))

#define SF_CLRBIT(sc, reg, x)				\
	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))

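/*
 * Register accessors.  In memory-mapped mode the chip's internal
 * registers are visible directly at SF_RMAP_INTREG_BASE; in I/O
 * mode they are reached indirectly through the SF_INDIRECTIO_ADDR/
 * SF_INDIRECTIO_DATA register pair, as described above.
 */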
static uint32_t
csr_read_4(struct sf_softc *sc, int reg)
{
	uint32_t		val;

	if (sc->sf_restype == SYS_RES_MEMORY)
		val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
	}

	return (val);
}

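/*
 * The EEPROM contents are exposed through the register space as
 * 32-bit words starting at SF_EEADDR_BASE: mask off the low two
 * bits of 'reg' to select the word, then shift out the addressed
 * byte.
 */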
static uint8_t
sf_read_eeprom(struct sf_softc *sc, int reg)
{
	uint8_t		val;

	val = (csr_read_4(sc, SF_EEADDR_BASE +
	    (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;

	return (val);
}

static void
csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
{

	if (sc->sf_restype == SYS_RES_MEMORY)
		CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
	}
}

/*
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx.' The perfect filter only has 16 entries so do
 * some sanity tests.
 */
static int
sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
{

	if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
		return (EINVAL);

	if (mac == NULL)
		return (EINVAL);

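	/*
	 * Each perfect filter entry occupies three 16-bit words,
	 * stored with the last octet of the address in the first
	 * word (mac[5]/mac[4]) and the first octet in the last.
	 */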
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));

	return (0);
}

/*
 * Set the bit in the 512-bit hash table that corresponds to the
 * specified mac address 'mac.' If 'prio' is nonzero, update the
 * priority hash table instead of the filter hash table.
 */
static int
sf_sethash(struct sf_softc *sc, caddr_t	mac, int prio)
{
	uint32_t		h;

	if (mac == NULL)
		return (EINVAL);

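	/*
	 * Use the top 9 bits of the big-endian CRC32 of the address:
	 * bits 8-4 select one of the 32 16-bit hash table words and
	 * bits 3-0 select the bit within that word (32 * 16 = 512).
	 */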
	h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;

	if (prio) {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	} else {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	}

	return (0);
}

#ifdef notdef
/*
 * Set a VLAN tag in the receive filter.
 */
static int
sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
{

	if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
		return (EINVAL);

	csr_write_4(sc, SF_RXFILT_HASH_BASE +
	    (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);

	return (0);
}
#endif

static int
sf_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sf_softc		*sc;
	int			i;
	uint32_t		val = 0;

	sc = device_get_softc(dev);

	for (i = 0; i < SF_TIMEOUT; i++) {
		val = csr_read_4(sc, SF_PHY_REG(phy, reg));
		if ((val & SF_MII_DATAVALID) != 0)
			break;
	}

	if (i == SF_TIMEOUT)
		return (0);

	val &= SF_MII_DATAPORT;
	if (val == 0xffff)
		return (0);

	return (val);
}

static int
sf_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct sf_softc		*sc;
	int			i;
	int			busy;

	sc = device_get_softc(dev);

	csr_write_4(sc, SF_PHY_REG(phy, reg), val);

	for (i = 0; i < SF_TIMEOUT; i++) {
		busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
		if ((busy & SF_MII_BUSY) == 0)
			break;
	}

	return (0);
}

static void
sf_miibus_statchg(device_t dev)
{
	struct sf_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task);
}

static void
sf_link_task(void *arg, int pending)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		val;

	sc = (struct sf_softc *)arg;

	SF_LOCK(sc);

	mii = device_get_softc(sc->sf_miibus);
	ifp = sc->sf_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		SF_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sf_link = 1;
	} else
		sc->sf_link = 0;

	val = csr_read_4(sc, SF_MACCFG_1);
	val &= ~SF_MACCFG1_FULLDUPLEX;
	val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= SF_MACCFG1_FULLDUPLEX;
		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
#ifdef notyet
		/* Configure flow-control bits. */
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= SF_MACCFG1_RX_FLOWENB;
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			val |= SF_MACCFG1_TX_FLOWENB;
#endif
	} else
		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);

	/* Reset the MAC for the changes to take effect. */
	csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
	DELAY(1000);
	csr_write_4(sc, SF_MACCFG_1, val);

	val = csr_read_4(sc, SF_TIMER_CTL);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= SF_TIMER_TIMES_TEN;
	else
		val &= ~SF_TIMER_TIMES_TEN;
	csr_write_4(sc, SF_TIMER_CTL, val);

	SF_UNLOCK(sc);
}

static void
sf_rxfilter(struct sf_softc *sc)
{
	struct ifnet		*ifp;
	int			i;
	struct ifmultiaddr	*ifma;
	uint8_t			dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
	uint32_t		rxfilt;

	ifp = sc->sf_ifp;

	/* First zot all the existing filters. */
	for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
		sf_setperf(sc, i, dummy);
	for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
	    i += sizeof(uint32_t))
		csr_write_4(sc, i, 0);

	rxfilt = csr_read_4(sc, SF_RXFILT);
	rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= SF_RXFILT_BROAD;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
	    (ifp->if_flags & IFF_PROMISC) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= SF_RXFILT_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxfilt |= SF_RXFILT_ALLMULTI;
		goto done;
	}

	/* Now program new ones. */
	i = 1;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
	    ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 15 multicast groups
		 * into the perfect filter. For all others,
		 * use the hash table.
		 */
		if (i < SF_RXFILT_PERFECT_CNT) {
			sf_setperf(sc, i,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			i++;
			continue;
		}

		sf_sethash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
	}
	IF_ADDR_UNLOCK(ifp);

done:
	csr_write_4(sc, SF_RXFILT, rxfilt);
}

/*
 * Set media options.
 */
static int
sf_ifmedia_upd(struct ifnet *ifp)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;
	int			error;

	sc = ifp->if_softc;
	SF_LOCK(sc);

	mii = device_get_softc(sc->sf_miibus);
	if (mii->mii_instance) {
		struct mii_softc        *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	SF_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	SF_LOCK(sc);
	mii = device_get_softc(sc->sf_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SF_UNLOCK(sc);
}

static int
sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sf_softc		*sc;
	struct ifreq		*ifr;
	struct mii_data		*mii;
	int			error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		SF_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_flags ^ sc->sf_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					sf_rxfilter(sc);
			} else {
				if (sc->sf_detach == 0)
					sf_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				sf_stop(sc);
		}
		sc->sf_if_flags = ifp->if_flags;
		SF_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SF_LOCK(sc);
		sf_rxfilter(sc);
		SF_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sf_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(sf_poll, ifp);
				if (error != 0)
					break;
				SF_LOCK(sc);
				/* Disable interrupts. */
				csr_write_4(sc, SF_IMR, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				SF_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				SF_LOCK(sc);
				csr_write_4(sc, SF_IMR, SF_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				SF_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
				SF_LOCK(sc);
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
					ifp->if_hwassist |= SF_CSUM_FEATURES;
					SF_SETBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_TXGFP_ENB);
				} else {
					ifp->if_hwassist &= ~SF_CSUM_FEATURES;
					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_TXGFP_ENB);
				}
				SF_UNLOCK(sc);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
				SF_LOCK(sc);
				ifp->if_capenable ^= IFCAP_RXCSUM;
				if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
					SF_SETBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_RXGFP_ENB);
				else
					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_RXGFP_ENB);
				SF_UNLOCK(sc);
			}
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
sf_reset(struct sf_softc *sc)
{
	int		i;

	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
	DELAY(1000);
	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);

	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);

	for (i = 0; i < SF_TIMEOUT; i++) {
		DELAY(10);
		if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
			break;
	}

	if (i == SF_TIMEOUT)
		device_printf(sc->sf_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We also check the subsystem ID so that we can identify exactly which
 * NIC has been found, if possible.
 */
static int
sf_probe(device_t dev)
{
	struct sf_type		*t;
	uint16_t		vid;
	uint16_t		did;
	uint16_t		sdid;
	int			i;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sdid = pci_get_subdevice(dev);

	t = sf_devs;
	for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
		if (vid == t->sf_vid && did == t->sf_did) {
			if (sdid == t->sf_sdid) {
				device_set_desc(dev, t->sf_sname);
				return (BUS_PROBE_DEFAULT);
			}
		}
	}

	if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
		/* unknown subdevice */
		device_set_desc(dev, sf_devs[0].sf_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sf_attach(device_t dev)
{
	int			i;
	struct sf_softc		*sc;
	struct ifnet		*ifp;
	uint32_t		reg;
	int			rid, error = 0;
	uint8_t			eaddr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->sf_dev = dev;

	mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
	TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over I/O space as the
	 * hardware requires lots of register accesses to fetch various
	 * producer/consumer indexes during Tx/Rx operation. However this
	 * requires a large (512K) memory space to map the entire register
	 * space.
	 */
	sc->sf_rid = PCIR_BAR(0);
	sc->sf_restype = SYS_RES_MEMORY;
	sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
	    RF_ACTIVE);
	if (sc->sf_res == NULL) {
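		/*
		 * A 64-bit memory BAR occupies two BAR registers, so
		 * the I/O BAR lives at BAR(2) instead of BAR(1).
		 */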
		reg = pci_read_config(dev, PCIR_BAR(0), 4);
		if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
			sc->sf_rid = PCIR_BAR(2);
		else
			sc->sf_rid = PCIR_BAR(1);
		sc->sf_restype = SYS_RES_IOPORT;
		sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
		    &sc->sf_rid, RF_ACTIVE);
		if (sc->sf_res == NULL) {
			device_printf(dev, "couldn't allocate resources\n");
			mtx_destroy(&sc->sf_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");

	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (reg == 0) {
		/*
		 * If the cache line size is 0, MWI is not used at all,
		 * so set a reasonable default. The AIC-6915 supports 0,
		 * 4, 8, 16, 32 and 64.
		 */
		reg = 16;
		device_printf(dev, "setting PCI cache line size to %u\n", reg);
		pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
	} else {
		if (bootverbose)
			device_printf(dev, "PCI cache line size : %u\n", reg);
	}
	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Allocate interrupt. */
	rid = 0;
	sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sf_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    sf_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
		&sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
		"sf interrupt moderation");
	/* Pull in device tunables. */
	sc->sf_int_mod = SF_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_mod", &sc->sf_int_mod);
	if (error == 0) {
		if (sc->sf_int_mod < SF_IM_MIN ||
		    sc->sf_int_mod > SF_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SF_IM_DEFAULT);
			sc->sf_int_mod = SF_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sf_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);

	/* Allocate DMA resources. */
	if (sf_dma_alloc(sc) != 0) {
		error = ENOSPC;
		goto fail;
	}

	sc->sf_txthresh = SF_MIN_TX_THRESHOLD;

	ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd,
	    sf_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_init = sf_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * With the help of firmware, AIC-6915 supports
	 * Tx/Rx TCP/UDP checksum offload.
	 */
	ifp->if_hwassist = SF_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sf_intr, sc, &sc->sf_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sf_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sf_detach(device_t dev)
{
	struct sf_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->sf_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		SF_LOCK(sc);
		sc->sf_detach = 1;
		sf_stop(sc);
		SF_UNLOCK(sc);
		callout_drain(&sc->sf_co);
		taskqueue_drain(taskqueue_swi, &sc->sf_link_task);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}
	if (sc->sf_miibus) {
		device_delete_child(dev, sc->sf_miibus);
		sc->sf_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->sf_intrhand != NULL)
		bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
	if (sc->sf_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
	if (sc->sf_res != NULL)
		bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
		    sc->sf_res);

	sf_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->sf_mtx);

	return (0);
}

struct sf_dmamap_arg {
	bus_addr_t		sf_busaddr;
};

static void
sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sf_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->sf_busaddr = segs[0].ds_addr;
}

static int
sf_dma_alloc(struct sf_softc *sc)
{
	struct sf_dmamap_arg	ctx;
	struct sf_txdesc	*txd;
	struct sf_rxdesc	*rxd;
	bus_addr_t		lowaddr;
	bus_addr_t		rx_ring_end, rx_cring_end;
	bus_addr_t		tx_ring_end, tx_cring_end;
	int			error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sf_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_parent_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0, 		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_TX_DLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_TX_DLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx completion ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0, 		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_TX_CLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_TX_CLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_cring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Tx completion ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_RX_DLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_RX_DLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx completion ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_RX_CLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_RX_CLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_cring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Rx completion ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * SF_MAXTXSEGS,	/* maxsize */
	    SF_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
	    (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
	    sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
	    SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
	    (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Tx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
	    SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
	    (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
	    SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
	    (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Rx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
	    SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;

	/*
	 * The Tx descriptor ring and Tx completion ring should be
	 * addressed in the same 4GB space. The same rule applies to
	 * the Rx ring and Rx completion ring. Unfortunately there is
	 * no way to specify this boundary restriction with bus_dma(9),
	 * so just try to allocate without the restriction and check
	 * whether the restriction was satisfied. If not, fall back to
	 * 32-bit DMA addressing mode, which always guarantees the
	 * restriction.
	 */
	tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
	tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
	rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
	rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
	if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
	    SF_ADDR_HI(tx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
	    SF_ADDR_HI(tx_ring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
	    SF_ADDR_HI(rx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
	    SF_ADDR_HI(rx_ring_end))) {
		device_printf(sc->sf_dev,
		    "switching to 32bit DMA mode\n");
		sf_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
		txd = &sc->sf_cdata.sf_txdesc[i];
		txd->tx_m = NULL;
		txd->ndesc = 0;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sf_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
	    &sc->sf_cdata.sf_rx_sparemap)) != 0) {
		device_printf(sc->sf_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
		rxd = &sc->sf_cdata.sf_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sf_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
sf_dma_free(struct sf_softc *sc)
{
	struct sf_txdesc	*txd;
	struct sf_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->sf_cdata.sf_tx_ring_tag) {
		if (sc->sf_cdata.sf_tx_ring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
			    sc->sf_cdata.sf_tx_ring_map);
		if (sc->sf_cdata.sf_tx_ring_map &&
		    sc->sf_rdata.sf_tx_ring)
			bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
			    sc->sf_rdata.sf_tx_ring,
			    sc->sf_cdata.sf_tx_ring_map);
		sc->sf_rdata.sf_tx_ring = NULL;
		sc->sf_cdata.sf_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
		sc->sf_cdata.sf_tx_ring_tag = NULL;
	}
	/* Tx completion ring. */
	if (sc->sf_cdata.sf_tx_cring_tag) {
		if (sc->sf_cdata.sf_tx_cring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
			    sc->sf_cdata.sf_tx_cring_map);
		if (sc->sf_cdata.sf_tx_cring_map &&
		    sc->sf_rdata.sf_tx_cring)
			bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
			    sc->sf_rdata.sf_tx_cring,
			    sc->sf_cdata.sf_tx_cring_map);
		sc->sf_rdata.sf_tx_cring = NULL;
		sc->sf_cdata.sf_tx_cring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
		sc->sf_cdata.sf_tx_cring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->sf_cdata.sf_rx_ring_tag) {
		if (sc->sf_cdata.sf_rx_ring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
			    sc->sf_cdata.sf_rx_ring_map);
		if (sc->sf_cdata.sf_rx_ring_map &&
		    sc->sf_rdata.sf_rx_ring)
			bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
			    sc->sf_rdata.sf_rx_ring,
			    sc->sf_cdata.sf_rx_ring_map);
		sc->sf_rdata.sf_rx_ring = NULL;
		sc->sf_cdata.sf_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
		sc->sf_cdata.sf_rx_ring_tag = NULL;
	}
	/* Rx completion ring. */
	if (sc->sf_cdata.sf_rx_cring_tag) {
		if (sc->sf_cdata.sf_rx_cring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
			    sc->sf_cdata.sf_rx_cring_map);
		if (sc->sf_cdata.sf_rx_cring_map &&
		    sc->sf_rdata.sf_rx_cring)
			bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
			    sc->sf_rdata.sf_rx_cring,
			    sc->sf_cdata.sf_rx_cring_map);
		sc->sf_rdata.sf_rx_cring = NULL;
		sc->sf_cdata.sf_rx_cring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
		sc->sf_cdata.sf_rx_cring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->sf_cdata.sf_tx_tag) {
		for (i = 0; i < SF_TX_DLIST_CNT; i++) {
			txd = &sc->sf_cdata.sf_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
		sc->sf_cdata.sf_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->sf_cdata.sf_rx_tag) {
		for (i = 0; i < SF_RX_DLIST_CNT; i++) {
			rxd = &sc->sf_cdata.sf_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->sf_cdata.sf_rx_sparemap) {
			bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
			    sc->sf_cdata.sf_rx_sparemap);
			sc->sf_cdata.sf_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
		sc->sf_cdata.sf_rx_tag = NULL;
	}

	if (sc->sf_cdata.sf_parent_tag) {
		bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
		sc->sf_cdata.sf_parent_tag = NULL;
	}
}

static int
sf_init_rx_ring(struct sf_softc *sc)
{
	struct sf_ring_data	*rd;
	int			i;

	sc->sf_cdata.sf_rxc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
	bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);

	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
		if (sf_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sf_init_tx_ring(struct sf_softc *sc)
{
	struct sf_ring_data	*rd;
	int			i;

	sc->sf_cdata.sf_tx_prod = 0;
	sc->sf_cdata.sf_tx_cnt = 0;
	sc->sf_cdata.sf_txc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
	bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
		rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
		sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
		sc->sf_cdata.sf_txdesc[i].ndesc = 0;
	}
	rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END);

	bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
	    sc->sf_cdata.sf_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sf_newbuf(struct sf_softc *sc, int idx)
{
	struct sf_rx_rdesc	*desc;
	struct sf_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
	    sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sf_cdata.sf_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
	sc->sf_cdata.sf_rx_sparemap = map;
	bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->sf_rdata.sf_rx_ring[idx];
	desc->sf_addr = htole64(segs[0].ds_addr);

	return (0);
}

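/*
 * On strict-alignment architectures the longword-aligned receive
 * buffer leaves the IP header misaligned behind the 14-byte Ethernet
 * header.  Shuffle the payload back by ETHER_ALIGN (2) bytes, one
 * 16-bit word at a time, so the upper layers see an aligned header.
 */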
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sf_fixup_rx(struct mbuf *m)
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * The starfire is programmed to use 'normal' mode for packet reception,
 * which means we use the consumer/producer model for both the buffer
 * descriptor queue and the completion descriptor queue. The only problem
 * with this is that it involves a lot of register accesses: we have to
 * read the RX completion consumer and producer indexes and the RX buffer
 * producer index, plus the RX completion consumer and RX buffer producer
 * indexes have to be updated. It would have been easier if Adaptec had
 * put each index in a separate register, especially given that the damn
 * NIC has a 512K register space.
 *
 * In spite of all the lovely features that Adaptec crammed into the 6915,
 * it is marred by one truly stupid design flaw, which is that receive
 * buffer addresses must be aligned on a longword boundary. This forces
 * the packet payload to be unaligned, which is suboptimal on the x86 and
 * completely unusable on the Alpha. Our only recourse is to copy received
 * packets into properly aligned buffers before handing them off.
 */
static void
sf_rxeof(struct sf_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sf_rxdesc	*rxd;
	struct sf_rx_rcdesc	*cur_cmp;
	int			cons, eidx, prog;
	uint32_t		status, status2;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;

	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * To reduce register access, directly read Receive completion
	 * queue entry.
	 */
	eidx = 0;
	prog = 0;
	for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
		status = le32toh(cur_cmp->sf_rx_status1);
		if (status == 0)
			break;
#ifdef DEVICE_POLLING
		if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		prog++;
		eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
		rxd = &sc->sf_cdata.sf_rxdesc[eidx];
		m = rxd->rx_m;

		/*
		 * Note, if_ipackets and if_ierrors counters
		 * are handled in sf_stats_update().
		 */
		if ((status & SF_RXSTAT1_OK) == 0) {
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		if (sf_newbuf(sc, eidx) != 0) {
			ifp->if_iqdrops++;
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		/* AIC-6915 supports TCP/UDP checksum offload. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			status2 = le32toh(cur_cmp->sf_rx_status2);
			/*
			 * Sometimes AIC-6915 generates an interrupt to
			 * warn of an RxGFP stall with the bad checksum
			 * bit set in the status word. I'm not sure what
			 * condition triggers it but the received
			 * packet's checksum was correct even though
			 * AIC-6915 does not agree on this. This may be
			 * an indication of a firmware bug. To fix the
			 * issue, do not rely on the bad checksum bit in
			 * the status word and let the upper layer verify
			 * the integrity of the received frame.
			 * Another nice feature of AIC-6915 is hardware
			 * assistance of checksum calculation by
			 * providing a partial checksum value for the
			 * received frame. The partial checksum value can
			 * be used to accelerate checksum computation for
			 * fragmented TCP/UDP packets. The upper network
			 * stack already takes advantage of the partial
			 * checksum value in the IP reassembly stage. But
			 * I'm not sure about the correctness of the
			 * partial hardware checksum assistance as
			 * frequent RxGFP stalls are seen on
			 * non-fragmented frames. Due to the complexity
			 * of the checksum computation code in the
			 * firmware it's possible to see another bug in
			 * RxGFP, so ignore checksum assistance for
			 * fragmented frames. This can be changed in the
			 * future.
			 */
			if ((status2 & SF_RXSTAT2_FRAG) == 0) {
				if ((status2 & (SF_RXSTAT2_TCP |
				    SF_RXSTAT2_UDP)) != 0) {
					if ((status2 & SF_RXSTAT2_CSUM_OK)) {
						m->m_pkthdr.csum_flags =
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				}
			}
#ifdef SF_PARTIAL_CSUM_SUPPORT
			else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
				if ((status2 & (SF_RXSTAT2_TCP |
				    SF_RXSTAT2_UDP)) != 0) {
					if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
						m->m_pkthdr.csum_flags =
						    CSUM_DATA_VALID;
						m->m_pkthdr.csum_data =
						    (status &
						    SF_RX_CMPDESC_CSUM2);
					}
				}
			}
#endif
		}

		m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
#ifndef	__NO_STRICT_ALIGNMENT
		sf_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;

		SF_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SF_LOCK(sc);

		/* Clear completion status. */
		cur_cmp->sf_rx_status1 = 0;
	}

	if (prog > 0) {
		sc->sf_cdata.sf_rxc_cons = cons;
		bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
		    sc->sf_cdata.sf_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
		    sc->sf_cdata.sf_rx_cring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Update Rx completion Q1 consumer index. */
		csr_write_4(sc, SF_CQ_CONSIDX,
		    (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
		    (cons & SF_CQ_CONSIDX_RXQ1));
		/* Update Rx descriptor Q1 ptr. */
		csr_write_4(sc, SF_RXDQ_PTR_Q1,
		    (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
		    (eidx & SF_RXDQ_PRODIDX));
	}
}

/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8-byte blocks.
 */
static void
sf_txeof(struct sf_softc *sc)
{
	struct sf_txdesc	*txd;
	struct sf_tx_rcdesc	*cur_cmp;
	struct ifnet		*ifp;
	uint32_t		status;
	int			cons, idx, prod;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;

	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cons = sc->sf_cdata.sf_txc_cons;
	prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
	if (prod == cons)
		return;

	for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
		status = le32toh(cur_cmp->sf_tx_status1);
		if (status == 0)
			break;
		switch (status & SF_TX_CMPDESC_TYPE) {
		case SF_TXCMPTYPE_TX:
			/* Tx complete entry. */
			break;
		case SF_TXCMPTYPE_DMA:
			/* DMA complete entry. */
			idx = status & SF_TX_CMPDESC_IDX;
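			/* Convert the byte offset to a descriptor index. */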
1722			idx = idx / sizeof(struct sf_tx_rdesc);
1723			/*
1724			 * We don't need to check Tx status here.
1725			 * SF_ISR_TX_LOFIFO intr would handle this.
1726			 * Note, if_opackets, if_collisions and if_oerrors
1727			 * counters are handled in sf_stats_update().
1728			 */
1729			txd = &sc->sf_cdata.sf_txdesc[idx];
1730			if (txd->tx_m != NULL) {
1731				bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1732				    txd->tx_dmamap,
1733				    BUS_DMASYNC_POSTWRITE);
1734				bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1735				    txd->tx_dmamap);
1736				m_freem(txd->tx_m);
1737				txd->tx_m = NULL;
1738			}
1739			sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1740			KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1741			    ("%s: Active Tx desc counter was garbled\n",
1742			    __func__));
1743			txd->ndesc = 0;
1744			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1745			break;
1746		default:
1747			/* It should not happen. */
1748			device_printf(sc->sf_dev,
1749			    "unknown Tx completion type : 0x%08x : %d : %d\n",
1750			    status, cons, prod);
1751			break;
1752		}
1753		cur_cmp->sf_tx_status1 = 0;
1754	}
1755
1756	sc->sf_cdata.sf_txc_cons = cons;
1757	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1758	    sc->sf_cdata.sf_tx_cring_map,
1759	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1760
1761	if (sc->sf_cdata.sf_tx_cnt == 0)
1762		sc->sf_watchdog_timer = 0;
1763
1764	/* Update Tx completion consumer index. */
1765	csr_write_4(sc, SF_CQ_CONSIDX,
1766	    (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1767	    ((cons << 16) & 0xffff0000));
1768}
1769
1770static void
1771sf_txthresh_adjust(struct sf_softc *sc)
1772{
1773	uint32_t		txfctl;
1774
1775	device_printf(sc->sf_dev, "Tx underrun -- ");
1776	if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1777		txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1778		/* Increase the Tx threshold by 256 bytes. */
1779		sc->sf_txthresh += 16;
1780		if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1781			sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1782		txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1783		txfctl |= sc->sf_txthresh;
1784		printf("increasing Tx threshold to %d bytes\n",
1785		    sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1786		csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1787	} else
1788		printf("\n");
1789}
1790
1791#ifdef DEVICE_POLLING
1792static void
1793sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1794{
1795	struct sf_softc		*sc;
1796	uint32_t		status;
1797
1798	sc = ifp->if_softc;
1799	SF_LOCK(sc);
1800
1801	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1802		SF_UNLOCK(sc);
1803		return;
1804	}
1805
1806	sc->rxcycles = count;
1807	sf_rxeof(sc);
1808	sf_txeof(sc);
1809	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1810		sf_start_locked(ifp);
1811
1812	if (cmd == POLL_AND_CHECK_STATUS) {
1813		/* Reading the ISR register clears all interrupts. */
1814		status = csr_read_4(sc, SF_ISR);
1815
1816		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1817			if ((status & SF_ISR_STATSOFLOW) != 0)
1818				sf_stats_update(sc);
1819			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1820				sf_txthresh_adjust(sc);
1821			else if ((status & SF_ISR_DMAERR) != 0) {
1822				device_printf(sc->sf_dev,
1823				    "DMA error, resetting\n");
1824				sf_init_locked(sc);
1825			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1826				sc->sf_statistics.sf_tx_gfp_stall++;
1827#ifdef	SF_GFP_DEBUG
1828				device_printf(sc->sf_dev,
1829				    "TxGFP is not responding!\n");
1830#endif
1831			} else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1832				sc->sf_statistics.sf_rx_gfp_stall++;
1833#ifdef	SF_GFP_DEBUG
1834				device_printf(sc->sf_dev,
1835				    "RxGFP is not responding!\n");
1836#endif
1837			}
1838		}
1839	}
1840
1841	SF_UNLOCK(sc);
1842}
1843#endif /* DEVICE_POLLING */
1844
1845static void
1846sf_intr(void *arg)
1847{
1848	struct sf_softc		*sc;
1849	struct ifnet		*ifp;
1850	uint32_t		status;
1851
1852	sc = (struct sf_softc *)arg;
1853	SF_LOCK(sc);
1854
1855	if (sc->sf_suspended != 0)
1856		goto done_locked;
1857
1858	/* Reading the ISR register clears all interrupts. */
1859	status = csr_read_4(sc, SF_ISR);
1860	if (status == 0 || status == 0xffffffff ||
1861	    (status & SF_ISR_PCIINT_ASSERTED) == 0)
1862		goto done_locked;
1863
1864	ifp = sc->sf_ifp;
1865#ifdef DEVICE_POLLING
1866	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1867		goto done_locked;
1868#endif
1869	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1870		goto done_locked;
1871
1872	/* Disable interrupts. */
1873	csr_write_4(sc, SF_IMR, 0x00000000);
1874
1875	while ((status & SF_INTRS) != 0) {
1876		if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1877			sf_rxeof(sc);
1878
1879		if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1880		    SF_ISR_TX_QUEUEDONE)) != 0)
1881			sf_txeof(sc);
1882
1883		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1884			if ((status & SF_ISR_STATSOFLOW) != 0)
1885				sf_stats_update(sc);
1886			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1887				sf_txthresh_adjust(sc);
1888			else if ((status & SF_ISR_DMAERR) != 0) {
1889				device_printf(sc->sf_dev,
1890				    "DMA error, resetting\n");
1891				sf_init_locked(sc);
1892				break;
1893			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1894				sc->sf_statistics.sf_tx_gfp_stall++;
1895#ifdef	SF_GFP_DEBUG
1896				device_printf(sc->sf_dev,
1897				    "TxGFP is not responding!\n");
1898#endif
1899			}
1900			else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1901				sc->sf_statistics.sf_rx_gfp_stall++;
1902#ifdef	SF_GFP_DEBUG
1903				device_printf(sc->sf_dev,
1904				    "RxGFP is not responding!\n");
1905#endif
1906			}
1907		}
1908		/* Reading the ISR register clears all interrupts. */
1909		status = csr_read_4(sc, SF_ISR);
1910	}
1911
1912	/* Re-enable interrupts. */
1913	csr_write_4(sc, SF_IMR, SF_INTRS);
1914
1915	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1916		sf_start_locked(ifp);
1917done_locked:
1918	SF_UNLOCK(sc);
1919}
1920
1921static void
1922sf_download_fw(struct sf_softc *sc)
1923{
1924	uint32_t gfpinst;
1925	int i, ndx;
1926	uint8_t *p;
1927
1928	/*
1929	 * A GFP instruction is 48 bits wide, so each one has to be
1930	 * written in two parts.
1931	 */
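	/*
	 * Layout as written below: bytes p[2..5] form the low 32-bit
	 * word, stored at GFP memory offset ndx * 4, and bytes p[0..1]
	 * form the upper 16 bits, stored at offset (ndx + 1) * 4.
	 */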
1932	p = txfwdata;
1933	ndx = 0;
1934	for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1935		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1936		csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1937		gfpinst = p[0] << 8 | p[1];
1938		csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1939		p += SF_GFP_INST_BYTES;
1940		ndx += 2;
1941	}
1942	if (bootverbose)
1943		device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1944
1945	p = rxfwdata;
1946	ndx = 0;
1947	for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1948		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1949		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1950		gfpinst = p[0] << 8 | p[1];
1951		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1952		p += SF_GFP_INST_BYTES;
1953		ndx += 2;
1954	}
1955	if (bootverbose)
1956		device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1957}
1958
1959static void
1960sf_init(void *xsc)
1961{
1962	struct sf_softc		*sc;
1963
1964	sc = (struct sf_softc *)xsc;
1965	SF_LOCK(sc);
1966	sf_init_locked(sc);
1967	SF_UNLOCK(sc);
1968}
1969
1970static void
1971sf_init_locked(struct sf_softc *sc)
1972{
1973	struct ifnet		*ifp;
1974	struct mii_data		*mii;
1975	uint8_t			eaddr[ETHER_ADDR_LEN];
1976	bus_addr_t		addr;
1977	int			i;
1978
1979	SF_LOCK_ASSERT(sc);
1980	ifp = sc->sf_ifp;
1981	mii = device_get_softc(sc->sf_miibus);
1982
1983	sf_stop(sc);
1984	/* Reset the hardware to a known state. */
1985	sf_reset(sc);
1986
1987	/* Init all the receive filter registers. */
1988	for (i = SF_RXFILT_PERFECT_BASE;
1989	    i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
1990		csr_write_4(sc, i, 0);
1991
1992	/* Empty stats counter registers. */
1993	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
1994		csr_write_4(sc, i, 0);
1995
1996	/* Init our MAC address. */
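	/*
	 * The station address is split across two registers: the last
	 * four octets of the MAC address go into SF_PAR0 and the first
	 * two octets into SF_PAR1.
	 */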
1997	bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
1998	csr_write_4(sc, SF_PAR0,
1999	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2000	csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2001	sf_setperf(sc, 0, eaddr);
2002
2003	if (sf_init_rx_ring(sc) == ENOBUFS) {
2004		device_printf(sc->sf_dev,
2005		    "initialization failed: no memory for rx buffers\n");
2006		return;
2007	}
2008
2009	sf_init_tx_ring(sc);
2010
2011	/*
2012	 * Use 16 perfect address filter slots.
2013	 * Hash only on the multicast destination address and accept
2014	 * matching frames regardless of VLAN ID.
2015	 */
2016	csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2017
2018	/*
2019	 * Set Rx filter.
2020	 */
2021	sf_rxfilter(sc);
2022
2023	/* Init the completion queue indexes. */
2024	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2025	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2026
2027	/* Init the RX completion queue. */
2028	addr = sc->sf_rdata.sf_rx_cring_paddr;
2029	csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2030	csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2031	if (SF_ADDR_HI(addr) != 0)
2032		SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2033	/* Set RX completion queue type 2. */
2034	SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2035	csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2036
2037	/*
2038	 * Init RX DMA control: report bad packets and use the default
2039	 * RxHighPriority threshold and the default RxBurstSize of
2040	 * 128 bytes.
2041	 */
2042	SF_SETBIT(sc, SF_RXDMA_CTL,
2043	    SF_RXDMA_REPORTBADPKTS |
2044	    (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2045	    SF_RXDMA_BURST);
2046
2047	/* Init the RX buffer descriptor queue. */
2048	addr = sc->sf_rdata.sf_rx_ring_paddr;
2049	csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2050	csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2051
2052	/* Set RX queue buffer length. */
2053	csr_write_4(sc, SF_RXDQ_CTL_1,
2054	    ((MCLBYTES - sizeof(uint32_t)) << 16) |
2055	    SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2056
2057	if (SF_ADDR_HI(addr) != 0)
2058		SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2059	csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2060	csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2061
2062	/* Init the TX completion queue. */
2063	addr = sc->sf_rdata.sf_tx_cring_paddr;
2064	csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2065	if (SF_ADDR_HI(addr) != 0)
2066		SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2067
2068	/* Init the TX buffer descriptor queue. */
2069	addr = sc->sf_rdata.sf_tx_ring_paddr;
2070	csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2071	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2072	csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2073	csr_write_4(sc, SF_TX_FRAMCTL,
2074	    SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2075	csr_write_4(sc, SF_TXDQ_CTL,
2076	    SF_TXDMA_HIPRIO_THRESH << 24 |
2077	    SF_TXSKIPLEN_0BYTES << 16 |
2078	    SF_TXDDMA_BURST << 8 |
2079	    SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2080	if (SF_ADDR_HI(addr) != 0)
2081		SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2082
2083	/* Set VLAN Type register. */
2084	csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2085
2086	/* Set TxPause Timer. */
2087	csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2088
2089	/* Enable autopadding of short TX frames. */
2090	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2091	SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2092	/* Reset the MAC so the changes take effect. */
2093	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2094	DELAY(1000);
2095	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2096
2097	/* Enable PCI bus master. */
2098	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2099
2100	/* Load StarFire firmware. */
2101	sf_download_fw(sc);
2102
2103	/* Initialize interrupt moderation. */
2104	csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2105	    (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2106
2107#ifdef DEVICE_POLLING
2108	/* Disable interrupts if we are polling. */
2109	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2110		csr_write_4(sc, SF_IMR, 0x00000000);
2111	else
2112#endif
2113	/* Enable interrupts. */
2114	csr_write_4(sc, SF_IMR, SF_INTRS);
2115	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2116
2117	/* Enable the RX and TX engines. */
2118	csr_write_4(sc, SF_GEN_ETH_CTL,
2119	    SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2120	    SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2121
2122	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2123		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2124	else
2125		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2126	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2127		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2128	else
2129		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2130
2131	sc->sf_link = 0;
2132	mii_mediachg(mii);
2133
2134	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2135	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2136
2137	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2138}
2139
2140static int
2141sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2142{
2143	struct sf_txdesc	*txd;
2144	struct sf_tx_rdesc	*desc;
2145	struct mbuf		*m;
2146	bus_dmamap_t		map;
2147	bus_dma_segment_t	txsegs[SF_MAXTXSEGS];
2148	int			error, i, nsegs, prod, si;
2149	int			avail, nskip;
2150
2151	SF_LOCK_ASSERT(sc);
2152
2153	m = *m_head;
2154	prod = sc->sf_cdata.sf_tx_prod;
2155	txd = &sc->sf_cdata.sf_txdesc[prod];
2156	map = txd->tx_dmamap;
2157	error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2158	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2159	if (error == EFBIG) {
2160		m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2161		if (m == NULL) {
2162			m_freem(*m_head);
2163			*m_head = NULL;
2164			return (ENOBUFS);
2165		}
2166		*m_head = m;
2167		error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2168		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2169		if (error != 0) {
2170			m_freem(*m_head);
2171			*m_head = NULL;
2172			return (error);
2173		}
2174	} else if (error != 0)
2175		return (error);
2176	if (nsegs == 0) {
2177		m_freem(*m_head);
2178		*m_head = NULL;
2179		return (EIO);
2180	}
2181
2182	/* Check number of available descriptors. */
2183	avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2184	if (avail < nsegs) {
2185		bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2186		return (ENOBUFS);
2187	}
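	/*
	 * If this frame would run past the end of the ring, the first
	 * descriptor is marked SF_TX_DESC_END below and the chip wraps
	 * back to slot 0; the slots between prod and the end of the
	 * ring are skipped, but still counted against sf_tx_cnt.
	 */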
2188	nskip = 0;
2189	if (prod + nsegs >= SF_TX_DLIST_CNT) {
2190		nskip = SF_TX_DLIST_CNT - prod - 1;
2191		if (avail < nsegs + nskip) {
2192			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2193			return (ENOBUFS);
2194		}
2195	}
2196
2197	bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2198
2199	si = prod;
2200	for (i = 0; i < nsegs; i++) {
2201		desc = &sc->sf_rdata.sf_tx_ring[prod];
2202		desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2203		    (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2204		desc->sf_tx_reserved = 0;
2205		desc->sf_addr = htole64(txsegs[i].ds_addr);
2206		if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2207			/* Queue wraps! */
2208			desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2209			prod = 0;
2210		} else
2211			SF_INC(prod, SF_TX_DLIST_CNT);
2212	}
2213	/* Update producer index. */
2214	sc->sf_cdata.sf_tx_prod = prod;
2215	sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2216
2217	desc = &sc->sf_rdata.sf_tx_ring[si];
2218	/* Check for a TCP/UDP checksum offload request. */
2219	if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2220		desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2221	desc->sf_tx_ctrl |=
2222	    htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2223
2224	txd->tx_dmamap = map;
2225	txd->tx_m = m;
2226	txd->ndesc = nsegs + nskip;
2227
2228	return (0);
2229}
2230
2231static void
2232sf_start(struct ifnet *ifp)
2233{
2234	struct sf_softc		*sc;
2235
2236	sc = ifp->if_softc;
2237	SF_LOCK(sc);
2238	sf_start_locked(ifp);
2239	SF_UNLOCK(sc);
2240}
2241
2242static void
2243sf_start_locked(struct ifnet *ifp)
2244{
2245	struct sf_softc		*sc;
2246	struct mbuf		*m_head;
2247	int			enq;
2248
2249	sc = ifp->if_softc;
2250	SF_LOCK_ASSERT(sc);
2251
2252	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2253	    IFF_DRV_RUNNING || sc->sf_link == 0)
2254		return;
2255
2256	/*
2257	 * Since we don't know in advance when a descriptor wrap will occur,
2258	 * stop queuing frames once fewer than SF_MAXTXSEGS free Tx
2259	 * descriptors remain (see the loop condition below).
2260	 */
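	/*
	 * Illustrative numbers: if the ring held 256 descriptors and
	 * SF_MAXTXSEGS were 16, queuing would stop once 240 or more
	 * descriptors were in flight.
	 */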
2261	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2262	    sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2263		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2264		if (m_head == NULL)
2265			break;
2266		/*
2267		 * Pack the data into the transmit ring. If we
2268		 * don't have room, set the OACTIVE flag and wait
2269		 * for the NIC to drain the ring.
2270		 */
2271		if (sf_encap(sc, &m_head)) {
2272			if (m_head == NULL)
2273				break;
2274			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2275			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2276			break;
2277		}
2278
2279		enq++;
2280		/*
2281		 * If there's a BPF listener, bounce a copy of this frame
2282		 * to him.
2283		 */
2284		ETHER_BPF_MTAP(ifp, m_head);
2285	}
2286
2287	if (enq > 0) {
2288		bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2289		    sc->sf_cdata.sf_tx_ring_map,
2290		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2291		/* Kick transmit. */
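		/*
		 * The Tx producer index register counts in 8-byte
		 * blocks, hence the scaling by descriptor size / 8.
		 */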
2292		csr_write_4(sc, SF_TXDQ_PRODIDX,
2293		    sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2294
2295		/* Set a timeout in case the chip goes out to lunch. */
2296		sc->sf_watchdog_timer = 5;
2297	}
2298}
2299
2300static void
2301sf_stop(struct sf_softc *sc)
2302{
2303	struct sf_txdesc	*txd;
2304	struct sf_rxdesc	*rxd;
2305	struct ifnet		*ifp;
2306	int			i;
2307
2308	SF_LOCK_ASSERT(sc);
2309
2310	ifp = sc->sf_ifp;
2311
2312	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2313	sc->sf_link = 0;
2314	callout_stop(&sc->sf_co);
2315	sc->sf_watchdog_timer = 0;
2316
2317	/* Reading the ISR register clears all interrupts. */
2318	csr_read_4(sc, SF_ISR);
2319	/* Disable further interrupts. */
2320	csr_write_4(sc, SF_IMR, 0);
2321
2322	/* Disable the Tx/Rx engines. */
2323	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2324
2325	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2326	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2327	csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2328	csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2329	csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2330	csr_write_4(sc, SF_TXCQ_CTL, 0);
2331	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2332	csr_write_4(sc, SF_TXDQ_CTL, 0);
2333
2334	/*
2335	 * Free RX and TX mbufs still in the queues.
2336	 */
2337	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2338		rxd = &sc->sf_cdata.sf_rxdesc[i];
2339		if (rxd->rx_m != NULL) {
2340			bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2341			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2342			bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2343			    rxd->rx_dmamap);
2344			m_freem(rxd->rx_m);
2345			rxd->rx_m = NULL;
2346		}
2347	}
2348	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2349		txd = &sc->sf_cdata.sf_txdesc[i];
2350		if (txd->tx_m != NULL) {
2351			bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2352			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2353			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2354			    txd->tx_dmamap);
2355			m_freem(txd->tx_m);
2356			txd->tx_m = NULL;
2357			txd->ndesc = 0;
2358		}
2359	}
2360}
2361
2362static void
2363sf_tick(void *xsc)
2364{
2365	struct sf_softc		*sc;
2366	struct mii_data		*mii;
2367
2368	sc = xsc;
2369	SF_LOCK_ASSERT(sc);
2370	mii = device_get_softc(sc->sf_miibus);
2371	mii_tick(mii);
2372	sf_stats_update(sc);
2373	sf_watchdog(sc);
2374	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2375}
2376
2377/*
2378 * Note: it is important that this function not be interrupted. We
2379 * use a two-stage register access scheme: if we are interrupted in
2380 * between setting the indirect address register and reading from the
2381 * indirect data register, the contents of the address register could
2382 * be changed out from under us.
2383 */
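/*
 * In this driver that is guaranteed by the softc mutex: every caller
 * enters with SF_LOCK held (see the SF_LOCK_ASSERT below).
 */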
2384static void
2385sf_stats_update(struct sf_softc *sc)
2386{
2387	struct ifnet		*ifp;
2388	struct sf_stats		now, *stats, *nstats;
2389	int			i;
2390
2391	SF_LOCK_ASSERT(sc);
2392
2393	ifp = sc->sf_ifp;
2394	stats = &now;
2395
2396	stats->sf_tx_frames =
2397	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2398	stats->sf_tx_single_colls =
2399	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2400	stats->sf_tx_multi_colls =
2401	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2402	stats->sf_tx_crcerrs =
2403	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2404	stats->sf_tx_bytes =
2405	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2406	stats->sf_tx_deferred =
2407	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2408	stats->sf_tx_late_colls =
2409	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2410	stats->sf_tx_pause_frames =
2411	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2412	stats->sf_tx_control_frames =
2413	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2414	stats->sf_tx_excess_colls =
2415	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2416	stats->sf_tx_excess_defer =
2417	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2418	stats->sf_tx_mcast_frames =
2419	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2420	stats->sf_tx_bcast_frames =
2421	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2422	stats->sf_tx_frames_lost =
2423	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2424	stats->sf_rx_frames =
2425	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2426	stats->sf_rx_crcerrs =
2427	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2428	stats->sf_rx_alignerrs =
2429	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2430	stats->sf_rx_bytes =
2431	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2432	stats->sf_rx_pause_frames =
2433	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2434	stats->sf_rx_control_frames =
2435	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2436	stats->sf_rx_unsup_control_frames =
2437	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2438	stats->sf_rx_giants =
2439	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2440	stats->sf_rx_runts =
2441	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2442	stats->sf_rx_jabbererrs =
2443	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2444	stats->sf_rx_fragments =
2445	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2446	stats->sf_rx_pkts_64 =
2447	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2448	stats->sf_rx_pkts_65_127 =
2449	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2450	stats->sf_rx_pkts_128_255 =
2451	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2452	stats->sf_rx_pkts_256_511 =
2453	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2454	stats->sf_rx_pkts_512_1023 =
2455	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2456	stats->sf_rx_pkts_1024_1518 =
2457	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2458	stats->sf_rx_frames_lost =
2459	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2460	/* Only the lower 16 bits are valid. */
2461	stats->sf_tx_underruns =
2462	    (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2463
2464	/* Empty stats counter registers. */
2465	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2466		csr_write_4(sc, i, 0);
2467
2468	ifp->if_opackets += (u_long)stats->sf_tx_frames;
2469
2470	ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
2471	    (u_long)stats->sf_tx_multi_colls;
2472
2473	ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
2474	    (u_long)stats->sf_tx_excess_defer +
2475	    (u_long)stats->sf_tx_frames_lost;
2476
2477	ifp->if_ipackets += (u_long)stats->sf_rx_frames;
2478
2479	ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
2480	    (u_long)stats->sf_rx_alignerrs +
2481	    (u_long)stats->sf_rx_giants +
2482	    (u_long)stats->sf_rx_runts +
2483	    (u_long)stats->sf_rx_jabbererrs +
2484	    (u_long)stats->sf_rx_frames_lost;
2485
2486	nstats = &sc->sf_statistics;
2487
2488	nstats->sf_tx_frames += stats->sf_tx_frames;
2489	nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2490	nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2491	nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2492	nstats->sf_tx_bytes += stats->sf_tx_bytes;
2493	nstats->sf_tx_deferred += stats->sf_tx_deferred;
2494	nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2495	nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2496	nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2497	nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2498	nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2499	nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2500	nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2501	nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2502	nstats->sf_rx_frames += stats->sf_rx_frames;
2503	nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2504	nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2505	nstats->sf_rx_bytes += stats->sf_rx_bytes;
2506	nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2507	nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2508	nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2509	nstats->sf_rx_giants += stats->sf_rx_giants;
2510	nstats->sf_rx_runts += stats->sf_rx_runts;
2511	nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2512	nstats->sf_rx_fragments += stats->sf_rx_fragments;
2513	nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2514	nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2515	nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2516	nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2517	nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2518	nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2519	nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2520	nstats->sf_tx_underruns += stats->sf_tx_underruns;
2521}
2522
2523static void
2524sf_watchdog(struct sf_softc *sc)
2525{
2526	struct ifnet		*ifp;
2527
2528	SF_LOCK_ASSERT(sc);
2529
2530	if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2531		return;
2532
2533	ifp = sc->sf_ifp;
2534
2535	ifp->if_oerrors++;
2536	if (sc->sf_link == 0) {
2537		if (bootverbose)
2538			if_printf(sc->sf_ifp, "watchdog timeout "
2539			    "(missed link)\n");
2540	} else
2541		if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2542		    sc->sf_cdata.sf_tx_cnt);
2543
2544	sf_init_locked(sc);
2545
2546	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2547		sf_start_locked(ifp);
2548}
2549
2550static int
2551sf_shutdown(device_t dev)
2552{
2553	struct sf_softc		*sc;
2554
2555	sc = device_get_softc(dev);
2556
2557	SF_LOCK(sc);
2558	sf_stop(sc);
2559	SF_UNLOCK(sc);
2560
2561	return (0);
2562}
2563
2564static int
2565sf_suspend(device_t dev)
2566{
2567	struct sf_softc		*sc;
2568
2569	sc = device_get_softc(dev);
2570
2571	SF_LOCK(sc);
2572	sf_stop(sc);
2573	sc->sf_suspended = 1;
2574	bus_generic_suspend(dev);
2575	SF_UNLOCK(sc);
2576
2577	return (0);
2578}
2579
2580static int
2581sf_resume(device_t dev)
2582{
2583	struct sf_softc		*sc;
2584	struct ifnet		*ifp;
2585
2586	sc = device_get_softc(dev);
2587
2588	SF_LOCK(sc);
2589	bus_generic_resume(dev);
2590	ifp = sc->sf_ifp;
2591	if ((ifp->if_flags & IFF_UP) != 0)
2592		sf_init_locked(sc);
2593
2594	sc->sf_suspended = 0;
2595	SF_UNLOCK(sc);
2596
2597	return (0);
2598}
2599
2600static int
2601sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2602{
2603	struct sf_softc		*sc;
2604	struct sf_stats		*stats;
2605	int			error;
2606	int			result;
2607
2608	result = -1;
2609	error = sysctl_handle_int(oidp, &result, 0, req);
2610
2611	if (error != 0 || req->newptr == NULL)
2612		return (error);
2613
2614	if (result != 1)
2615		return (error);
2616
2617	sc = (struct sf_softc *)arg1;
2618	stats = &sc->sf_statistics;
2619
2620	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2621	printf("Transmit good frames : %ju\n",
2622	    (uintmax_t)stats->sf_tx_frames);
2623	printf("Transmit good octets : %ju\n",
2624	    (uintmax_t)stats->sf_tx_bytes);
2625	printf("Transmit single collisions : %u\n",
2626	    stats->sf_tx_single_colls);
2627	printf("Transmit multiple collisions : %u\n",
2628	    stats->sf_tx_multi_colls);
2629	printf("Transmit late collisions : %u\n",
2630	    stats->sf_tx_late_colls);
2631	printf("Transmit abort due to excessive collisions : %u\n",
2632	    stats->sf_tx_excess_colls);
2633	printf("Transmit CRC errors : %u\n",
2634	    stats->sf_tx_crcerrs);
2635	printf("Transmit deferrals : %u\n",
2636	    stats->sf_tx_deferred);
2637	printf("Transmit abort due to excessive deferrals : %u\n",
2638	    stats->sf_tx_excess_defer);
2639	printf("Transmit pause control frames : %u\n",
2640	    stats->sf_tx_pause_frames);
2641	printf("Transmit control frames : %u\n",
2642	    stats->sf_tx_control_frames);
2643	printf("Transmit good multicast frames : %u\n",
2644	    stats->sf_tx_mcast_frames);
2645	printf("Transmit good broadcast frames : %u\n",
2646	    stats->sf_tx_bcast_frames);
2647	printf("Transmit frames lost due to internal transmit errors : %u\n",
2648	    stats->sf_tx_frames_lost);
2649	printf("Transmit FIFO underflows : %u\n",
2650	    stats->sf_tx_underruns);
2651	printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2652	printf("Receive good frames : %ju\n",
2653	    (uintmax_t)stats->sf_rx_frames);
2654	printf("Receive good octets : %ju\n",
2655	    (uintmax_t)stats->sf_rx_bytes);
2656	printf("Receive CRC errors : %u\n",
2657	    stats->sf_rx_crcerrs);
2658	printf("Receive alignment errors : %u\n",
2659	    stats->sf_rx_alignerrs);
2660	printf("Receive pause frames : %u\n",
2661	    stats->sf_rx_pause_frames);
2662	printf("Receive control frames : %u\n",
2663	    stats->sf_rx_control_frames);
2664	printf("Receive control frames with unsupported opcode : %u\n",
2665	    stats->sf_rx_unsup_control_frames);
2666	printf("Receive frames too long : %u\n",
2667	    stats->sf_rx_giants);
2668	printf("Receive frames too short : %u\n",
2669	    stats->sf_rx_runts);
2670	printf("Receive jabber errors : %u\n",
2671	    stats->sf_rx_jabbererrs);
2672	printf("Receive fragmented frames : %u\n",
2673	    stats->sf_rx_fragments);
2674	printf("Receive packets 64 bytes : %ju\n",
2675	    (uintmax_t)stats->sf_rx_pkts_64);
2676	printf("Receive packets 65 to 127 bytes : %ju\n",
2677	    (uintmax_t)stats->sf_rx_pkts_65_127);
2678	printf("Receive packets 128 to 255 bytes : %ju\n",
2679	    (uintmax_t)stats->sf_rx_pkts_128_255);
2680	printf("Receive packets 256 to 511 bytes : %ju\n",
2681	    (uintmax_t)stats->sf_rx_pkts_256_511);
2682	printf("Receive packets 512 to 1023 bytes : %ju\n",
2683	    (uintmax_t)stats->sf_rx_pkts_512_1023);
2684	printf("Receive packets 1024 to 1518 bytes : %ju\n",
2685	    (uintmax_t)stats->sf_rx_pkts_1024_1518);
2686	printf("Receive frames lost due to internal receive errors : %u\n",
2687	    stats->sf_rx_frames_lost);
2688	printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2689
2690	return (error);
2691}
2692
2693static int
2694sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2695{
2696	int error, value;
2697
2698	if (!arg1)
2699		return (EINVAL);
2700	value = *(int *)arg1;
2701	error = sysctl_handle_int(oidp, &value, 0, req);
2702	if (error || !req->newptr)
2703		return (error);
2704	if (value < low || value > high)
2705		return (EINVAL);
2706	*(int *)arg1 = value;
2707
2708	return (0);
2709}
2710
2711static int
2712sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2713{
2714
2715	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
2716}
2717