1/*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 1997, 1998, 1999
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/*
39 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
40 * Programming manual is available from:
41 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Department of Electical Engineering
45 * Columbia University, New York City
46 */
47/*
48 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
49 * controller designed with flexibility and reducing CPU load in mind.
50 * The Starfire offers high and low priority buffer queues, a
51 * producer/consumer index mechanism and several different buffer
52 * queue and completion queue descriptor types. Any one of a number
53 * of different driver designs can be used, depending on system and
54 * OS requirements. This driver makes use of type2 transmit frame
55 * descriptors to take full advantage of fragmented packets buffers
56 * and two RX buffer queues prioritized on size (one queue for small
57 * frames that will fit into a single mbuf, another with full size
58 * mbuf clusters for everything else). The producer/consumer indexes
59 * and completion queues are also used.
60 *
61 * One downside to the Starfire has to do with alignment: buffer
62 * queues must be aligned on 256-byte boundaries, and receive buffers
63 * must be aligned on longword boundaries. The receive buffer alignment
64 * causes problems on the strict alignment architecture, where the
65 * packet payload should be longword aligned. There is no simple way
66 * around this.
67 *
68 * For receive filtering, the Starfire offers 16 perfect filter slots
69 * and a 512-bit hash table.
70 *
71 * The Starfire has no internal transceiver, relying instead on an
72 * external MII-based transceiver. Accessing registers on external
73 * PHYs is done through a special register map rather than with the
74 * usual bitbang MDIO method.
75 *
76 * Acesssing the registers on the Starfire is a little tricky. The
77 * Starfire has a 512K internal register space. When programmed for
78 * PCI memory mapped mode, the entire register space can be accessed
79 * directly. However in I/O space mode, only 256 bytes are directly
80 * mapped into PCI I/O space. The other registers can be accessed
81 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
82 * registers inside the 256-byte I/O window.
83 */
84
85#ifdef HAVE_KERNEL_OPTION_HEADERS
86#include "opt_device_polling.h"
87#endif
88
89#include <sys/param.h>
90#include <sys/systm.h>
91#include <sys/bus.h>
92#include <sys/endian.h>
93#include <sys/kernel.h>
94#include <sys/malloc.h>
95#include <sys/mbuf.h>
96#include <sys/rman.h>
97#include <sys/module.h>
98#include <sys/socket.h>
99#include <sys/sockio.h>
100#include <sys/sysctl.h>
101
102#include <net/bpf.h>
103#include <net/if.h>
104#include <net/if_var.h>
105#include <net/if_arp.h>
106#include <net/ethernet.h>
107#include <net/if_dl.h>
108#include <net/if_media.h>
109#include <net/if_types.h>
110#include <net/if_vlan_var.h>
111
112#include <dev/mii/mii.h>
113#include <dev/mii/miivar.h>
114
115#include <dev/pci/pcireg.h>
116#include <dev/pci/pcivar.h>
117
118#include <machine/bus.h>
119
120#include <dev/sf/if_sfreg.h>
121#include <dev/sf/starfire_rx.h>
122#include <dev/sf/starfire_tx.h>
123
124/* "device miibus" required.  See GENERIC if you get errors here. */
125#include "miibus_if.h"
126
127MODULE_DEPEND(sf, pci, 1, 1, 1);
128MODULE_DEPEND(sf, ether, 1, 1, 1);
129MODULE_DEPEND(sf, miibus, 1, 1, 1);
130
131#undef	SF_GFP_DEBUG
132#define	SF_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
133/* Define this to activate partial TCP/UDP checksum offload. */
134#undef	SF_PARTIAL_CSUM_SUPPORT
135
136static struct sf_type sf_devs[] = {
137	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
138	    AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
139	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
140	    AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
141	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
142	    AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
143	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
144	    AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
145	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
146	    AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
147	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
148	    AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
149	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
150	    AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
151};
152
153static int sf_probe(device_t);
154static int sf_attach(device_t);
155static int sf_detach(device_t);
156static int sf_shutdown(device_t);
157static int sf_suspend(device_t);
158static int sf_resume(device_t);
159static void sf_intr(void *);
160static void sf_tick(void *);
161static void sf_stats_update(struct sf_softc *);
162#ifndef __NO_STRICT_ALIGNMENT
163static __inline void sf_fixup_rx(struct mbuf *);
164#endif
165static int sf_rxeof(struct sf_softc *);
166static void sf_txeof(struct sf_softc *);
167static int sf_encap(struct sf_softc *, struct mbuf **);
168static void sf_start(struct ifnet *);
169static void sf_start_locked(struct ifnet *);
170static int sf_ioctl(struct ifnet *, u_long, caddr_t);
171static void sf_download_fw(struct sf_softc *);
172static void sf_init(void *);
173static void sf_init_locked(struct sf_softc *);
174static void sf_stop(struct sf_softc *);
175static void sf_watchdog(struct sf_softc *);
176static int sf_ifmedia_upd(struct ifnet *);
177static int sf_ifmedia_upd_locked(struct ifnet *);
178static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
179static void sf_reset(struct sf_softc *);
180static int sf_dma_alloc(struct sf_softc *);
181static void sf_dma_free(struct sf_softc *);
182static int sf_init_rx_ring(struct sf_softc *);
183static void sf_init_tx_ring(struct sf_softc *);
184static int sf_newbuf(struct sf_softc *, int);
185static void sf_rxfilter(struct sf_softc *);
186static int sf_setperf(struct sf_softc *, int, uint8_t *);
187static int sf_sethash(struct sf_softc *, caddr_t, int);
188#ifdef notdef
189static int sf_setvlan(struct sf_softc *, int, uint32_t);
190#endif
191
192static uint8_t sf_read_eeprom(struct sf_softc *, int);
193
194static int sf_miibus_readreg(device_t, int, int);
195static int sf_miibus_writereg(device_t, int, int, int);
196static void sf_miibus_statchg(device_t);
197#ifdef DEVICE_POLLING
198static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
199#endif
200
201static uint32_t csr_read_4(struct sf_softc *, int);
202static void csr_write_4(struct sf_softc *, int, uint32_t);
203static void sf_txthresh_adjust(struct sf_softc *);
204static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
205static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
206static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);
207
208static device_method_t sf_methods[] = {
209	/* Device interface */
210	DEVMETHOD(device_probe,		sf_probe),
211	DEVMETHOD(device_attach,	sf_attach),
212	DEVMETHOD(device_detach,	sf_detach),
213	DEVMETHOD(device_shutdown,	sf_shutdown),
214	DEVMETHOD(device_suspend,	sf_suspend),
215	DEVMETHOD(device_resume,	sf_resume),
216
217	/* MII interface */
218	DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
219	DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
220	DEVMETHOD(miibus_statchg,	sf_miibus_statchg),
221
222	DEVMETHOD_END
223};
224
225static driver_t sf_driver = {
226	"sf",
227	sf_methods,
228	sizeof(struct sf_softc),
229};
230
231static devclass_t sf_devclass;
232
233DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
234DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
235
236#define SF_SETBIT(sc, reg, x)	\
237	csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))
238
239#define SF_CLRBIT(sc, reg, x)				\
240	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
241
242static uint32_t
243csr_read_4(struct sf_softc *sc, int reg)
244{
245	uint32_t		val;
246
247	if (sc->sf_restype == SYS_RES_MEMORY)
248		val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
249	else {
250		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
251		val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
252	}
253
254	return (val);
255}
256
257static uint8_t
258sf_read_eeprom(struct sf_softc *sc, int reg)
259{
260	uint8_t		val;
261
262	val = (csr_read_4(sc, SF_EEADDR_BASE +
263	    (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
264
265	return (val);
266}
267
268static void
269csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
270{
271
272	if (sc->sf_restype == SYS_RES_MEMORY)
273		CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
274	else {
275		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
276		CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
277	}
278}
279
280/*
281 * Copy the address 'mac' into the perfect RX filter entry at
282 * offset 'idx.' The perfect filter only has 16 entries so do
283 * some sanity tests.
284 */
285static int
286sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
287{
288
289	if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT)
290		return (EINVAL);
291
292	if (mac == NULL)
293		return (EINVAL);
294
295	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
296	    (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
297	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
298	    (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
299	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
300	    (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));
301
302	return (0);
303}
304
305/*
306 * Set the bit in the 512-bit hash table that corresponds to the
307 * specified mac address 'mac.' If 'prio' is nonzero, update the
308 * priority hash table instead of the filter hash table.
309 */
310static int
311sf_sethash(struct sf_softc *sc, caddr_t	mac, int prio)
312{
313	uint32_t		h;
314
315	if (mac == NULL)
316		return (EINVAL);
317
318	h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
319
320	if (prio) {
321		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
322		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
323	} else {
324		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
325		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
326	}
327
328	return (0);
329}
330
331#ifdef notdef
332/*
333 * Set a VLAN tag in the receive filter.
334 */
335static int
336sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
337{
338
339	if (idx < 0 || idx >> SF_RXFILT_HASH_CNT)
340		return (EINVAL);
341
342	csr_write_4(sc, SF_RXFILT_HASH_BASE +
343	    (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
344
345	return (0);
346}
347#endif
348
349static int
350sf_miibus_readreg(device_t dev, int phy, int reg)
351{
352	struct sf_softc		*sc;
353	int			i;
354	uint32_t		val = 0;
355
356	sc = device_get_softc(dev);
357
358	for (i = 0; i < SF_TIMEOUT; i++) {
359		val = csr_read_4(sc, SF_PHY_REG(phy, reg));
360		if ((val & SF_MII_DATAVALID) != 0)
361			break;
362	}
363
364	if (i == SF_TIMEOUT)
365		return (0);
366
367	val &= SF_MII_DATAPORT;
368	if (val == 0xffff)
369		return (0);
370
371	return (val);
372}
373
374static int
375sf_miibus_writereg(device_t dev, int phy, int reg, int val)
376{
377	struct sf_softc		*sc;
378	int			i;
379	int			busy;
380
381	sc = device_get_softc(dev);
382
383	csr_write_4(sc, SF_PHY_REG(phy, reg), val);
384
385	for (i = 0; i < SF_TIMEOUT; i++) {
386		busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
387		if ((busy & SF_MII_BUSY) == 0)
388			break;
389	}
390
391	return (0);
392}
393
394static void
395sf_miibus_statchg(device_t dev)
396{
397	struct sf_softc		*sc;
398	struct mii_data		*mii;
399	struct ifnet		*ifp;
400	uint32_t		val;
401
402	sc = device_get_softc(dev);
403	mii = device_get_softc(sc->sf_miibus);
404	ifp = sc->sf_ifp;
405	if (mii == NULL || ifp == NULL ||
406	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
407		return;
408
409	sc->sf_link = 0;
410	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
411	    (IFM_ACTIVE | IFM_AVALID)) {
412		switch (IFM_SUBTYPE(mii->mii_media_active)) {
413		case IFM_10_T:
414		case IFM_100_TX:
415		case IFM_100_FX:
416			sc->sf_link = 1;
417			break;
418		}
419	}
420	if (sc->sf_link == 0)
421		return;
422
423	val = csr_read_4(sc, SF_MACCFG_1);
424	val &= ~SF_MACCFG1_FULLDUPLEX;
425	val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
426	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
427		val |= SF_MACCFG1_FULLDUPLEX;
428		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
429#ifdef notyet
430		/* Configure flow-control bits. */
431		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
432		    IFM_ETH_RXPAUSE) != 0)
433			val |= SF_MACCFG1_RX_FLOWENB;
434		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
435		    IFM_ETH_TXPAUSE) != 0)
436			val |= SF_MACCFG1_TX_FLOWENB;
437#endif
438	} else
439		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
440
441	/* Make sure to reset MAC to take changes effect. */
442	csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
443	DELAY(1000);
444	csr_write_4(sc, SF_MACCFG_1, val);
445
446	val = csr_read_4(sc, SF_TIMER_CTL);
447	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
448		val |= SF_TIMER_TIMES_TEN;
449	else
450		val &= ~SF_TIMER_TIMES_TEN;
451	csr_write_4(sc, SF_TIMER_CTL, val);
452}
453
454static void
455sf_rxfilter(struct sf_softc *sc)
456{
457	struct ifnet		*ifp;
458	int			i;
459	struct ifmultiaddr	*ifma;
460	uint8_t			dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
461	uint32_t		rxfilt;
462
463	ifp = sc->sf_ifp;
464
465	/* First zot all the existing filters. */
466	for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
467		sf_setperf(sc, i, dummy);
468	for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
469	    i += sizeof(uint32_t))
470		csr_write_4(sc, i, 0);
471
472	rxfilt = csr_read_4(sc, SF_RXFILT);
473	rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
474	if ((ifp->if_flags & IFF_BROADCAST) != 0)
475		rxfilt |= SF_RXFILT_BROAD;
476	if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
477	    (ifp->if_flags & IFF_PROMISC) != 0) {
478		if ((ifp->if_flags & IFF_PROMISC) != 0)
479			rxfilt |= SF_RXFILT_PROMISC;
480		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
481			rxfilt |= SF_RXFILT_ALLMULTI;
482		goto done;
483	}
484
485	/* Now program new ones. */
486	i = 1;
487	/* XXX how do we maintain reverse semantics without impl */
488	if_maddr_rlock(ifp);
489	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs,
490	    ifma_link) {
491		if (ifma->ifma_addr->sa_family != AF_LINK)
492			continue;
493		/*
494		 * Program the first 15 multicast groups
495		 * into the perfect filter. For all others,
496		 * use the hash table.
497		 */
498		if (i < SF_RXFILT_PERFECT_CNT) {
499			sf_setperf(sc, i,
500			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
501			i++;
502			continue;
503		}
504
505		sf_sethash(sc,
506		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
507	}
508	if_maddr_runlock(ifp);
509
510done:
511	csr_write_4(sc, SF_RXFILT, rxfilt);
512}
513
514/*
515 * Set media options.
516 */
517static int
518sf_ifmedia_upd(struct ifnet *ifp)
519{
520	struct sf_softc		*sc;
521	int			error;
522
523	sc = ifp->if_softc;
524	SF_LOCK(sc);
525	error = sf_ifmedia_upd_locked(ifp);
526	SF_UNLOCK(sc);
527	return (error);
528}
529
530static int
531sf_ifmedia_upd_locked(struct ifnet *ifp)
532{
533	struct sf_softc		*sc;
534	struct mii_data		*mii;
535	struct mii_softc        *miisc;
536
537	sc = ifp->if_softc;
538	mii = device_get_softc(sc->sf_miibus);
539	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
540		PHY_RESET(miisc);
541	return (mii_mediachg(mii));
542}
543
544/*
545 * Report current media status.
546 */
547static void
548sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
549{
550	struct sf_softc		*sc;
551	struct mii_data		*mii;
552
553	sc = ifp->if_softc;
554	SF_LOCK(sc);
555	if ((ifp->if_flags & IFF_UP) == 0) {
556		SF_UNLOCK(sc);
557		return;
558	}
559
560	mii = device_get_softc(sc->sf_miibus);
561	mii_pollstat(mii);
562	ifmr->ifm_active = mii->mii_media_active;
563	ifmr->ifm_status = mii->mii_media_status;
564	SF_UNLOCK(sc);
565}
566
567static int
568sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
569{
570	struct sf_softc		*sc;
571	struct ifreq		*ifr;
572	struct mii_data		*mii;
573	int			error, mask;
574
575	sc = ifp->if_softc;
576	ifr = (struct ifreq *)data;
577	error = 0;
578
579	switch (command) {
580	case SIOCSIFFLAGS:
581		SF_LOCK(sc);
582		if (ifp->if_flags & IFF_UP) {
583			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
584				if ((ifp->if_flags ^ sc->sf_if_flags) &
585				    (IFF_PROMISC | IFF_ALLMULTI))
586					sf_rxfilter(sc);
587			} else {
588				if (sc->sf_detach == 0)
589					sf_init_locked(sc);
590			}
591		} else {
592			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
593				sf_stop(sc);
594		}
595		sc->sf_if_flags = ifp->if_flags;
596		SF_UNLOCK(sc);
597		break;
598	case SIOCADDMULTI:
599	case SIOCDELMULTI:
600		SF_LOCK(sc);
601		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
602			sf_rxfilter(sc);
603		SF_UNLOCK(sc);
604		break;
605	case SIOCGIFMEDIA:
606	case SIOCSIFMEDIA:
607		mii = device_get_softc(sc->sf_miibus);
608		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
609		break;
610	case SIOCSIFCAP:
611		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
612#ifdef DEVICE_POLLING
613		if ((mask & IFCAP_POLLING) != 0) {
614			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
615				error = ether_poll_register(sf_poll, ifp);
616				if (error != 0)
617					break;
618				SF_LOCK(sc);
619				/* Disable interrupts. */
620				csr_write_4(sc, SF_IMR, 0);
621				ifp->if_capenable |= IFCAP_POLLING;
622				SF_UNLOCK(sc);
623			} else {
624				error = ether_poll_deregister(ifp);
625				/* Enable interrupts. */
626				SF_LOCK(sc);
627				csr_write_4(sc, SF_IMR, SF_INTRS);
628				ifp->if_capenable &= ~IFCAP_POLLING;
629				SF_UNLOCK(sc);
630			}
631		}
632#endif /* DEVICE_POLLING */
633		if ((mask & IFCAP_TXCSUM) != 0) {
634			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
635				SF_LOCK(sc);
636				ifp->if_capenable ^= IFCAP_TXCSUM;
637				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
638					ifp->if_hwassist |= SF_CSUM_FEATURES;
639					SF_SETBIT(sc, SF_GEN_ETH_CTL,
640					    SF_ETHCTL_TXGFP_ENB);
641				} else {
642					ifp->if_hwassist &= ~SF_CSUM_FEATURES;
643					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
644					    SF_ETHCTL_TXGFP_ENB);
645				}
646				SF_UNLOCK(sc);
647			}
648		}
649		if ((mask & IFCAP_RXCSUM) != 0) {
650			if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
651				SF_LOCK(sc);
652				ifp->if_capenable ^= IFCAP_RXCSUM;
653				if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
654					SF_SETBIT(sc, SF_GEN_ETH_CTL,
655					    SF_ETHCTL_RXGFP_ENB);
656				else
657					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
658					    SF_ETHCTL_RXGFP_ENB);
659				SF_UNLOCK(sc);
660			}
661		}
662		break;
663	default:
664		error = ether_ioctl(ifp, command, data);
665		break;
666	}
667
668	return (error);
669}
670
671static void
672sf_reset(struct sf_softc *sc)
673{
674	int		i;
675
676	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
677	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
678	DELAY(1000);
679	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
680
681	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
682
683	for (i = 0; i < SF_TIMEOUT; i++) {
684		DELAY(10);
685		if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
686			break;
687	}
688
689	if (i == SF_TIMEOUT)
690		device_printf(sc->sf_dev, "reset never completed!\n");
691
692	/* Wait a little while for the chip to get its brains in order. */
693	DELAY(1000);
694}
695
696/*
697 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
698 * IDs against our list and return a device name if we find a match.
699 * We also check the subsystem ID so that we can identify exactly which
700 * NIC has been found, if possible.
701 */
702static int
703sf_probe(device_t dev)
704{
705	struct sf_type		*t;
706	uint16_t		vid;
707	uint16_t		did;
708	uint16_t		sdid;
709	int			i;
710
711	vid = pci_get_vendor(dev);
712	did = pci_get_device(dev);
713	sdid = pci_get_subdevice(dev);
714
715	t = sf_devs;
716	for (i = 0; i < nitems(sf_devs); i++, t++) {
717		if (vid == t->sf_vid && did == t->sf_did) {
718			if (sdid == t->sf_sdid) {
719				device_set_desc(dev, t->sf_sname);
720				return (BUS_PROBE_DEFAULT);
721			}
722		}
723	}
724
725	if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
726		/* unknown subdevice */
727		device_set_desc(dev, sf_devs[0].sf_name);
728		return (BUS_PROBE_DEFAULT);
729	}
730
731	return (ENXIO);
732}
733
734/*
735 * Attach the interface. Allocate softc structures, do ifmedia
736 * setup and ethernet/BPF attach.
737 */
738static int
739sf_attach(device_t dev)
740{
741	int			i;
742	struct sf_softc		*sc;
743	struct ifnet		*ifp;
744	uint32_t		reg;
745	int			rid, error = 0;
746	uint8_t			eaddr[ETHER_ADDR_LEN];
747
748	sc = device_get_softc(dev);
749	sc->sf_dev = dev;
750
751	mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
752	    MTX_DEF);
753	callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
754
755	/*
756	 * Map control/status registers.
757	 */
758	pci_enable_busmaster(dev);
759
760	/*
761	 * Prefer memory space register mapping over I/O space as the
762	 * hardware requires lots of register access to get various
763	 * producer/consumer index during Tx/Rx operation. However this
764	 * requires large memory space(512K) to map the entire register
765	 * space.
766	 */
767	sc->sf_rid = PCIR_BAR(0);
768	sc->sf_restype = SYS_RES_MEMORY;
769	sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
770	    RF_ACTIVE);
771	if (sc->sf_res == NULL) {
772		reg = pci_read_config(dev, PCIR_BAR(0), 4);
773		if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
774			sc->sf_rid = PCIR_BAR(2);
775		else
776			sc->sf_rid = PCIR_BAR(1);
777		sc->sf_restype = SYS_RES_IOPORT;
778		sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
779		    &sc->sf_rid, RF_ACTIVE);
780		if (sc->sf_res == NULL) {
781			device_printf(dev, "couldn't allocate resources\n");
782			mtx_destroy(&sc->sf_mtx);
783			return (ENXIO);
784		}
785	}
786	if (bootverbose)
787		device_printf(dev, "using %s space register mapping\n",
788		    sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");
789
790	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
791	if (reg == 0) {
792		/*
793		 * If cache line size is 0, MWI is not used at all, so set
794		 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32
795		 * and 64.
796		 */
797		reg = 16;
798		device_printf(dev, "setting PCI cache line size to %u\n", reg);
799		pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
800	} else {
801		if (bootverbose)
802			device_printf(dev, "PCI cache line size : %u\n", reg);
803	}
804	/* Enable MWI. */
805	reg = pci_read_config(dev, PCIR_COMMAND, 2);
806	reg |= PCIM_CMD_MWRICEN;
807	pci_write_config(dev, PCIR_COMMAND, reg, 2);
808
809	/* Allocate interrupt. */
810	rid = 0;
811	sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
812	    RF_SHAREABLE | RF_ACTIVE);
813
814	if (sc->sf_irq == NULL) {
815		device_printf(dev, "couldn't map interrupt\n");
816		error = ENXIO;
817		goto fail;
818	}
819
820	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
821	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
822	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
823	    sf_sysctl_stats, "I", "Statistics");
824
825	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
826		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
827		OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
828		&sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
829		"sf interrupt moderation");
830	/* Pull in device tunables. */
831	sc->sf_int_mod = SF_IM_DEFAULT;
832	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
833	    "int_mod", &sc->sf_int_mod);
834	if (error == 0) {
835		if (sc->sf_int_mod < SF_IM_MIN ||
836		    sc->sf_int_mod > SF_IM_MAX) {
837			device_printf(dev, "int_mod value out of range; "
838			    "using default: %d\n", SF_IM_DEFAULT);
839			sc->sf_int_mod = SF_IM_DEFAULT;
840		}
841	}
842
843	/* Reset the adapter. */
844	sf_reset(sc);
845
846	/*
847	 * Get station address from the EEPROM.
848	 */
849	for (i = 0; i < ETHER_ADDR_LEN; i++)
850		eaddr[i] =
851		    sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
852
853	/* Allocate DMA resources. */
854	if (sf_dma_alloc(sc) != 0) {
855		error = ENOSPC;
856		goto fail;
857	}
858
859	sc->sf_txthresh = SF_MIN_TX_THRESHOLD;
860
861	ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
862	if (ifp == NULL) {
863		device_printf(dev, "can not allocate ifnet structure\n");
864		error = ENOSPC;
865		goto fail;
866	}
867
868	/* Do MII setup. */
869	error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
870	    sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
871	if (error != 0) {
872		device_printf(dev, "attaching PHYs failed\n");
873		goto fail;
874	}
875
876	ifp->if_softc = sc;
877	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
878	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
879	ifp->if_ioctl = sf_ioctl;
880	ifp->if_start = sf_start;
881	ifp->if_init = sf_init;
882	IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
883	ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
884	IFQ_SET_READY(&ifp->if_snd);
885	/*
886	 * With the help of firmware, AIC-6915 supports
887	 * Tx/Rx TCP/UDP checksum offload.
888	 */
889	ifp->if_hwassist = SF_CSUM_FEATURES;
890	ifp->if_capabilities = IFCAP_HWCSUM;
891
892	/*
893	 * Call MI attach routine.
894	 */
895	ether_ifattach(ifp, eaddr);
896
897	/* VLAN capability setup. */
898	ifp->if_capabilities |= IFCAP_VLAN_MTU;
899	ifp->if_capenable = ifp->if_capabilities;
900#ifdef DEVICE_POLLING
901	ifp->if_capabilities |= IFCAP_POLLING;
902#endif
903	/*
904	 * Tell the upper layer(s) we support long frames.
905	 * Must appear after the call to ether_ifattach() because
906	 * ether_ifattach() sets ifi_hdrlen to the default value.
907	 */
908	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
909
910	/* Hook interrupt last to avoid having to lock softc */
911	error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
912	    NULL, sf_intr, sc, &sc->sf_intrhand);
913
914	if (error) {
915		device_printf(dev, "couldn't set up irq\n");
916		ether_ifdetach(ifp);
917		goto fail;
918	}
919
920	gone_by_fcp101_dev(dev);
921
922fail:
923	if (error)
924		sf_detach(dev);
925
926	return (error);
927}
928
929/*
930 * Shutdown hardware and free up resources. This can be called any
931 * time after the mutex has been initialized. It is called in both
932 * the error case in attach and the normal detach case so it needs
933 * to be careful about only freeing resources that have actually been
934 * allocated.
935 */
936static int
937sf_detach(device_t dev)
938{
939	struct sf_softc		*sc;
940	struct ifnet		*ifp;
941
942	sc = device_get_softc(dev);
943	ifp = sc->sf_ifp;
944
945#ifdef DEVICE_POLLING
946	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
947		ether_poll_deregister(ifp);
948#endif
949
950	/* These should only be active if attach succeeded */
951	if (device_is_attached(dev)) {
952		SF_LOCK(sc);
953		sc->sf_detach = 1;
954		sf_stop(sc);
955		SF_UNLOCK(sc);
956		callout_drain(&sc->sf_co);
957		if (ifp != NULL)
958			ether_ifdetach(ifp);
959	}
960	if (sc->sf_miibus) {
961		device_delete_child(dev, sc->sf_miibus);
962		sc->sf_miibus = NULL;
963	}
964	bus_generic_detach(dev);
965
966	if (sc->sf_intrhand != NULL)
967		bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
968	if (sc->sf_irq != NULL)
969		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
970	if (sc->sf_res != NULL)
971		bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
972		    sc->sf_res);
973
974	sf_dma_free(sc);
975	if (ifp != NULL)
976		if_free(ifp);
977
978	mtx_destroy(&sc->sf_mtx);
979
980	return (0);
981}
982
983struct sf_dmamap_arg {
984	bus_addr_t		sf_busaddr;
985};
986
987static void
988sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
989{
990	struct sf_dmamap_arg	*ctx;
991
992	if (error != 0)
993		return;
994	ctx = arg;
995	ctx->sf_busaddr = segs[0].ds_addr;
996}
997
998static int
999sf_dma_alloc(struct sf_softc *sc)
1000{
1001	struct sf_dmamap_arg	ctx;
1002	struct sf_txdesc	*txd;
1003	struct sf_rxdesc	*rxd;
1004	bus_addr_t		lowaddr;
1005	bus_addr_t		rx_ring_end, rx_cring_end;
1006	bus_addr_t		tx_ring_end, tx_cring_end;
1007	int			error, i;
1008
1009	lowaddr = BUS_SPACE_MAXADDR;
1010
1011again:
1012	/* Create parent DMA tag. */
1013	error = bus_dma_tag_create(
1014	    bus_get_dma_tag(sc->sf_dev),	/* parent */
1015	    1, 0,			/* alignment, boundary */
1016	    lowaddr,			/* lowaddr */
1017	    BUS_SPACE_MAXADDR,		/* highaddr */
1018	    NULL, NULL,			/* filter, filterarg */
1019	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1020	    0,				/* nsegments */
1021	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1022	    0,				/* flags */
1023	    NULL, NULL,			/* lockfunc, lockarg */
1024	    &sc->sf_cdata.sf_parent_tag);
1025	if (error != 0) {
1026		device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
1027		goto fail;
1028	}
1029	/* Create tag for Tx ring. */
1030	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1031	    SF_RING_ALIGN, 0, 		/* alignment, boundary */
1032	    BUS_SPACE_MAXADDR,		/* lowaddr */
1033	    BUS_SPACE_MAXADDR,		/* highaddr */
1034	    NULL, NULL,			/* filter, filterarg */
1035	    SF_TX_DLIST_SIZE,		/* maxsize */
1036	    1,				/* nsegments */
1037	    SF_TX_DLIST_SIZE,		/* maxsegsize */
1038	    0,				/* flags */
1039	    NULL, NULL,			/* lockfunc, lockarg */
1040	    &sc->sf_cdata.sf_tx_ring_tag);
1041	if (error != 0) {
1042		device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
1043		goto fail;
1044	}
1045
1046	/* Create tag for Tx completion ring. */
1047	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1048	    SF_RING_ALIGN, 0, 		/* alignment, boundary */
1049	    BUS_SPACE_MAXADDR,		/* lowaddr */
1050	    BUS_SPACE_MAXADDR,		/* highaddr */
1051	    NULL, NULL,			/* filter, filterarg */
1052	    SF_TX_CLIST_SIZE,		/* maxsize */
1053	    1,				/* nsegments */
1054	    SF_TX_CLIST_SIZE,		/* maxsegsize */
1055	    0,				/* flags */
1056	    NULL, NULL,			/* lockfunc, lockarg */
1057	    &sc->sf_cdata.sf_tx_cring_tag);
1058	if (error != 0) {
1059		device_printf(sc->sf_dev,
1060		    "failed to create Tx completion ring DMA tag\n");
1061		goto fail;
1062	}
1063
1064	/* Create tag for Rx ring. */
1065	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1066	    SF_RING_ALIGN, 0,		/* alignment, boundary */
1067	    BUS_SPACE_MAXADDR,		/* lowaddr */
1068	    BUS_SPACE_MAXADDR,		/* highaddr */
1069	    NULL, NULL,			/* filter, filterarg */
1070	    SF_RX_DLIST_SIZE,		/* maxsize */
1071	    1,				/* nsegments */
1072	    SF_RX_DLIST_SIZE,		/* maxsegsize */
1073	    0,				/* flags */
1074	    NULL, NULL,			/* lockfunc, lockarg */
1075	    &sc->sf_cdata.sf_rx_ring_tag);
1076	if (error != 0) {
1077		device_printf(sc->sf_dev,
1078		    "failed to create Rx ring DMA tag\n");
1079		goto fail;
1080	}
1081
1082	/* Create tag for Rx completion ring. */
1083	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1084	    SF_RING_ALIGN, 0,		/* alignment, boundary */
1085	    BUS_SPACE_MAXADDR,		/* lowaddr */
1086	    BUS_SPACE_MAXADDR,		/* highaddr */
1087	    NULL, NULL,			/* filter, filterarg */
1088	    SF_RX_CLIST_SIZE,		/* maxsize */
1089	    1,				/* nsegments */
1090	    SF_RX_CLIST_SIZE,		/* maxsegsize */
1091	    0,				/* flags */
1092	    NULL, NULL,			/* lockfunc, lockarg */
1093	    &sc->sf_cdata.sf_rx_cring_tag);
1094	if (error != 0) {
1095		device_printf(sc->sf_dev,
1096		    "failed to create Rx completion ring DMA tag\n");
1097		goto fail;
1098	}
1099
1100	/* Create tag for Tx buffers. */
1101	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1102	    1, 0,			/* alignment, boundary */
1103	    BUS_SPACE_MAXADDR,		/* lowaddr */
1104	    BUS_SPACE_MAXADDR,		/* highaddr */
1105	    NULL, NULL,			/* filter, filterarg */
1106	    MCLBYTES * SF_MAXTXSEGS,	/* maxsize */
1107	    SF_MAXTXSEGS,		/* nsegments */
1108	    MCLBYTES,			/* maxsegsize */
1109	    0,				/* flags */
1110	    NULL, NULL,			/* lockfunc, lockarg */
1111	    &sc->sf_cdata.sf_tx_tag);
1112	if (error != 0) {
1113		device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
1114		goto fail;
1115	}
1116
1117	/* Create tag for Rx buffers. */
1118	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
1119	    SF_RX_ALIGN, 0,		/* alignment, boundary */
1120	    BUS_SPACE_MAXADDR,		/* lowaddr */
1121	    BUS_SPACE_MAXADDR,		/* highaddr */
1122	    NULL, NULL,			/* filter, filterarg */
1123	    MCLBYTES,			/* maxsize */
1124	    1,				/* nsegments */
1125	    MCLBYTES,			/* maxsegsize */
1126	    0,				/* flags */
1127	    NULL, NULL,			/* lockfunc, lockarg */
1128	    &sc->sf_cdata.sf_rx_tag);
1129	if (error != 0) {
1130		device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
1131		goto fail;
1132	}
1133
1134	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1135	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
1136	    (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
1137	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
1138	if (error != 0) {
1139		device_printf(sc->sf_dev,
1140		    "failed to allocate DMA'able memory for Tx ring\n");
1141		goto fail;
1142	}
1143
1144	ctx.sf_busaddr = 0;
1145	error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
1146	    sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
1147	    SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1148	if (error != 0 || ctx.sf_busaddr == 0) {
1149		device_printf(sc->sf_dev,
1150		    "failed to load DMA'able memory for Tx ring\n");
1151		goto fail;
1152	}
1153	sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;
1154
1155	/*
1156	 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
1157	 */
1158	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
1159	    (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
1160	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
1161	if (error != 0) {
1162		device_printf(sc->sf_dev,
1163		    "failed to allocate DMA'able memory for "
1164		    "Tx completion ring\n");
1165		goto fail;
1166	}
1167
1168	ctx.sf_busaddr = 0;
1169	error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
1170	    sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
1171	    SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1172	if (error != 0 || ctx.sf_busaddr == 0) {
1173		device_printf(sc->sf_dev,
1174		    "failed to load DMA'able memory for Tx completion ring\n");
1175		goto fail;
1176	}
1177	sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;
1178
1179	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1180	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
1181	    (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
1182	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
1183	if (error != 0) {
1184		device_printf(sc->sf_dev,
1185		    "failed to allocate DMA'able memory for Rx ring\n");
1186		goto fail;
1187	}
1188
1189	ctx.sf_busaddr = 0;
1190	error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
1191	    sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
1192	    SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1193	if (error != 0 || ctx.sf_busaddr == 0) {
1194		device_printf(sc->sf_dev,
1195		    "failed to load DMA'able memory for Rx ring\n");
1196		goto fail;
1197	}
1198	sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;
1199
1200	/*
1201	 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
1202	 */
1203	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
1204	    (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
1205	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
1206	if (error != 0) {
1207		device_printf(sc->sf_dev,
1208		    "failed to allocate DMA'able memory for "
1209		    "Rx completion ring\n");
1210		goto fail;
1211	}
1212
1213	ctx.sf_busaddr = 0;
1214	error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
1215	    sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
1216	    SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
1217	if (error != 0 || ctx.sf_busaddr == 0) {
1218		device_printf(sc->sf_dev,
1219		    "failed to load DMA'able memory for Rx completion ring\n");
1220		goto fail;
1221	}
1222	sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;
1223
1224	/*
1225	 * Tx desciptor ring and Tx completion ring should be addressed in
1226	 * the same 4GB space. The same rule applys to Rx ring and Rx
1227	 * completion ring. Unfortunately there is no way to specify this
1228	 * boundary restriction with bus_dma(9). So just try to allocate
1229	 * without the restriction and check the restriction was satisfied.
1230	 * If not, fall back to 32bit dma addressing mode which always
1231	 * guarantees the restriction.
1232	 */
1233	tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
1234	tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
1235	rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
1236	rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
1237	if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
1238	    SF_ADDR_HI(tx_cring_end)) ||
1239	    (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
1240	    SF_ADDR_HI(tx_ring_end)) ||
1241	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
1242	    SF_ADDR_HI(rx_cring_end)) ||
1243	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
1244	    SF_ADDR_HI(rx_ring_end))) {
1245		device_printf(sc->sf_dev,
1246		    "switching to 32bit DMA mode\n");
1247		sf_dma_free(sc);
1248		/* Limit DMA address space to 32bit and try again. */
1249		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1250		goto again;
1251	}
1252
1253	/* Create DMA maps for Tx buffers. */
1254	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1255		txd = &sc->sf_cdata.sf_txdesc[i];
1256		txd->tx_m = NULL;
1257		txd->ndesc = 0;
1258		txd->tx_dmamap = NULL;
1259		error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
1260		    &txd->tx_dmamap);
1261		if (error != 0) {
1262			device_printf(sc->sf_dev,
1263			    "failed to create Tx dmamap\n");
1264			goto fail;
1265		}
1266	}
1267	/* Create DMA maps for Rx buffers. */
1268	if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1269	    &sc->sf_cdata.sf_rx_sparemap)) != 0) {
1270		device_printf(sc->sf_dev,
1271		    "failed to create spare Rx dmamap\n");
1272		goto fail;
1273	}
1274	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1275		rxd = &sc->sf_cdata.sf_rxdesc[i];
1276		rxd->rx_m = NULL;
1277		rxd->rx_dmamap = NULL;
1278		error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
1279		    &rxd->rx_dmamap);
1280		if (error != 0) {
1281			device_printf(sc->sf_dev,
1282			    "failed to create Rx dmamap\n");
1283			goto fail;
1284		}
1285	}
1286
1287fail:
1288	return (error);
1289}
1290
1291static void
1292sf_dma_free(struct sf_softc *sc)
1293{
1294	struct sf_txdesc	*txd;
1295	struct sf_rxdesc	*rxd;
1296	int			i;
1297
1298	/* Tx ring. */
1299	if (sc->sf_cdata.sf_tx_ring_tag) {
1300		if (sc->sf_rdata.sf_tx_ring_paddr)
1301			bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
1302			    sc->sf_cdata.sf_tx_ring_map);
1303		if (sc->sf_rdata.sf_tx_ring)
1304			bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
1305			    sc->sf_rdata.sf_tx_ring,
1306			    sc->sf_cdata.sf_tx_ring_map);
1307		sc->sf_rdata.sf_tx_ring = NULL;
1308		sc->sf_rdata.sf_tx_ring_paddr = 0;
1309		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
1310		sc->sf_cdata.sf_tx_ring_tag = NULL;
1311	}
1312	/* Tx completion ring. */
1313	if (sc->sf_cdata.sf_tx_cring_tag) {
1314		if (sc->sf_rdata.sf_tx_cring_paddr)
1315			bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
1316			    sc->sf_cdata.sf_tx_cring_map);
1317		if (sc->sf_rdata.sf_tx_cring)
1318			bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
1319			    sc->sf_rdata.sf_tx_cring,
1320			    sc->sf_cdata.sf_tx_cring_map);
1321		sc->sf_rdata.sf_tx_cring = NULL;
1322		sc->sf_rdata.sf_tx_cring_paddr = 0;
1323		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
1324		sc->sf_cdata.sf_tx_cring_tag = NULL;
1325	}
1326	/* Rx ring. */
1327	if (sc->sf_cdata.sf_rx_ring_tag) {
1328		if (sc->sf_rdata.sf_rx_ring_paddr)
1329			bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
1330			    sc->sf_cdata.sf_rx_ring_map);
1331		if (sc->sf_rdata.sf_rx_ring)
1332			bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
1333			    sc->sf_rdata.sf_rx_ring,
1334			    sc->sf_cdata.sf_rx_ring_map);
1335		sc->sf_rdata.sf_rx_ring = NULL;
1336		sc->sf_rdata.sf_rx_ring_paddr = 0;
1337		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
1338		sc->sf_cdata.sf_rx_ring_tag = NULL;
1339	}
1340	/* Rx completion ring. */
1341	if (sc->sf_cdata.sf_rx_cring_tag) {
1342		if (sc->sf_rdata.sf_rx_cring_paddr)
1343			bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
1344			    sc->sf_cdata.sf_rx_cring_map);
1345		if (sc->sf_rdata.sf_rx_cring)
1346			bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
1347			    sc->sf_rdata.sf_rx_cring,
1348			    sc->sf_cdata.sf_rx_cring_map);
1349		sc->sf_rdata.sf_rx_cring = NULL;
1350		sc->sf_rdata.sf_rx_cring_paddr = 0;
1351		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
1352		sc->sf_cdata.sf_rx_cring_tag = NULL;
1353	}
1354	/* Tx buffers. */
1355	if (sc->sf_cdata.sf_tx_tag) {
1356		for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1357			txd = &sc->sf_cdata.sf_txdesc[i];
1358			if (txd->tx_dmamap) {
1359				bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
1360				    txd->tx_dmamap);
1361				txd->tx_dmamap = NULL;
1362			}
1363		}
1364		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
1365		sc->sf_cdata.sf_tx_tag = NULL;
1366	}
1367	/* Rx buffers. */
1368	if (sc->sf_cdata.sf_rx_tag) {
1369		for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1370			rxd = &sc->sf_cdata.sf_rxdesc[i];
1371			if (rxd->rx_dmamap) {
1372				bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1373				    rxd->rx_dmamap);
1374				rxd->rx_dmamap = NULL;
1375			}
1376		}
1377		if (sc->sf_cdata.sf_rx_sparemap) {
1378			bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
1379			    sc->sf_cdata.sf_rx_sparemap);
1380			sc->sf_cdata.sf_rx_sparemap = 0;
1381		}
1382		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
1383		sc->sf_cdata.sf_rx_tag = NULL;
1384	}
1385
1386	if (sc->sf_cdata.sf_parent_tag) {
1387		bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
1388		sc->sf_cdata.sf_parent_tag = NULL;
1389	}
1390}
1391
1392static int
1393sf_init_rx_ring(struct sf_softc *sc)
1394{
1395	struct sf_ring_data	*rd;
1396	int			i;
1397
1398	sc->sf_cdata.sf_rxc_cons = 0;
1399
1400	rd = &sc->sf_rdata;
1401	bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
1402	bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);
1403
1404	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1405		if (sf_newbuf(sc, i) != 0)
1406			return (ENOBUFS);
1407	}
1408
1409	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1410	    sc->sf_cdata.sf_rx_cring_map,
1411	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1412	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1413	    sc->sf_cdata.sf_rx_ring_map,
1414	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1415
1416	return (0);
1417}
1418
1419static void
1420sf_init_tx_ring(struct sf_softc *sc)
1421{
1422	struct sf_ring_data	*rd;
1423	int			i;
1424
1425	sc->sf_cdata.sf_tx_prod = 0;
1426	sc->sf_cdata.sf_tx_cnt = 0;
1427	sc->sf_cdata.sf_txc_cons = 0;
1428
1429	rd = &sc->sf_rdata;
1430	bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
1431	bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
1432	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1433		rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
1434		sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
1435		sc->sf_cdata.sf_txdesc[i].ndesc = 0;
1436	}
1437	rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END);
1438
1439	bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
1440	    sc->sf_cdata.sf_tx_ring_map,
1441	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1442	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1443	    sc->sf_cdata.sf_tx_cring_map,
1444	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1445}
1446
1447/*
1448 * Initialize an RX descriptor and attach an MBUF cluster.
1449 */
1450static int
1451sf_newbuf(struct sf_softc *sc, int idx)
1452{
1453	struct sf_rx_rdesc	*desc;
1454	struct sf_rxdesc	*rxd;
1455	struct mbuf		*m;
1456	bus_dma_segment_t	segs[1];
1457	bus_dmamap_t		map;
1458	int			nsegs;
1459
1460	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1461	if (m == NULL)
1462		return (ENOBUFS);
1463	m->m_len = m->m_pkthdr.len = MCLBYTES;
1464	m_adj(m, sizeof(uint32_t));
1465
1466	if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
1467	    sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1468		m_freem(m);
1469		return (ENOBUFS);
1470	}
1471	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1472
1473	rxd = &sc->sf_cdata.sf_rxdesc[idx];
1474	if (rxd->rx_m != NULL) {
1475		bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1476		    BUS_DMASYNC_POSTREAD);
1477		bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
1478	}
1479	map = rxd->rx_dmamap;
1480	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
1481	sc->sf_cdata.sf_rx_sparemap = map;
1482	bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
1483	    BUS_DMASYNC_PREREAD);
1484	rxd->rx_m = m;
1485	desc = &sc->sf_rdata.sf_rx_ring[idx];
1486	desc->sf_addr = htole64(segs[0].ds_addr);
1487
1488	return (0);
1489}
1490
1491#ifndef __NO_STRICT_ALIGNMENT
1492static __inline void
1493sf_fixup_rx(struct mbuf *m)
1494{
1495        int			i;
1496        uint16_t		*src, *dst;
1497
1498	src = mtod(m, uint16_t *);
1499	dst = src - 1;
1500
1501	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1502		*dst++ = *src++;
1503
1504	m->m_data -= ETHER_ALIGN;
1505}
1506#endif
1507
1508/*
1509 * The starfire is programmed to use 'normal' mode for packet reception,
1510 * which means we use the consumer/producer model for both the buffer
1511 * descriptor queue and the completion descriptor queue. The only problem
1512 * with this is that it involves a lot of register accesses: we have to
1513 * read the RX completion consumer and producer indexes and the RX buffer
1514 * producer index, plus the RX completion consumer and RX buffer producer
1515 * indexes have to be updated. It would have been easier if Adaptec had
1516 * put each index in a separate register, especially given that the damn
1517 * NIC has a 512K register space.
1518 *
1519 * In spite of all the lovely features that Adaptec crammed into the 6915,
1520 * it is marred by one truly stupid design flaw, which is that receive
1521 * buffer addresses must be aligned on a longword boundary. This forces
1522 * the packet payload to be unaligned, which is suboptimal on the x86 and
1523 * completely unusable on the Alpha. Our only recourse is to copy received
1524 * packets into properly aligned buffers before handing them off.
1525 */
1526static int
1527sf_rxeof(struct sf_softc *sc)
1528{
1529	struct mbuf		*m;
1530	struct ifnet		*ifp;
1531	struct sf_rxdesc	*rxd;
1532	struct sf_rx_rcdesc	*cur_cmp;
1533	int			cons, eidx, prog, rx_npkts;
1534	uint32_t		status, status2;
1535
1536	SF_LOCK_ASSERT(sc);
1537
1538	ifp = sc->sf_ifp;
1539	rx_npkts = 0;
1540
1541	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1542	    sc->sf_cdata.sf_rx_ring_map,
1543	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1544	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1545	    sc->sf_cdata.sf_rx_cring_map,
1546	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1547
1548	/*
1549	 * To reduce register access, directly read Receive completion
1550	 * queue entry.
1551	 */
1552	eidx = 0;
1553	prog = 0;
1554	for (cons = sc->sf_cdata.sf_rxc_cons;
1555	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1556	    SF_INC(cons, SF_RX_CLIST_CNT)) {
1557		cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
1558		status = le32toh(cur_cmp->sf_rx_status1);
1559		if (status == 0)
1560			break;
1561#ifdef DEVICE_POLLING
1562		if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
1563			if (sc->rxcycles <= 0)
1564				break;
1565			sc->rxcycles--;
1566		}
1567#endif
1568		prog++;
1569		eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
1570		rxd = &sc->sf_cdata.sf_rxdesc[eidx];
1571		m = rxd->rx_m;
1572
1573		/*
1574		 * Note, IFCOUNTER_IPACKETS and IFCOUNTER_IERRORS
1575		 * are handled in sf_stats_update().
1576		 */
1577		if ((status & SF_RXSTAT1_OK) == 0) {
1578			cur_cmp->sf_rx_status1 = 0;
1579			continue;
1580		}
1581
1582		if (sf_newbuf(sc, eidx) != 0) {
1583			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1584			cur_cmp->sf_rx_status1 = 0;
1585			continue;
1586		}
1587
1588		/* AIC-6915 supports TCP/UDP checksum offload. */
1589		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1590			status2 = le32toh(cur_cmp->sf_rx_status2);
1591			/*
1592			 * Sometimes AIC-6915 generates an interrupt to
1593			 * warn RxGFP stall with bad checksum bit set
1594			 * in status word. I'm not sure what conditioan
1595			 * triggers it but recevied packet's checksum
1596			 * was correct even though AIC-6915 does not
1597			 * agree on this. This may be an indication of
1598			 * firmware bug. To fix the issue, do not rely
1599			 * on bad checksum bit in status word and let
1600			 * upper layer verify integrity of received
1601			 * frame.
1602			 * Another nice feature of AIC-6915 is hardware
1603			 * assistance of checksum calculation by
1604			 * providing partial checksum value for received
1605			 * frame. The partial checksum value can be used
1606			 * to accelerate checksum computation for
1607			 * fragmented TCP/UDP packets. Upper network
1608			 * stack already takes advantage of the partial
1609			 * checksum value in IP reassembly stage. But
1610			 * I'm not sure the correctness of the partial
1611			 * hardware checksum assistance as frequent
1612			 * RxGFP stalls are seen on non-fragmented
1613			 * frames. Due to the nature of the complexity
1614			 * of checksum computation code in firmware it's
1615			 * possible to see another bug in RxGFP so
1616			 * ignore checksum assistance for fragmented
1617			 * frames. This can be changed in future.
1618			 */
1619			if ((status2 & SF_RXSTAT2_FRAG) == 0) {
1620				if ((status2 & (SF_RXSTAT2_TCP |
1621				    SF_RXSTAT2_UDP)) != 0) {
1622					if ((status2 & SF_RXSTAT2_CSUM_OK)) {
1623						m->m_pkthdr.csum_flags =
1624						    CSUM_DATA_VALID |
1625						    CSUM_PSEUDO_HDR;
1626						m->m_pkthdr.csum_data = 0xffff;
1627					}
1628				}
1629			}
1630#ifdef SF_PARTIAL_CSUM_SUPPORT
1631			else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
1632				if ((status2 & (SF_RXSTAT2_TCP |
1633				    SF_RXSTAT2_UDP)) != 0) {
1634					if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
1635						m->m_pkthdr.csum_flags =
1636						    CSUM_DATA_VALID;
1637						m->m_pkthdr.csum_data =
1638						    (status &
1639						    SF_RX_CMPDESC_CSUM2);
1640					}
1641				}
1642			}
1643#endif
1644		}
1645
1646		m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
1647#ifndef	__NO_STRICT_ALIGNMENT
1648		sf_fixup_rx(m);
1649#endif
1650		m->m_pkthdr.rcvif = ifp;
1651
1652		SF_UNLOCK(sc);
1653		(*ifp->if_input)(ifp, m);
1654		SF_LOCK(sc);
1655		rx_npkts++;
1656
1657		/* Clear completion status. */
1658		cur_cmp->sf_rx_status1 = 0;
1659	}
1660
1661	if (prog > 0) {
1662		sc->sf_cdata.sf_rxc_cons = cons;
1663		bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
1664		    sc->sf_cdata.sf_rx_ring_map,
1665		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1666		bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
1667		    sc->sf_cdata.sf_rx_cring_map,
1668		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1669
1670		/* Update Rx completion Q1 consumer index. */
1671		csr_write_4(sc, SF_CQ_CONSIDX,
1672		    (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
1673		    (cons & SF_CQ_CONSIDX_RXQ1));
1674		/* Update Rx descriptor Q1 ptr. */
1675		csr_write_4(sc, SF_RXDQ_PTR_Q1,
1676		    (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
1677		    (eidx & SF_RXDQ_PRODIDX));
1678	}
1679	return (rx_npkts);
1680}
1681
1682/*
1683 * Read the transmit status from the completion queue and release
1684 * mbufs. Note that the buffer descriptor index in the completion
1685 * descriptor is an offset from the start of the transmit buffer
1686 * descriptor list in bytes. This is important because the manual
1687 * gives the impression that it should match the producer/consumer
1688 * index, which is the offset in 8 byte blocks.
1689 */
1690static void
1691sf_txeof(struct sf_softc *sc)
1692{
1693	struct sf_txdesc	*txd;
1694	struct sf_tx_rcdesc	*cur_cmp;
1695	struct ifnet		*ifp;
1696	uint32_t		status;
1697	int			cons, idx, prod;
1698
1699	SF_LOCK_ASSERT(sc);
1700
1701	ifp = sc->sf_ifp;
1702
1703	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1704	    sc->sf_cdata.sf_tx_cring_map,
1705	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1706
1707	cons = sc->sf_cdata.sf_txc_cons;
1708	prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
1709	if (prod == cons)
1710		return;
1711
1712	for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
1713		cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
1714		status = le32toh(cur_cmp->sf_tx_status1);
1715		if (status == 0)
1716			break;
1717		switch (status & SF_TX_CMPDESC_TYPE) {
1718		case SF_TXCMPTYPE_TX:
1719			/* Tx complete entry. */
1720			break;
1721		case SF_TXCMPTYPE_DMA:
1722			/* DMA complete entry. */
1723			idx = status & SF_TX_CMPDESC_IDX;
1724			idx = idx / sizeof(struct sf_tx_rdesc);
1725			/*
1726			 * We don't need to check Tx status here.
1727			 * SF_ISR_TX_LOFIFO intr would handle this.
1728			 * Note, IFCOUNTER_OPACKETS, IFCOUNTER_COLLISIONS
1729			 * and IFCOUNTER_OERROR are handled in
1730			 * sf_stats_update().
1731			 */
1732			txd = &sc->sf_cdata.sf_txdesc[idx];
1733			if (txd->tx_m != NULL) {
1734				bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
1735				    txd->tx_dmamap,
1736				    BUS_DMASYNC_POSTWRITE);
1737				bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
1738				    txd->tx_dmamap);
1739				m_freem(txd->tx_m);
1740				txd->tx_m = NULL;
1741			}
1742			sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
1743			KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
1744			    ("%s: Active Tx desc counter was garbled\n",
1745			    __func__));
1746			txd->ndesc = 0;
1747			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1748			break;
1749		default:
1750			/* It should not happen. */
1751			device_printf(sc->sf_dev,
1752			    "unknown Tx completion type : 0x%08x : %d : %d\n",
1753			    status, cons, prod);
1754			break;
1755		}
1756		cur_cmp->sf_tx_status1 = 0;
1757	}
1758
1759	sc->sf_cdata.sf_txc_cons = cons;
1760	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
1761	    sc->sf_cdata.sf_tx_cring_map,
1762	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1763
1764	if (sc->sf_cdata.sf_tx_cnt == 0)
1765		sc->sf_watchdog_timer = 0;
1766
1767	/* Update Tx completion consumer index. */
1768	csr_write_4(sc, SF_CQ_CONSIDX,
1769	    (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1770	    ((cons << 16) & 0xffff0000));
1771}
1772
1773static void
1774sf_txthresh_adjust(struct sf_softc *sc)
1775{
1776	uint32_t		txfctl;
1777
1778	device_printf(sc->sf_dev, "Tx underrun -- ");
1779	if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1780		txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1781		/* Increase Tx threshold 256 bytes. */
1782		sc->sf_txthresh += 16;
1783		if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1784			sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1785		txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1786		txfctl |= sc->sf_txthresh;
1787		printf("increasing Tx threshold to %d bytes\n",
1788		    sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1789		csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1790	} else
1791		printf("\n");
1792}
1793
1794#ifdef DEVICE_POLLING
1795static int
1796sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1797{
1798	struct sf_softc		*sc;
1799	uint32_t		status;
1800	int			rx_npkts;
1801
1802	sc = ifp->if_softc;
1803	rx_npkts = 0;
1804	SF_LOCK(sc);
1805
1806	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1807		SF_UNLOCK(sc);
1808		return (rx_npkts);
1809	}
1810
1811	sc->rxcycles = count;
1812	rx_npkts = sf_rxeof(sc);
1813	sf_txeof(sc);
1814	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1815		sf_start_locked(ifp);
1816
1817	if (cmd == POLL_AND_CHECK_STATUS) {
1818		/* Reading the ISR register clears all interrrupts. */
1819		status = csr_read_4(sc, SF_ISR);
1820
1821		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1822			if ((status & SF_ISR_STATSOFLOW) != 0)
1823				sf_stats_update(sc);
1824			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1825				sf_txthresh_adjust(sc);
1826			else if ((status & SF_ISR_DMAERR) != 0) {
1827				device_printf(sc->sf_dev,
1828				    "DMA error, resetting\n");
1829				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1830				sf_init_locked(sc);
1831				SF_UNLOCK(sc);
1832				return (rx_npkts);
1833			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1834				sc->sf_statistics.sf_tx_gfp_stall++;
1835#ifdef	SF_GFP_DEBUG
1836				device_printf(sc->sf_dev,
1837				    "TxGFP is not responding!\n");
1838#endif
1839			} else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1840				sc->sf_statistics.sf_rx_gfp_stall++;
1841#ifdef	SF_GFP_DEBUG
1842				device_printf(sc->sf_dev,
1843				    "RxGFP is not responding!\n");
1844#endif
1845			}
1846		}
1847	}
1848
1849	SF_UNLOCK(sc);
1850	return (rx_npkts);
1851}
1852#endif /* DEVICE_POLLING */
1853
1854static void
1855sf_intr(void *arg)
1856{
1857	struct sf_softc		*sc;
1858	struct ifnet		*ifp;
1859	uint32_t		status;
1860	int			cnt;
1861
1862	sc = (struct sf_softc *)arg;
1863	SF_LOCK(sc);
1864
1865	if (sc->sf_suspended != 0)
1866		goto done_locked;
1867
1868	/* Reading the ISR register clears all interrrupts. */
1869	status = csr_read_4(sc, SF_ISR);
1870	if (status == 0 || status == 0xffffffff ||
1871	    (status & SF_ISR_PCIINT_ASSERTED) == 0)
1872		goto done_locked;
1873
1874	ifp = sc->sf_ifp;
1875#ifdef DEVICE_POLLING
1876	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1877		goto done_locked;
1878#endif
1879
1880	/* Disable interrupts. */
1881	csr_write_4(sc, SF_IMR, 0x00000000);
1882
1883	for (cnt = 32; (status & SF_INTRS) != 0;) {
1884		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1885			break;
1886		if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1887			sf_rxeof(sc);
1888
1889		if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1890		    SF_ISR_TX_QUEUEDONE)) != 0)
1891			sf_txeof(sc);
1892
1893		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1894			if ((status & SF_ISR_STATSOFLOW) != 0)
1895				sf_stats_update(sc);
1896			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1897				sf_txthresh_adjust(sc);
1898			else if ((status & SF_ISR_DMAERR) != 0) {
1899				device_printf(sc->sf_dev,
1900				    "DMA error, resetting\n");
1901				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1902				sf_init_locked(sc);
1903				SF_UNLOCK(sc);
1904				return;
1905			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1906				sc->sf_statistics.sf_tx_gfp_stall++;
1907#ifdef	SF_GFP_DEBUG
1908				device_printf(sc->sf_dev,
1909				    "TxGFP is not responding!\n");
1910#endif
1911			}
1912			else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1913				sc->sf_statistics.sf_rx_gfp_stall++;
1914#ifdef	SF_GFP_DEBUG
1915				device_printf(sc->sf_dev,
1916				    "RxGFP is not responding!\n");
1917#endif
1918			}
1919		}
1920		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1921			sf_start_locked(ifp);
1922		if (--cnt <= 0)
1923			break;
1924		/* Reading the ISR register clears all interrrupts. */
1925		status = csr_read_4(sc, SF_ISR);
1926	}
1927
1928	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1929		/* Re-enable interrupts. */
1930		csr_write_4(sc, SF_IMR, SF_INTRS);
1931	}
1932
1933done_locked:
1934	SF_UNLOCK(sc);
1935}
1936
1937static void
1938sf_download_fw(struct sf_softc *sc)
1939{
1940	uint32_t gfpinst;
1941	int i, ndx;
1942	uint8_t *p;
1943
1944	/*
1945	 * A FP instruction is composed of 48bits so we have to
1946	 * write it with two parts.
1947	 */
1948	p = txfwdata;
1949	ndx = 0;
1950	for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1951		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1952		csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1953		gfpinst = p[0] << 8 | p[1];
1954		csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1955		p += SF_GFP_INST_BYTES;
1956		ndx += 2;
1957	}
1958	if (bootverbose)
1959		device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1960
1961	p = rxfwdata;
1962	ndx = 0;
1963	for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1964		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1965		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1966		gfpinst = p[0] << 8 | p[1];
1967		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1968		p += SF_GFP_INST_BYTES;
1969		ndx += 2;
1970	}
1971	if (bootverbose)
1972		device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1973}
1974
1975static void
1976sf_init(void *xsc)
1977{
1978	struct sf_softc		*sc;
1979
1980	sc = (struct sf_softc *)xsc;
1981	SF_LOCK(sc);
1982	sf_init_locked(sc);
1983	SF_UNLOCK(sc);
1984}
1985
1986static void
1987sf_init_locked(struct sf_softc *sc)
1988{
1989	struct ifnet		*ifp;
1990	uint8_t			eaddr[ETHER_ADDR_LEN];
1991	bus_addr_t		addr;
1992	int			i;
1993
1994	SF_LOCK_ASSERT(sc);
1995	ifp = sc->sf_ifp;
1996	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1997		return;
1998
1999	sf_stop(sc);
2000	/* Reset the hardware to a known state. */
2001	sf_reset(sc);
2002
2003	/* Init all the receive filter registers */
2004	for (i = SF_RXFILT_PERFECT_BASE;
2005	    i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
2006		csr_write_4(sc, i, 0);
2007
2008	/* Empty stats counter registers. */
2009	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2010		csr_write_4(sc, i, 0);
2011
2012	/* Init our MAC address. */
2013	bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2014	csr_write_4(sc, SF_PAR0,
2015	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2016	csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2017	sf_setperf(sc, 0, eaddr);
2018
2019	if (sf_init_rx_ring(sc) == ENOBUFS) {
2020		device_printf(sc->sf_dev,
2021		    "initialization failed: no memory for rx buffers\n");
2022		sf_stop(sc);
2023		return;
2024	}
2025
2026	sf_init_tx_ring(sc);
2027
2028	/*
2029	 * 16 perfect address filtering.
2030	 * Hash only multicast destination address, Accept matching
2031	 * frames regardless of VLAN ID.
2032	 */
2033	csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2034
2035	/*
2036	 * Set Rx filter.
2037	 */
2038	sf_rxfilter(sc);
2039
2040	/* Init the completion queue indexes. */
2041	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2042	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2043
2044	/* Init the RX completion queue. */
2045	addr = sc->sf_rdata.sf_rx_cring_paddr;
2046	csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2047	csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2048	if (SF_ADDR_HI(addr) != 0)
2049		SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2050	/* Set RX completion queue type 2. */
2051	SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2052	csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2053
2054	/*
2055	 * Init RX DMA control.
2056	 * default RxHighPriority Threshold,
2057	 * default RxBurstSize, 128bytes.
2058	 */
2059	SF_SETBIT(sc, SF_RXDMA_CTL,
2060	    SF_RXDMA_REPORTBADPKTS |
2061	    (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2062	    SF_RXDMA_BURST);
2063
2064	/* Init the RX buffer descriptor queue. */
2065	addr = sc->sf_rdata.sf_rx_ring_paddr;
2066	csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2067	csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2068
2069	/* Set RX queue buffer length. */
2070	csr_write_4(sc, SF_RXDQ_CTL_1,
2071	    ((MCLBYTES  - sizeof(uint32_t)) << 16) |
2072	    SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2073
2074	if (SF_ADDR_HI(addr) != 0)
2075		SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2076	csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2077	csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2078
2079	/* Init the TX completion queue */
2080	addr = sc->sf_rdata.sf_tx_cring_paddr;
2081	csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2082	if (SF_ADDR_HI(addr) != 0)
2083		SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2084
2085	/* Init the TX buffer descriptor queue. */
2086	addr = sc->sf_rdata.sf_tx_ring_paddr;
2087	csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2088	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2089	csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2090	csr_write_4(sc, SF_TX_FRAMCTL,
2091	    SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2092	csr_write_4(sc, SF_TXDQ_CTL,
2093	    SF_TXDMA_HIPRIO_THRESH << 24 |
2094	    SF_TXSKIPLEN_0BYTES << 16 |
2095	    SF_TXDDMA_BURST << 8 |
2096	    SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2097	if (SF_ADDR_HI(addr) != 0)
2098		SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2099
2100	/* Set VLAN Type register. */
2101	csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2102
2103	/* Set TxPause Timer. */
2104	csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2105
2106	/* Enable autopadding of short TX frames. */
2107	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2108	SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2109	/* Make sure to reset MAC to take changes effect. */
2110	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2111	DELAY(1000);
2112	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2113
2114	/* Enable PCI bus master. */
2115	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2116
2117	/* Load StarFire firmware. */
2118	sf_download_fw(sc);
2119
2120	/* Intialize interrupt moderation. */
2121	csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2122	    (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2123
2124#ifdef DEVICE_POLLING
2125	/* Disable interrupts if we are polling. */
2126	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2127		csr_write_4(sc, SF_IMR, 0x00000000);
2128	else
2129#endif
2130	/* Enable interrupts. */
2131	csr_write_4(sc, SF_IMR, SF_INTRS);
2132	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2133
2134	/* Enable the RX and TX engines. */
2135	csr_write_4(sc, SF_GEN_ETH_CTL,
2136	    SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2137	    SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2138
2139	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2140		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2141	else
2142		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2143	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2144		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2145	else
2146		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2147
2148	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2149	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2150
2151	sc->sf_link = 0;
2152	sf_ifmedia_upd_locked(ifp);
2153
2154	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2155}
2156
2157static int
2158sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2159{
2160	struct sf_txdesc	*txd;
2161	struct sf_tx_rdesc	*desc;
2162	struct mbuf		*m;
2163	bus_dmamap_t		map;
2164	bus_dma_segment_t	txsegs[SF_MAXTXSEGS];
2165	int			error, i, nsegs, prod, si;
2166	int			avail, nskip;
2167
2168	SF_LOCK_ASSERT(sc);
2169
2170	m = *m_head;
2171	prod = sc->sf_cdata.sf_tx_prod;
2172	txd = &sc->sf_cdata.sf_txdesc[prod];
2173	map = txd->tx_dmamap;
2174	error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2175	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2176	if (error == EFBIG) {
2177		m = m_collapse(*m_head, M_NOWAIT, SF_MAXTXSEGS);
2178		if (m == NULL) {
2179			m_freem(*m_head);
2180			*m_head = NULL;
2181			return (ENOBUFS);
2182		}
2183		*m_head = m;
2184		error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2185		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2186		if (error != 0) {
2187			m_freem(*m_head);
2188			*m_head = NULL;
2189			return (error);
2190		}
2191	} else if (error != 0)
2192		return (error);
2193	if (nsegs == 0) {
2194		m_freem(*m_head);
2195		*m_head = NULL;
2196		return (EIO);
2197	}
2198
2199	/* Check number of available descriptors. */
2200	avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2201	if (avail < nsegs) {
2202		bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2203		return (ENOBUFS);
2204	}
2205	nskip = 0;
2206	if (prod + nsegs >= SF_TX_DLIST_CNT) {
2207		nskip = SF_TX_DLIST_CNT - prod - 1;
2208		if (avail < nsegs + nskip) {
2209			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2210			return (ENOBUFS);
2211		}
2212	}
2213
2214	bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2215
2216	si = prod;
2217	for (i = 0; i < nsegs; i++) {
2218		desc = &sc->sf_rdata.sf_tx_ring[prod];
2219		desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2220		    (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2221		desc->sf_tx_reserved = 0;
2222		desc->sf_addr = htole64(txsegs[i].ds_addr);
2223		if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2224			/* Queue wraps! */
2225			desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2226			prod = 0;
2227		} else
2228			SF_INC(prod, SF_TX_DLIST_CNT);
2229	}
2230	/* Update producer index. */
2231	sc->sf_cdata.sf_tx_prod = prod;
2232	sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2233
2234	desc = &sc->sf_rdata.sf_tx_ring[si];
2235	/* Check TDP/UDP checksum offload request. */
2236	if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2237		desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2238	desc->sf_tx_ctrl |=
2239	    htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2240
2241	txd->tx_dmamap = map;
2242	txd->tx_m = m;
2243	txd->ndesc = nsegs + nskip;
2244
2245	return (0);
2246}
2247
2248static void
2249sf_start(struct ifnet *ifp)
2250{
2251	struct sf_softc		*sc;
2252
2253	sc = ifp->if_softc;
2254	SF_LOCK(sc);
2255	sf_start_locked(ifp);
2256	SF_UNLOCK(sc);
2257}
2258
2259static void
2260sf_start_locked(struct ifnet *ifp)
2261{
2262	struct sf_softc		*sc;
2263	struct mbuf		*m_head;
2264	int			enq;
2265
2266	sc = ifp->if_softc;
2267	SF_LOCK_ASSERT(sc);
2268
2269	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2270	    IFF_DRV_RUNNING || sc->sf_link == 0)
2271		return;
2272
2273	/*
2274	 * Since we don't know when descriptor wrap occurrs in advance
2275	 * limit available number of active Tx descriptor counter to be
2276	 * higher than maximum number of DMA segments allowed in driver.
2277	 */
2278	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2279	    sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2280		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2281		if (m_head == NULL)
2282			break;
2283		/*
2284		 * Pack the data into the transmit ring. If we
2285		 * don't have room, set the OACTIVE flag and wait
2286		 * for the NIC to drain the ring.
2287		 */
2288		if (sf_encap(sc, &m_head)) {
2289			if (m_head == NULL)
2290				break;
2291			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2292			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2293			break;
2294		}
2295
2296		enq++;
2297		/*
2298		 * If there's a BPF listener, bounce a copy of this frame
2299		 * to him.
2300		 */
2301		ETHER_BPF_MTAP(ifp, m_head);
2302	}
2303
2304	if (enq > 0) {
2305		bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2306		    sc->sf_cdata.sf_tx_ring_map,
2307		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2308		/* Kick transmit. */
2309		csr_write_4(sc, SF_TXDQ_PRODIDX,
2310		    sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2311
2312		/* Set a timeout in case the chip goes out to lunch. */
2313		sc->sf_watchdog_timer = 5;
2314	}
2315}
2316
2317static void
2318sf_stop(struct sf_softc *sc)
2319{
2320	struct sf_txdesc	*txd;
2321	struct sf_rxdesc	*rxd;
2322	struct ifnet		*ifp;
2323	int			i;
2324
2325	SF_LOCK_ASSERT(sc);
2326
2327	ifp = sc->sf_ifp;
2328
2329	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2330	sc->sf_link = 0;
2331	callout_stop(&sc->sf_co);
2332	sc->sf_watchdog_timer = 0;
2333
2334	/* Reading the ISR register clears all interrrupts. */
2335	csr_read_4(sc, SF_ISR);
2336	/* Disable further interrupts. */
2337	csr_write_4(sc, SF_IMR, 0);
2338
2339	/* Disable Tx/Rx egine. */
2340	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2341
2342	/* Give hardware chance to drain active DMA cycles. */
2343	DELAY(1000);
2344
2345	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2346	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2347	csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2348	csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2349	csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2350	csr_write_4(sc, SF_TXCQ_CTL, 0);
2351	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2352	csr_write_4(sc, SF_TXDQ_CTL, 0);
2353
2354	/*
2355	 * Free RX and TX mbufs still in the queues.
2356	 */
2357	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2358		rxd = &sc->sf_cdata.sf_rxdesc[i];
2359		if (rxd->rx_m != NULL) {
2360			bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2361			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2362			bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2363			    rxd->rx_dmamap);
2364			m_freem(rxd->rx_m);
2365			rxd->rx_m = NULL;
2366		}
2367        }
2368	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2369		txd = &sc->sf_cdata.sf_txdesc[i];
2370		if (txd->tx_m != NULL) {
2371			bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2372			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2373			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2374			    txd->tx_dmamap);
2375			m_freem(txd->tx_m);
2376			txd->tx_m = NULL;
2377			txd->ndesc = 0;
2378		}
2379        }
2380}
2381
2382static void
2383sf_tick(void *xsc)
2384{
2385	struct sf_softc		*sc;
2386	struct mii_data		*mii;
2387
2388	sc = xsc;
2389	SF_LOCK_ASSERT(sc);
2390	mii = device_get_softc(sc->sf_miibus);
2391	mii_tick(mii);
2392	sf_stats_update(sc);
2393	sf_watchdog(sc);
2394	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2395}
2396
2397/*
2398 * Note: it is important that this function not be interrupted. We
2399 * use a two-stage register access scheme: if we are interrupted in
2400 * between setting the indirect address register and reading from the
2401 * indirect data register, the contents of the address register could
2402 * be changed out from under us.
2403 */
2404static void
2405sf_stats_update(struct sf_softc *sc)
2406{
2407	struct ifnet		*ifp;
2408	struct sf_stats		now, *stats, *nstats;
2409	int			i;
2410
2411	SF_LOCK_ASSERT(sc);
2412
2413	ifp = sc->sf_ifp;
2414	stats = &now;
2415
2416	stats->sf_tx_frames =
2417	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2418	stats->sf_tx_single_colls =
2419	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2420	stats->sf_tx_multi_colls =
2421	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2422	stats->sf_tx_crcerrs =
2423	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2424	stats->sf_tx_bytes =
2425	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2426	stats->sf_tx_deferred =
2427	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2428	stats->sf_tx_late_colls =
2429	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2430	stats->sf_tx_pause_frames =
2431	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2432	stats->sf_tx_control_frames =
2433	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2434	stats->sf_tx_excess_colls =
2435	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2436	stats->sf_tx_excess_defer =
2437	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2438	stats->sf_tx_mcast_frames =
2439	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2440	stats->sf_tx_bcast_frames =
2441	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2442	stats->sf_tx_frames_lost =
2443	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2444	stats->sf_rx_frames =
2445	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2446	stats->sf_rx_crcerrs =
2447	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2448	stats->sf_rx_alignerrs =
2449	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2450	stats->sf_rx_bytes =
2451	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2452	stats->sf_rx_pause_frames =
2453	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2454	stats->sf_rx_control_frames =
2455	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2456	stats->sf_rx_unsup_control_frames =
2457	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2458	stats->sf_rx_giants =
2459	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2460	stats->sf_rx_runts =
2461	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2462	stats->sf_rx_jabbererrs =
2463	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2464	stats->sf_rx_fragments =
2465	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2466	stats->sf_rx_pkts_64 =
2467	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2468	stats->sf_rx_pkts_65_127 =
2469	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2470	stats->sf_rx_pkts_128_255 =
2471	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2472	stats->sf_rx_pkts_256_511 =
2473	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2474	stats->sf_rx_pkts_512_1023 =
2475	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2476	stats->sf_rx_pkts_1024_1518 =
2477	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2478	stats->sf_rx_frames_lost =
2479	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2480	/* Lower 16bits are valid. */
2481	stats->sf_tx_underruns =
2482	    (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2483
2484	/* Empty stats counter registers. */
2485	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2486		csr_write_4(sc, i, 0);
2487
2488	if_inc_counter(ifp, IFCOUNTER_OPACKETS, (u_long)stats->sf_tx_frames);
2489
2490	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2491	    (u_long)stats->sf_tx_single_colls +
2492	    (u_long)stats->sf_tx_multi_colls);
2493
2494	if_inc_counter(ifp, IFCOUNTER_OERRORS,
2495	    (u_long)stats->sf_tx_excess_colls +
2496	    (u_long)stats->sf_tx_excess_defer +
2497	    (u_long)stats->sf_tx_frames_lost);
2498
2499	if_inc_counter(ifp, IFCOUNTER_IPACKETS, (u_long)stats->sf_rx_frames);
2500
2501	if_inc_counter(ifp, IFCOUNTER_IERRORS,
2502	    (u_long)stats->sf_rx_crcerrs +
2503	    (u_long)stats->sf_rx_alignerrs +
2504	    (u_long)stats->sf_rx_giants +
2505	    (u_long)stats->sf_rx_runts +
2506	    (u_long)stats->sf_rx_jabbererrs +
2507	    (u_long)stats->sf_rx_frames_lost);
2508
2509	nstats = &sc->sf_statistics;
2510
2511	nstats->sf_tx_frames += stats->sf_tx_frames;
2512	nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2513	nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2514	nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2515	nstats->sf_tx_bytes += stats->sf_tx_bytes;
2516	nstats->sf_tx_deferred += stats->sf_tx_deferred;
2517	nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2518	nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2519	nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2520	nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2521	nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2522	nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2523	nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2524	nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2525	nstats->sf_rx_frames += stats->sf_rx_frames;
2526	nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2527	nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2528	nstats->sf_rx_bytes += stats->sf_rx_bytes;
2529	nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2530	nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2531	nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2532	nstats->sf_rx_giants += stats->sf_rx_giants;
2533	nstats->sf_rx_runts += stats->sf_rx_runts;
2534	nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2535	nstats->sf_rx_fragments += stats->sf_rx_fragments;
2536	nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2537	nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2538	nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2539	nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2540	nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2541	nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2542	nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2543	nstats->sf_tx_underruns += stats->sf_tx_underruns;
2544}
2545
2546static void
2547sf_watchdog(struct sf_softc *sc)
2548{
2549	struct ifnet		*ifp;
2550
2551	SF_LOCK_ASSERT(sc);
2552
2553	if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2554		return;
2555
2556	ifp = sc->sf_ifp;
2557
2558	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2559	if (sc->sf_link == 0) {
2560		if (bootverbose)
2561			if_printf(sc->sf_ifp, "watchdog timeout "
2562			   "(missed link)\n");
2563	} else
2564		if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2565		    sc->sf_cdata.sf_tx_cnt);
2566
2567	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2568	sf_init_locked(sc);
2569
2570	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2571		sf_start_locked(ifp);
2572}
2573
2574static int
2575sf_shutdown(device_t dev)
2576{
2577	struct sf_softc		*sc;
2578
2579	sc = device_get_softc(dev);
2580
2581	SF_LOCK(sc);
2582	sf_stop(sc);
2583	SF_UNLOCK(sc);
2584
2585	return (0);
2586}
2587
2588static int
2589sf_suspend(device_t dev)
2590{
2591	struct sf_softc		*sc;
2592
2593	sc = device_get_softc(dev);
2594
2595	SF_LOCK(sc);
2596	sf_stop(sc);
2597	sc->sf_suspended = 1;
2598	bus_generic_suspend(dev);
2599	SF_UNLOCK(sc);
2600
2601	return (0);
2602}
2603
2604static int
2605sf_resume(device_t dev)
2606{
2607	struct sf_softc		*sc;
2608	struct ifnet		*ifp;
2609
2610	sc = device_get_softc(dev);
2611
2612	SF_LOCK(sc);
2613	bus_generic_resume(dev);
2614	ifp = sc->sf_ifp;
2615	if ((ifp->if_flags & IFF_UP) != 0)
2616		sf_init_locked(sc);
2617
2618	sc->sf_suspended = 0;
2619	SF_UNLOCK(sc);
2620
2621	return (0);
2622}
2623
2624static int
2625sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2626{
2627	struct sf_softc		*sc;
2628	struct sf_stats		*stats;
2629	int			error;
2630	int			result;
2631
2632	result = -1;
2633	error = sysctl_handle_int(oidp, &result, 0, req);
2634
2635	if (error != 0 || req->newptr == NULL)
2636		return (error);
2637
2638	if (result != 1)
2639		return (error);
2640
2641	sc = (struct sf_softc *)arg1;
2642	stats = &sc->sf_statistics;
2643
2644	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2645	printf("Transmit good frames : %ju\n",
2646	    (uintmax_t)stats->sf_tx_frames);
2647	printf("Transmit good octets : %ju\n",
2648	    (uintmax_t)stats->sf_tx_bytes);
2649	printf("Transmit single collisions : %u\n",
2650	    stats->sf_tx_single_colls);
2651	printf("Transmit multiple collisions : %u\n",
2652	    stats->sf_tx_multi_colls);
2653	printf("Transmit late collisions : %u\n",
2654	    stats->sf_tx_late_colls);
2655	printf("Transmit abort due to excessive collisions : %u\n",
2656	    stats->sf_tx_excess_colls);
2657	printf("Transmit CRC errors : %u\n",
2658	    stats->sf_tx_crcerrs);
2659	printf("Transmit deferrals : %u\n",
2660	    stats->sf_tx_deferred);
2661	printf("Transmit abort due to excessive deferrals : %u\n",
2662	    stats->sf_tx_excess_defer);
2663	printf("Transmit pause control frames : %u\n",
2664	    stats->sf_tx_pause_frames);
2665	printf("Transmit control frames : %u\n",
2666	    stats->sf_tx_control_frames);
2667	printf("Transmit good multicast frames : %u\n",
2668	    stats->sf_tx_mcast_frames);
2669	printf("Transmit good broadcast frames : %u\n",
2670	    stats->sf_tx_bcast_frames);
2671	printf("Transmit frames lost due to internal transmit errors : %u\n",
2672	    stats->sf_tx_frames_lost);
2673	printf("Transmit FIFO underflows : %u\n",
2674	    stats->sf_tx_underruns);
2675	printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2676	printf("Receive good frames : %ju\n",
2677	    (uint64_t)stats->sf_rx_frames);
2678	printf("Receive good octets : %ju\n",
2679	    (uint64_t)stats->sf_rx_bytes);
2680	printf("Receive CRC errors : %u\n",
2681	    stats->sf_rx_crcerrs);
2682	printf("Receive alignment errors : %u\n",
2683	    stats->sf_rx_alignerrs);
2684	printf("Receive pause frames : %u\n",
2685	    stats->sf_rx_pause_frames);
2686	printf("Receive control frames : %u\n",
2687	    stats->sf_rx_control_frames);
2688	printf("Receive control frames with unsupported opcode : %u\n",
2689	    stats->sf_rx_unsup_control_frames);
2690	printf("Receive frames too long : %u\n",
2691	    stats->sf_rx_giants);
2692	printf("Receive frames too short : %u\n",
2693	    stats->sf_rx_runts);
2694	printf("Receive frames jabber errors : %u\n",
2695	    stats->sf_rx_jabbererrs);
2696	printf("Receive frames fragments : %u\n",
2697	    stats->sf_rx_fragments);
2698	printf("Receive packets 64 bytes : %ju\n",
2699	    (uint64_t)stats->sf_rx_pkts_64);
2700	printf("Receive packets 65 to 127 bytes : %ju\n",
2701	    (uint64_t)stats->sf_rx_pkts_65_127);
2702	printf("Receive packets 128 to 255 bytes : %ju\n",
2703	    (uint64_t)stats->sf_rx_pkts_128_255);
2704	printf("Receive packets 256 to 511 bytes : %ju\n",
2705	    (uint64_t)stats->sf_rx_pkts_256_511);
2706	printf("Receive packets 512 to 1023 bytes : %ju\n",
2707	    (uint64_t)stats->sf_rx_pkts_512_1023);
2708	printf("Receive packets 1024 to 1518 bytes : %ju\n",
2709	    (uint64_t)stats->sf_rx_pkts_1024_1518);
2710	printf("Receive frames lost due to internal receive errors : %u\n",
2711	    stats->sf_rx_frames_lost);
2712	printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2713
2714	return (error);
2715}
2716
2717static int
2718sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2719{
2720	int error, value;
2721
2722	if (!arg1)
2723		return (EINVAL);
2724	value = *(int *)arg1;
2725	error = sysctl_handle_int(oidp, &value, 0, req);
2726	if (error || !req->newptr)
2727		return (error);
2728	if (value < low || value > high)
2729		return (EINVAL);
2730	*(int *)arg1 = value;
2731
2732	return (0);
2733}
2734
2735static int
2736sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2737{
2738
2739	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
2740}
2741