/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 221407 2011-05-03 19:51:29Z marius $");

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and reduced CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types. Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements. This driver makes use of type 2 transmit frame
 * descriptors to take full advantage of fragmented packet buffers
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else). The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries. The receive buffer alignment
 * causes problems on strict-alignment architectures, where the
 * packet payload should be longword aligned. There is no simple way
 * around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
 *
 * The Starfire has no internal transceiver, relying instead on an
 * external MII-based transceiver. Accessing registers on external
 * PHYs is done through a special register map rather than with the
 * usual bitbang MDIO method.
 *
 * Accessing the registers on the Starfire is a little tricky. The
 * Starfire has a 512K internal register space. When programmed for
 * PCI memory mapped mode, the entire register space can be accessed
 * directly. However, in I/O space mode, only 256 bytes are directly
 * mapped into PCI I/O space. The other registers can be accessed
 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
 * registers inside the 256-byte I/O window.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/sf/if_sfreg.h>
#include <dev/sf/starfire_rx.h>
#include <dev/sf/starfire_tx.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(sf, pci, 1, 1, 1);
MODULE_DEPEND(sf, ether, 1, 1, 1);
MODULE_DEPEND(sf, miibus, 1, 1, 1);

#undef	SF_GFP_DEBUG
#define	SF_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/* Define this to activate partial TCP/UDP checksum offload. */
#undef	SF_PARTIAL_CSUM_SUPPORT

static struct sf_type sf_devs[] = {
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
};

static int sf_probe(device_t);
static int sf_attach(device_t);
static int sf_detach(device_t);
static int sf_shutdown(device_t);
static int sf_suspend(device_t);
static int sf_resume(device_t);
static void sf_intr(void *);
static void sf_tick(void *);
static void sf_stats_update(struct sf_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void sf_fixup_rx(struct mbuf *);
#endif
static int sf_rxeof(struct sf_softc *);
static void sf_txeof(struct sf_softc *);
static int sf_encap(struct sf_softc *, struct mbuf **);
static void sf_start(struct ifnet *);
static void sf_start_locked(struct ifnet *);
static int sf_ioctl(struct ifnet *, u_long, caddr_t);
static void sf_download_fw(struct sf_softc *);
static void sf_init(void *);
static void sf_init_locked(struct sf_softc *);
static void sf_stop(struct sf_softc *);
static void sf_watchdog(struct sf_softc *);
static int sf_ifmedia_upd(struct ifnet *);
static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sf_reset(struct sf_softc *);
static int sf_dma_alloc(struct sf_softc *);
static void sf_dma_free(struct sf_softc *);
static int sf_init_rx_ring(struct sf_softc *);
static void sf_init_tx_ring(struct sf_softc *);
static int sf_newbuf(struct sf_softc *, int);
static void sf_rxfilter(struct sf_softc *);
static int sf_setperf(struct sf_softc *, int, uint8_t *);
static int sf_sethash(struct sf_softc *, caddr_t, int);
#ifdef notdef
static int sf_setvlan(struct sf_softc *, int, uint32_t);
#endif

static uint8_t sf_read_eeprom(struct sf_softc *, int);

static int sf_miibus_readreg(device_t, int, int);
static int sf_miibus_writereg(device_t, int, int, int);
static void sf_miibus_statchg(device_t);
static void sf_link_task(void *, int);
#ifdef DEVICE_POLLING
static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

static uint32_t csr_read_4(struct sf_softc *, int);
static void csr_write_4(struct sf_softc *, int, uint32_t);
static void sf_txthresh_adjust(struct sf_softc *);
static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t sf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sf_probe),
	DEVMETHOD(device_attach,	sf_attach),
	DEVMETHOD(device_detach,	sf_detach),
	DEVMETHOD(device_shutdown,	sf_shutdown),
	DEVMETHOD(device_suspend,	sf_suspend),
	DEVMETHOD(device_resume,	sf_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sf_miibus_statchg),

	{ NULL, NULL }
};

static driver_t sf_driver = {
	"sf",
	sf_methods,
	sizeof(struct sf_softc),
};

static devclass_t sf_devclass;

DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);

#define SF_SETBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))

#define SF_CLRBIT(sc, reg, x)				\
	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))
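
/*
 * For example, SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB) (as
 * used in sf_ioctl() below) expands to a read-modify-write through
 * csr_read_4()/csr_write_4(), so it works in both the memory mapped
 * and the indirect I/O register access modes.
 */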

static uint32_t
csr_read_4(struct sf_softc *sc, int reg)
{
	uint32_t		val;

	if (sc->sf_restype == SYS_RES_MEMORY)
		val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
	}

	return (val);
}

static uint8_t
sf_read_eeprom(struct sf_softc *sc, int reg)
{
	uint8_t		val;

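	/*
	 * The EEPROM contents are exposed in the register space as
	 * 32-bit words starting at SF_EEADDR_BASE; fetch the word that
	 * holds byte 'reg' and shift the requested byte down.
	 */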
	val = (csr_read_4(sc, SF_EEADDR_BASE +
	    (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;

	return (val);
}

static void
csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
{

	if (sc->sf_restype == SYS_RES_MEMORY)
		CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
	}
}

/*
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx.' The perfect filter only has 16 entries so do
 * some sanity tests.
 */
static int
sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
{

	if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
		return (EINVAL);

	if (mac == NULL)
		return (EINVAL);

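	/*
	 * Each perfect filter entry occupies three 32-bit registers,
	 * each holding 16 bits of the address; note that the address
	 * is written in reverse byte order.
	 */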
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));

	return (0);
}

/*
 * Set the bit in the 512-bit hash table that corresponds to the
 * specified mac address 'mac.' If 'prio' is nonzero, update the
 * priority hash table instead of the filter hash table.
 */
static int
sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
{
	uint32_t		h;

	if (mac == NULL)
		return (EINVAL);

	h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;
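	/*
	 * The upper 9 bits of the big-endian CRC select one of the 512
	 * table bits: bits 8-4 pick one of 32 hash table registers and
	 * bits 3-0 pick the bit within that register.
	 */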

	if (prio) {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	} else {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	}

	return (0);
}

#ifdef notdef
/*
 * Set a VLAN tag in the receive filter.
 */
static int
sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan)
{

	if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
		return (EINVAL);

	csr_write_4(sc, SF_RXFILT_HASH_BASE +
	    (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);

	return (0);
}
#endif

static int
sf_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sf_softc		*sc;
	int			i;
	uint32_t		val = 0;

	sc = device_get_softc(dev);

	for (i = 0; i < SF_TIMEOUT; i++) {
		val = csr_read_4(sc, SF_PHY_REG(phy, reg));
		if ((val & SF_MII_DATAVALID) != 0)
			break;
	}

	if (i == SF_TIMEOUT)
		return (0);

	val &= SF_MII_DATAPORT;
	if (val == 0xffff)
		return (0);

	return (val);
}

static int
sf_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct sf_softc		*sc;
	int			i;
	int			busy;

	sc = device_get_softc(dev);

	csr_write_4(sc, SF_PHY_REG(phy, reg), val);

	for (i = 0; i < SF_TIMEOUT; i++) {
		busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
		if ((busy & SF_MII_BUSY) == 0)
			break;
	}

	return (0);
}

static void
sf_miibus_statchg(device_t dev)
{
	struct sf_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task);
}

static void
sf_link_task(void *arg, int pending)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		val;

	sc = (struct sf_softc *)arg;

	SF_LOCK(sc);

	mii = device_get_softc(sc->sf_miibus);
	ifp = sc->sf_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		SF_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sf_link = 1;
	} else
		sc->sf_link = 0;

	val = csr_read_4(sc, SF_MACCFG_1);
	val &= ~SF_MACCFG1_FULLDUPLEX;
	val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= SF_MACCFG1_FULLDUPLEX;
		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
#ifdef notyet
		/* Configure flow-control bits. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= SF_MACCFG1_RX_FLOWENB;
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
		    IFM_ETH_TXPAUSE) != 0)
			val |= SF_MACCFG1_TX_FLOWENB;
#endif
	} else
		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);

	/* Reset the MAC so the changes take effect. */
	csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET);
	DELAY(1000);
	csr_write_4(sc, SF_MACCFG_1, val);

	val = csr_read_4(sc, SF_TIMER_CTL);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= SF_TIMER_TIMES_TEN;
	else
		val &= ~SF_TIMER_TIMES_TEN;
	csr_write_4(sc, SF_TIMER_CTL, val);

	SF_UNLOCK(sc);
}

static void
sf_rxfilter(struct sf_softc *sc)
{
	struct ifnet		*ifp;
	int			i;
	struct ifmultiaddr	*ifma;
	uint8_t			dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
	uint32_t		rxfilt;

	ifp = sc->sf_ifp;

	/* First zot all the existing filters. */
	for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
		sf_setperf(sc, i, dummy);
	for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1);
	    i += sizeof(uint32_t))
		csr_write_4(sc, i, 0);

	rxfilt = csr_read_4(sc, SF_RXFILT);
	rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= SF_RXFILT_BROAD;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0 ||
	    (ifp->if_flags & IFF_PROMISC) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= SF_RXFILT_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxfilt |= SF_RXFILT_ALLMULTI;
		goto done;
	}

	/* Now program new ones. */
	i = 1;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
	    ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 15 multicast groups
		 * into the perfect filter. For all others,
		 * use the hash table.
		 */
		if (i < SF_RXFILT_PERFECT_CNT) {
			sf_setperf(sc, i,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			i++;
			continue;
		}

		sf_sethash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
	}
	if_maddr_runlock(ifp);

done:
	csr_write_4(sc, SF_RXFILT, rxfilt);
}

/*
 * Set media options.
 */
static int
sf_ifmedia_upd(struct ifnet *ifp)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	SF_LOCK(sc);

	mii = device_get_softc(sc->sf_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	SF_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	SF_LOCK(sc);
	mii = device_get_softc(sc->sf_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SF_UNLOCK(sc);
}

static int
sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sf_softc		*sc;
	struct ifreq		*ifr;
	struct mii_data		*mii;
	int			error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		SF_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_flags ^ sc->sf_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					sf_rxfilter(sc);
			} else {
				if (sc->sf_detach == 0)
					sf_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				sf_stop(sc);
		}
		sc->sf_if_flags = ifp->if_flags;
		SF_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SF_LOCK(sc);
		sf_rxfilter(sc);
		SF_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sf_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(sf_poll, ifp);
				if (error != 0)
					break;
				SF_LOCK(sc);
				/* Disable interrupts. */
				csr_write_4(sc, SF_IMR, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				SF_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				SF_LOCK(sc);
				csr_write_4(sc, SF_IMR, SF_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				SF_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
				SF_LOCK(sc);
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) {
					ifp->if_hwassist |= SF_CSUM_FEATURES;
					SF_SETBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_TXGFP_ENB);
				} else {
					ifp->if_hwassist &= ~SF_CSUM_FEATURES;
					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_TXGFP_ENB);
				}
				SF_UNLOCK(sc);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
				SF_LOCK(sc);
				ifp->if_capenable ^= IFCAP_RXCSUM;
				if ((IFCAP_RXCSUM & ifp->if_capenable) != 0)
					SF_SETBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_RXGFP_ENB);
				else
					SF_CLRBIT(sc, SF_GEN_ETH_CTL,
					    SF_ETHCTL_RXGFP_ENB);
				SF_UNLOCK(sc);
			}
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
sf_reset(struct sf_softc *sc)
{
	int		i;

	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
	DELAY(1000);
	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);

	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);

	for (i = 0; i < SF_TIMEOUT; i++) {
		DELAY(10);
		if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
			break;
	}

	if (i == SF_TIMEOUT)
		device_printf(sc->sf_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We also check the subsystem ID so that we can identify exactly which
 * NIC has been found, if possible.
 */
static int
sf_probe(device_t dev)
{
	struct sf_type		*t;
	uint16_t		vid;
	uint16_t		did;
	uint16_t		sdid;
	int			i;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sdid = pci_get_subdevice(dev);

	t = sf_devs;
	for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) {
		if (vid == t->sf_vid && did == t->sf_did) {
			if (sdid == t->sf_sdid) {
				device_set_desc(dev, t->sf_sname);
				return (BUS_PROBE_DEFAULT);
			}
		}
	}

	if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) {
		/* unknown subdevice */
		device_set_desc(dev, sf_devs[0].sf_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sf_attach(device_t dev)
{
	int			i;
	struct sf_softc		*sc;
	struct ifnet		*ifp;
	uint32_t		reg;
	int			rid, error = 0;
	uint8_t			eaddr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->sf_dev = dev;

	mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0);
	TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over I/O space, as the
	 * hardware requires lots of register accesses to fetch various
	 * producer/consumer indexes during Tx/Rx operation. However,
	 * this requires a large (512K) memory space to map the entire
	 * register space.
	 */
	sc->sf_rid = PCIR_BAR(0);
	sc->sf_restype = SYS_RES_MEMORY;
	sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid,
	    RF_ACTIVE);
	if (sc->sf_res == NULL) {
		reg = pci_read_config(dev, PCIR_BAR(0), 4);
		if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64)
			sc->sf_rid = PCIR_BAR(2);
		else
			sc->sf_rid = PCIR_BAR(1);
		sc->sf_restype = SYS_RES_IOPORT;
		sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype,
		    &sc->sf_rid, RF_ACTIVE);
		if (sc->sf_res == NULL) {
			device_printf(dev, "couldn't allocate resources\n");
			mtx_destroy(&sc->sf_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O");

	reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (reg == 0) {
		/*
		 * If the cache line size is 0, MWI is not used at all, so
		 * set a reasonable default. The AIC-6915 supports 0, 4, 8,
		 * 16, 32 and 64.
		 */
		reg = 16;
		device_printf(dev, "setting PCI cache line size to %u\n", reg);
		pci_write_config(dev, PCIR_CACHELNSZ, reg, 1);
	} else {
		if (bootverbose)
			device_printf(dev, "PCI cache line size : %u\n", reg);
	}
	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Allocate interrupt. */
	rid = 0;
	sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sf_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    sf_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I",
	    "sf interrupt moderation");
	/* Pull in device tunables. */
	sc->sf_int_mod = SF_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_mod", &sc->sf_int_mod);
	if (error == 0) {
		if (sc->sf_int_mod < SF_IM_MIN ||
		    sc->sf_int_mod > SF_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SF_IM_DEFAULT);
			sc->sf_int_mod = SF_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sf_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
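	/* The address bytes are stored in reverse order in the EEPROM. */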
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);

	/* Allocate DMA resources. */
	if (sf_dma_alloc(sc) != 0) {
		error = ENOSPC;
		goto fail;
	}

	sc->sf_txthresh = SF_MIN_TX_THRESHOLD;

	ifp = sc->sf_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd,
	    sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_init = sf_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * With the help of firmware, AIC-6915 supports
	 * Tx/Rx TCP/UDP checksum offload.
	 */
	ifp->if_hwassist = SF_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sf_intr, sc, &sc->sf_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sf_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sf_detach(device_t dev)
{
	struct sf_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->sf_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		SF_LOCK(sc);
		sc->sf_detach = 1;
		sf_stop(sc);
		SF_UNLOCK(sc);
		callout_drain(&sc->sf_co);
		taskqueue_drain(taskqueue_swi, &sc->sf_link_task);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}
	if (sc->sf_miibus) {
		device_delete_child(dev, sc->sf_miibus);
		sc->sf_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->sf_intrhand != NULL)
		bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
	if (sc->sf_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
	if (sc->sf_res != NULL)
		bus_release_resource(dev, sc->sf_restype, sc->sf_rid,
		    sc->sf_res);

	sf_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->sf_mtx);

	return (0);
}

struct sf_dmamap_arg {
	bus_addr_t		sf_busaddr;
};

static void
sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sf_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->sf_busaddr = segs[0].ds_addr;
}

static int
sf_dma_alloc(struct sf_softc *sc)
{
	struct sf_dmamap_arg	ctx;
	struct sf_txdesc	*txd;
	struct sf_rxdesc	*rxd;
	bus_addr_t		lowaddr;
	bus_addr_t		rx_ring_end, rx_cring_end;
	bus_addr_t		tx_ring_end, tx_cring_end;
	int			error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->sf_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_parent_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_TX_DLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_TX_DLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx completion ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_TX_CLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_TX_CLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_cring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Tx completion ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_RX_DLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_RX_DLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx completion ring. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SF_RX_CLIST_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    SF_RX_CLIST_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_cring_tag);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to create Rx completion ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * SF_MAXTXSEGS,	/* maxsize */
	    SF_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_tx_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */
	    SF_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sf_cdata.sf_rx_tag);
	if (error != 0) {
		device_printf(sc->sf_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag,
	    (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag,
	    sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring,
	    SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Tx completion ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
	    (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Tx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
	    SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
	    (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
	    SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Rx completion ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
	    (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Rx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
	    SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;

	/*
	 * The Tx descriptor ring and the Tx completion ring should be
	 * addressed in the same 4GB space. The same rule applies to the
	 * Rx ring and the Rx completion ring. Unfortunately there is no
	 * way to specify this boundary restriction with bus_dma(9), so
	 * just try to allocate without the restriction and check whether
	 * the restriction was satisfied. If not, fall back to 32-bit DMA
	 * addressing mode, which always guarantees the restriction.
	 */
	tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
	tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
	rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
	rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
	if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
	    SF_ADDR_HI(tx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
	    SF_ADDR_HI(tx_ring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
	    SF_ADDR_HI(rx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
	    SF_ADDR_HI(rx_ring_end))) {
		device_printf(sc->sf_dev,
		    "switching to 32bit DMA mode\n");
		sf_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
		txd = &sc->sf_cdata.sf_txdesc[i];
		txd->tx_m = NULL;
		txd->ndesc = 0;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sf_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
	    &sc->sf_cdata.sf_rx_sparemap)) != 0) {
		device_printf(sc->sf_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
		rxd = &sc->sf_cdata.sf_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sf_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
sf_dma_free(struct sf_softc *sc)
{
	struct sf_txdesc	*txd;
	struct sf_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->sf_cdata.sf_tx_ring_tag) {
		if (sc->sf_cdata.sf_tx_ring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag,
			    sc->sf_cdata.sf_tx_ring_map);
		if (sc->sf_cdata.sf_tx_ring_map &&
		    sc->sf_rdata.sf_tx_ring)
			bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag,
			    sc->sf_rdata.sf_tx_ring,
			    sc->sf_cdata.sf_tx_ring_map);
		sc->sf_rdata.sf_tx_ring = NULL;
		sc->sf_cdata.sf_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag);
		sc->sf_cdata.sf_tx_ring_tag = NULL;
	}
	/* Tx completion ring. */
	if (sc->sf_cdata.sf_tx_cring_tag) {
		if (sc->sf_cdata.sf_tx_cring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag,
			    sc->sf_cdata.sf_tx_cring_map);
		if (sc->sf_cdata.sf_tx_cring_map &&
		    sc->sf_rdata.sf_tx_cring)
			bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag,
			    sc->sf_rdata.sf_tx_cring,
			    sc->sf_cdata.sf_tx_cring_map);
		sc->sf_rdata.sf_tx_cring = NULL;
		sc->sf_cdata.sf_tx_cring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag);
		sc->sf_cdata.sf_tx_cring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->sf_cdata.sf_rx_ring_tag) {
		if (sc->sf_cdata.sf_rx_ring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag,
			    sc->sf_cdata.sf_rx_ring_map);
		if (sc->sf_cdata.sf_rx_ring_map &&
		    sc->sf_rdata.sf_rx_ring)
			bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag,
			    sc->sf_rdata.sf_rx_ring,
			    sc->sf_cdata.sf_rx_ring_map);
		sc->sf_rdata.sf_rx_ring = NULL;
		sc->sf_cdata.sf_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag);
		sc->sf_cdata.sf_rx_ring_tag = NULL;
	}
	/* Rx completion ring. */
	if (sc->sf_cdata.sf_rx_cring_tag) {
		if (sc->sf_cdata.sf_rx_cring_map)
			bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag,
			    sc->sf_cdata.sf_rx_cring_map);
		if (sc->sf_cdata.sf_rx_cring_map &&
		    sc->sf_rdata.sf_rx_cring)
			bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag,
			    sc->sf_rdata.sf_rx_cring,
			    sc->sf_cdata.sf_rx_cring_map);
		sc->sf_rdata.sf_rx_cring = NULL;
		sc->sf_cdata.sf_rx_cring_map = NULL;
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag);
		sc->sf_cdata.sf_rx_cring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->sf_cdata.sf_tx_tag) {
		for (i = 0; i < SF_TX_DLIST_CNT; i++) {
			txd = &sc->sf_cdata.sf_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag);
		sc->sf_cdata.sf_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->sf_cdata.sf_rx_tag) {
		for (i = 0; i < SF_RX_DLIST_CNT; i++) {
			rxd = &sc->sf_cdata.sf_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->sf_cdata.sf_rx_sparemap) {
			bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
			    sc->sf_cdata.sf_rx_sparemap);
			sc->sf_cdata.sf_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
		sc->sf_cdata.sf_rx_tag = NULL;
	}

	if (sc->sf_cdata.sf_parent_tag) {
		bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
		sc->sf_cdata.sf_parent_tag = NULL;
	}
}

static int
sf_init_rx_ring(struct sf_softc *sc)
{
	struct sf_ring_data	*rd;
	int			i;

	sc->sf_cdata.sf_rxc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
	bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);

	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
		if (sf_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sf_init_tx_ring(struct sf_softc *sc)
{
	struct sf_ring_data	*rd;
	int			i;

	sc->sf_cdata.sf_tx_prod = 0;
	sc->sf_cdata.sf_tx_cnt = 0;
	sc->sf_cdata.sf_txc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
	bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
		rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
		sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
		sc->sf_cdata.sf_txdesc[i].ndesc = 0;
	}
	rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END);

	bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
	    sc->sf_cdata.sf_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sf_newbuf(struct sf_softc *sc, int idx)
{
	struct sf_rx_rdesc	*desc;
	struct sf_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));
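	/*
	 * Trimming one longword keeps the buffer longword aligned, as
	 * the chip requires, while leaving headroom at the front of the
	 * cluster for the ETHER_ALIGN shift done by sf_fixup_rx().
	 */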

	if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
	    sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sf_cdata.sf_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
	sc->sf_cdata.sf_rx_sparemap = map;
	bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->sf_rdata.sf_rx_ring[idx];
	desc->sf_addr = htole64(segs[0].ds_addr);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sf_fixup_rx(struct mbuf *m)
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

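	/*
	 * Copy the frame backwards one 16-bit word at a time, so that
	 * the payload becomes properly aligned once m_data is pulled
	 * back by ETHER_ALIGN below.
	 */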
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * The Starfire is programmed to use 'normal' mode for packet reception,
 * which means we use the consumer/producer model for both the buffer
 * descriptor queue and the completion descriptor queue. The only problem
 * with this is that it involves a lot of register accesses: we have to
 * read the RX completion consumer and producer indexes and the RX buffer
 * producer index, plus the RX completion consumer and RX buffer producer
 * indexes have to be updated. It would have been easier if Adaptec had
 * put each index in a separate register, especially given that the damn
 * NIC has a 512K register space.
 *
 * In spite of all the lovely features that Adaptec crammed into the 6915,
 * it is marred by one truly stupid design flaw, which is that receive
 * buffer addresses must be aligned on a longword boundary. This forces
 * the packet payload to be unaligned, which is suboptimal on the x86 and
 * completely unusable on the Alpha. Our only recourse is to copy received
 * packets into properly aligned buffers before handing them off.
 */
static int
sf_rxeof(struct sf_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sf_rxdesc	*rxd;
	struct sf_rx_rcdesc	*cur_cmp;
	int			cons, eidx, prog, rx_npkts;
	uint32_t		status, status2;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;
	rx_npkts = 0;

	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * To reduce register access, directly read Receive completion
	 * queue entry.
	 */
	eidx = 0;
	prog = 0;
	for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
		status = le32toh(cur_cmp->sf_rx_status1);
		if (status == 0)
			break;
#ifdef DEVICE_POLLING
		if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		prog++;
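		/*
		 * The end-index field of the completion entry identifies
		 * which Rx buffer queue slot the frame landed in.
		 */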
		eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
		rxd = &sc->sf_cdata.sf_rxdesc[eidx];
		m = rxd->rx_m;

		/*
		 * Note, if_ipackets and if_ierrors counters
		 * are handled in sf_stats_update().
		 */
		if ((status & SF_RXSTAT1_OK) == 0) {
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		if (sf_newbuf(sc, eidx) != 0) {
			ifp->if_iqdrops++;
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		/* AIC-6915 supports TCP/UDP checksum offload. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			status2 = le32toh(cur_cmp->sf_rx_status2);
			/*
			 * Sometimes the AIC-6915 generates an interrupt to
			 * warn of an RxGFP stall with the bad checksum bit
			 * set in the status word. It is not clear what
			 * condition triggers it, but the received packet's
			 * checksum was correct even though the AIC-6915
			 * disagreed. This may be an indication of a
			 * firmware bug. To work around the issue, do not
			 * rely on the bad checksum bit in the status word
			 * and let the upper layer verify the integrity of
			 * the received frame.
			 * Another nice feature of the AIC-6915 is hardware
			 * assistance for checksum calculation: it provides
			 * a partial checksum value for the received frame,
			 * which can be used to accelerate checksum
			 * computation for fragmented TCP/UDP packets. The
			 * upper network stack already takes advantage of
			 * the partial checksum value in the IP reassembly
			 * stage. But the correctness of the partial
			 * hardware checksum assistance is questionable, as
			 * frequent RxGFP stalls are seen on non-fragmented
			 * frames. Given the complexity of the checksum
			 * computation code in the firmware, it is possible
			 * there is another bug in RxGFP, so ignore checksum
			 * assistance for fragmented frames. This can be
			 * changed in the future.
			 */
			if ((status2 & SF_RXSTAT2_FRAG) == 0) {
				if ((status2 & (SF_RXSTAT2_TCP |
				    SF_RXSTAT2_UDP)) != 0) {
					if ((status2 & SF_RXSTAT2_CSUM_OK)) {
						m->m_pkthdr.csum_flags =
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
						m->m_pkthdr.csum_data = 0xffff;
					}
				}
			}
#ifdef SF_PARTIAL_CSUM_SUPPORT
			else if ((status2 & SF_RXSTAT2_FRAG) != 0) {
				if ((status2 & (SF_RXSTAT2_TCP |
				    SF_RXSTAT2_UDP)) != 0) {
					if ((status2 & SF_RXSTAT2_PCSUM_OK)) {
						m->m_pkthdr.csum_flags =
						    CSUM_DATA_VALID;
						m->m_pkthdr.csum_data =
						    (status &
						    SF_RX_CMPDESC_CSUM2);
					}
				}
			}
#endif
		}

		m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN;
#ifndef	__NO_STRICT_ALIGNMENT
		sf_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;

		SF_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SF_LOCK(sc);
		rx_npkts++;

		/* Clear completion status. */
		cur_cmp->sf_rx_status1 = 0;
	}

	if (prog > 0) {
		sc->sf_cdata.sf_rxc_cons = cons;
		bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
		    sc->sf_cdata.sf_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
		    sc->sf_cdata.sf_rx_cring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Update Rx completion Q1 consumer index. */
		csr_write_4(sc, SF_CQ_CONSIDX,
		    (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) |
		    (cons & SF_CQ_CONSIDX_RXQ1));
		/* Update Rx descriptor Q1 ptr. */
		csr_write_4(sc, SF_RXDQ_PTR_Q1,
		    (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) |
		    (eidx & SF_RXDQ_PRODIDX));
	}
	return (rx_npkts);
}

/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8 byte blocks.
 */
static void
sf_txeof(struct sf_softc *sc)
{
	struct sf_txdesc	*txd;
	struct sf_tx_rcdesc	*cur_cmp;
	struct ifnet		*ifp;
	uint32_t		status;
	int			cons, idx, prod;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;

	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cons = sc->sf_cdata.sf_txc_cons;
	prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16;
	if (prod == cons)
		return;

	for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_tx_cring[cons];
		status = le32toh(cur_cmp->sf_tx_status1);
		if (status == 0)
			break;
		switch (status & SF_TX_CMPDESC_TYPE) {
		case SF_TXCMPTYPE_TX:
			/* Tx complete entry. */
			break;
		case SF_TXCMPTYPE_DMA:
			/* DMA complete entry. */
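			/*
			 * Convert the byte offset reported by the chip
			 * into a Tx descriptor index (see the note at
			 * the top of this function).
			 */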
			idx = status & SF_TX_CMPDESC_IDX;
			idx = idx / sizeof(struct sf_tx_rdesc);
			/*
			 * We don't need to check Tx status here.
			 * SF_ISR_TX_LOFIFO intr would handle this.
			 * Note, if_opackets, if_collisions and if_oerrors
			 * counters are handled in sf_stats_update().
			 */
			txd = &sc->sf_cdata.sf_txdesc[idx];
			if (txd->tx_m != NULL) {
				bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
				    txd->tx_dmamap,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
			}
			sc->sf_cdata.sf_tx_cnt -= txd->ndesc;
			KASSERT(sc->sf_cdata.sf_tx_cnt >= 0,
			    ("%s: Active Tx desc counter was garbled\n",
			    __func__));
			txd->ndesc = 0;
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			break;
		default:
			/* This should not happen. */
			device_printf(sc->sf_dev,
			    "unknown Tx completion type : 0x%08x : %d : %d\n",
			    status, cons, prod);
			break;
		}
		cur_cmp->sf_tx_status1 = 0;
	}

	sc->sf_cdata.sf_txc_cons = cons;
	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sf_cdata.sf_tx_cnt == 0)
		sc->sf_watchdog_timer = 0;

	/* Update Tx completion consumer index. */
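	/*
	 * The Tx consumer index lives in the upper 16 bits of
	 * SF_CQ_CONSIDX; preserve the Rx half in the lower 16 bits.
	 */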
1766	csr_write_4(sc, SF_CQ_CONSIDX,
1767	    (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) |
1768	    ((cons << 16) & 0xffff0000));
1769}
1770
1771static void
1772sf_txthresh_adjust(struct sf_softc *sc)
1773{
1774	uint32_t		txfctl;
1775
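	/*
	 * sf_txthresh counts in SF_TX_THRESHOLD_UNIT-byte units, so
	 * the 16-unit step below amounts to 256 bytes.
	 */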
1776	device_printf(sc->sf_dev, "Tx underrun -- ");
1777	if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) {
1778		txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1779		/* Increase the Tx threshold by 256 bytes. */
1780		sc->sf_txthresh += 16;
1781		if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD)
1782			sc->sf_txthresh = SF_MAX_TX_THRESHOLD;
1783		txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1784		txfctl |= sc->sf_txthresh;
1785		printf("increasing Tx threshold to %d bytes\n",
1786		    sc->sf_txthresh * SF_TX_THRESHOLD_UNIT);
1787		csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1788	} else
1789		printf("\n");
1790}
1791
1792#ifdef DEVICE_POLLING
1793static int
1794sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1795{
1796	struct sf_softc		*sc;
1797	uint32_t		status;
1798	int			rx_npkts;
1799
1800	sc = ifp->if_softc;
1801	rx_npkts = 0;
1802	SF_LOCK(sc);
1803
1804	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1805		SF_UNLOCK(sc);
1806		return (rx_npkts);
1807	}
1808
1809	sc->rxcycles = count;
1810	rx_npkts = sf_rxeof(sc);
1811	sf_txeof(sc);
1812	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1813		sf_start_locked(ifp);
1814
1815	if (cmd == POLL_AND_CHECK_STATUS) {
1816		/* Reading the ISR register clears all interrupts. */
1817		status = csr_read_4(sc, SF_ISR);
1818
1819		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1820			if ((status & SF_ISR_STATSOFLOW) != 0)
1821				sf_stats_update(sc);
1822			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1823				sf_txthresh_adjust(sc);
1824			else if ((status & SF_ISR_DMAERR) != 0) {
1825				device_printf(sc->sf_dev,
1826				    "DMA error, resetting\n");
1827				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1828				sf_init_locked(sc);
1829				SF_UNLOCK(sc);
1830				return (rx_npkts);
1831			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1832				sc->sf_statistics.sf_tx_gfp_stall++;
1833#ifdef	SF_GFP_DEBUG
1834				device_printf(sc->sf_dev,
1835				    "TxGFP is not responding!\n");
1836#endif
1837			} else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1838				sc->sf_statistics.sf_rx_gfp_stall++;
1839#ifdef	SF_GFP_DEBUG
1840				device_printf(sc->sf_dev,
1841				    "RxGFP is not responding!\n");
1842#endif
1843			}
1844		}
1845	}
1846
1847	SF_UNLOCK(sc);
1848	return (rx_npkts);
1849}
1850#endif /* DEVICE_POLLING */
1851
1852static void
1853sf_intr(void *arg)
1854{
1855	struct sf_softc		*sc;
1856	struct ifnet		*ifp;
1857	uint32_t		status;
1858
1859	sc = (struct sf_softc *)arg;
1860	SF_LOCK(sc);
1861
1862	if (sc->sf_suspended != 0)
1863		goto done_locked;
1864
1865	/* Reading the ISR register clears all interrupts. */
1866	status = csr_read_4(sc, SF_ISR);
1867	if (status == 0 || status == 0xffffffff ||
1868	    (status & SF_ISR_PCIINT_ASSERTED) == 0)
1869		goto done_locked;
1870
1871	ifp = sc->sf_ifp;
1872#ifdef DEVICE_POLLING
1873	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1874		goto done_locked;
1875#endif
1876	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1877		goto done_locked;
1878
1879	/* Disable interrupts. */
1880	csr_write_4(sc, SF_IMR, 0x00000000);
1881
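	/*
	 * Keep servicing events until a read of the ISR (which clears
	 * it) reports no interrupt causes left.
	 */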
1882	for (; (status & SF_INTRS) != 0;) {
1883		if ((status & SF_ISR_RXDQ1_DMADONE) != 0)
1884			sf_rxeof(sc);
1885
1886		if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE |
1887		    SF_ISR_TX_QUEUEDONE)) != 0)
1888			sf_txeof(sc);
1889
1890		if ((status & SF_ISR_ABNORMALINTR) != 0) {
1891			if ((status & SF_ISR_STATSOFLOW) != 0)
1892				sf_stats_update(sc);
1893			else if ((status & SF_ISR_TX_LOFIFO) != 0)
1894				sf_txthresh_adjust(sc);
1895			else if ((status & SF_ISR_DMAERR) != 0) {
1896				device_printf(sc->sf_dev,
1897				    "DMA error, resetting\n");
1898				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1899				sf_init_locked(sc);
1900				SF_UNLOCK(sc);
1901				return;
1902			} else if ((status & SF_ISR_NO_TX_CSUM) != 0) {
1903				sc->sf_statistics.sf_tx_gfp_stall++;
1904#ifdef	SF_GFP_DEBUG
1905				device_printf(sc->sf_dev,
1906				    "TxGFP is not responding!\n");
1907#endif
1908			} else if ((status & SF_ISR_RXGFP_NORESP) != 0) {
1910				sc->sf_statistics.sf_rx_gfp_stall++;
1911#ifdef	SF_GFP_DEBUG
1912				device_printf(sc->sf_dev,
1913				    "RxGFP is not responding!\n");
1914#endif
1915			}
1916		}
1917		/* Reading the ISR register clears all interrupts. */
1918		status = csr_read_4(sc, SF_ISR);
1919	}
1920
1921	/* Re-enable interrupts. */
1922	csr_write_4(sc, SF_IMR, SF_INTRS);
1923
1924	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1925		sf_start_locked(ifp);
1926done_locked:
1927	SF_UNLOCK(sc);
1928}
1929
1930static void
1931sf_download_fw(struct sf_softc *sc)
1932{
1933	uint32_t gfpinst;
1934	int i, ndx;
1935	uint8_t *p;
1936
1937	/*
1938	 * A GFP instruction is 48 bits wide, so each one has to be
1939	 * written in two parts.
1940	 */
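	/*
	 * The six instruction bytes p[0..5] are big-endian: the
	 * low-order 32 bits (p[2..5]) go into the first GFP memory
	 * word, the high-order 16 bits (p[0..1]) into the next one.
	 */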
1941	p = txfwdata;
1942	ndx = 0;
1943	for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) {
1944		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1945		csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst);
1946		gfpinst = p[0] << 8 | p[1];
1947		csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1948		p += SF_GFP_INST_BYTES;
1949		ndx += 2;
1950	}
1951	if (bootverbose)
1952		device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i);
1953
1954	p = rxfwdata;
1955	ndx = 0;
1956	for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) {
1957		gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
1958		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst);
1959		gfpinst = p[0] << 8 | p[1];
1960		csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst);
1961		p += SF_GFP_INST_BYTES;
1962		ndx += 2;
1963	}
1964	if (bootverbose)
1965		device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i);
1966}
1967
1968static void
1969sf_init(void *xsc)
1970{
1971	struct sf_softc		*sc;
1972
1973	sc = (struct sf_softc *)xsc;
1974	SF_LOCK(sc);
1975	sf_init_locked(sc);
1976	SF_UNLOCK(sc);
1977}
1978
1979static void
1980sf_init_locked(struct sf_softc *sc)
1981{
1982	struct ifnet		*ifp;
1983	struct mii_data		*mii;
1984	uint8_t			eaddr[ETHER_ADDR_LEN];
1985	bus_addr_t		addr;
1986	int			i;
1987
1988	SF_LOCK_ASSERT(sc);
1989	ifp = sc->sf_ifp;
1990	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1991		return;
1992	mii = device_get_softc(sc->sf_miibus);
1993
1994	sf_stop(sc);
1995	/* Reset the hardware to a known state. */
1996	sf_reset(sc);
1997
1998	/* Init all the receive filter registers */
1999	for (i = SF_RXFILT_PERFECT_BASE;
2000	    i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t))
2001		csr_write_4(sc, i, 0);
2002
2003	/* Empty stats counter registers. */
2004	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2005		csr_write_4(sc, i, 0);
2006
2007	/* Init our MAC address. */
2008	bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr));
2009	csr_write_4(sc, SF_PAR0,
2010	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2011	csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]);
2012	sf_setperf(sc, 0, eaddr);
2013
2014	if (sf_init_rx_ring(sc) == ENOBUFS) {
2015		device_printf(sc->sf_dev,
2016		    "initialization failed: no memory for rx buffers\n");
2017		return;
2018	}
2019
2020	sf_init_tx_ring(sc);
2021
2022	/*
2023	 * Use 16 perfect address filter slots. Hash only on the
2024	 * multicast destination address and accept matching frames
2025	 * regardless of VLAN ID.
2026	 */
2027	csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN);
2028
2029	/*
2030	 * Set Rx filter.
2031	 */
2032	sf_rxfilter(sc);
2033
2034	/* Init the completion queue indexes. */
2035	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2036	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2037
2038	/* Init the RX completion queue. */
2039	addr = sc->sf_rdata.sf_rx_cring_paddr;
2040	csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr));
2041	csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR);
2042	if (SF_ADDR_HI(addr) != 0)
2043		SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT);
2044	/* Set RX completion queue type 2. */
2045	SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2);
2046	csr_write_4(sc, SF_RXCQ_CTL_2, 0);
2047
2048	/*
2049	 * Init Rx DMA control: report bad packets, use the default
2050	 * RxHighPriority threshold and the default Rx burst size
2051	 * (128 bytes).
2052	 */
2053	SF_SETBIT(sc, SF_RXDMA_CTL,
2054	    SF_RXDMA_REPORTBADPKTS |
2055	    (SF_RXDMA_HIGHPRIO_THRESH << 8) |
2056	    SF_RXDMA_BURST);
2057
2058	/* Init the RX buffer descriptor queue. */
2059	addr = sc->sf_rdata.sf_rx_ring_paddr;
2060	csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr));
2061	csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr));
2062
2063	/* Set RX queue buffer length. */
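	/*
	 * The buffer length occupies the upper 16 bits of the
	 * register; one uint32_t per cluster is held back, presumably
	 * as headroom for the Rx payload alignment fixup.
	 */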
2064	csr_write_4(sc, SF_RXDQ_CTL_1,
2065	    ((MCLBYTES - sizeof(uint32_t)) << 16) |
2066	    SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE);
2067
2068	if (SF_ADDR_HI(addr) != 0)
2069		SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR);
2070	csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
2071	csr_write_4(sc, SF_RXDQ_CTL_2, 0);
2072
2073	/* Init the TX completion queue */
2074	addr = sc->sf_rdata.sf_tx_cring_paddr;
2075	csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR);
2076	if (SF_ADDR_HI(addr) != 0)
2077		SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT);
2078
2079	/* Init the TX buffer descriptor queue. */
2080	addr = sc->sf_rdata.sf_tx_ring_paddr;
2081	csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr));
2082	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2083	csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr));
2084	csr_write_4(sc, SF_TX_FRAMCTL,
2085	    SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh);
2086	csr_write_4(sc, SF_TXDQ_CTL,
2087	    SF_TXDMA_HIPRIO_THRESH << 24 |
2088	    SF_TXSKIPLEN_0BYTES << 16 |
2089	    SF_TXDDMA_BURST << 8 |
2090	    SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT);
2091	if (SF_ADDR_HI(addr) != 0)
2092		SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR);
2093
2094	/* Set VLAN Type register. */
2095	csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN);
2096
2097	/* Set TxPause Timer. */
2098	csr_write_4(sc, SF_TXPAUSETIMER, 0xffff);
2099
2100	/* Enable autopadding of short TX frames. */
2101	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
2102	SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD);
2103	/* Reset the MAC so the changes take effect. */
2104	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2105	DELAY(1000);
2106	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
2107
2108	/* Enable PCI bus master. */
2109	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN);
2110
2111	/* Load StarFire firmware. */
2112	sf_download_fw(sc);
2113
2114	/* Initialize interrupt moderation. */
2115	csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN |
2116	    (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL));
2117
2118#ifdef DEVICE_POLLING
2119	/* Disable interrupts if we are polling. */
2120	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2121		csr_write_4(sc, SF_IMR, 0x00000000);
2122	else
2123#endif
2124	/* Enable interrupts. */
2125	csr_write_4(sc, SF_IMR, SF_INTRS);
2126	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
2127
2128	/* Enable the RX and TX engines. */
2129	csr_write_4(sc, SF_GEN_ETH_CTL,
2130	    SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB |
2131	    SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB);
2132
2133	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2134		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2135	else
2136		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB);
2137	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2138		SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2139	else
2140		SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB);
2141
2142	sc->sf_link = 0;
2143	mii_mediachg(mii);
2144
2145	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2146	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2147
2148	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2149}
2150
2151static int
2152sf_encap(struct sf_softc *sc, struct mbuf **m_head)
2153{
2154	struct sf_txdesc	*txd;
2155	struct sf_tx_rdesc	*desc;
2156	struct mbuf		*m;
2157	bus_dmamap_t		map;
2158	bus_dma_segment_t	txsegs[SF_MAXTXSEGS];
2159	int			error, i, nsegs, prod, si;
2160	int			avail, nskip;
2161
2162	SF_LOCK_ASSERT(sc);
2163
2164	m = *m_head;
2165	prod = sc->sf_cdata.sf_tx_prod;
2166	txd = &sc->sf_cdata.sf_txdesc[prod];
2167	map = txd->tx_dmamap;
2168	error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
2169	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2170	if (error == EFBIG) {
2171		m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
2172		if (m == NULL) {
2173			m_freem(*m_head);
2174			*m_head = NULL;
2175			return (ENOBUFS);
2176		}
2177		*m_head = m;
2178		error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag,
2179		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
2180		if (error != 0) {
2181			m_freem(*m_head);
2182			*m_head = NULL;
2183			return (error);
2184		}
2185	} else if (error != 0)
2186		return (error);
2187	if (nsegs == 0) {
2188		m_freem(*m_head);
2189		*m_head = NULL;
2190		return (EIO);
2191	}
2192
2193	/* Check number of available descriptors. */
2194	avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt;
2195	if (avail < nsegs) {
2196		bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2197		return (ENOBUFS);
2198	}
2199	nskip = 0;
2200	if (prod + nsegs >= SF_TX_DLIST_CNT) {
2201		nskip = SF_TX_DLIST_CNT - prod - 1;
2202		if (avail < nsegs + nskip) {
2203			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map);
2204			return (ENOBUFS);
2205		}
2206	}
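	/*
	 * If the frame would cross the end of the ring, the first
	 * descriptor is written with SF_TX_DESC_END set and the
	 * remaining segments continue at slot 0; the slots skipped at
	 * the tail (nskip) are still charged to sf_tx_cnt so the
	 * free-space accounting stays consistent.
	 */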
2207
2208	bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE);
2209
2210	si = prod;
2211	for (i = 0; i < nsegs; i++) {
2212		desc = &sc->sf_rdata.sf_tx_ring[prod];
2213		desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID |
2214		    (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN));
2215		desc->sf_tx_reserved = 0;
2216		desc->sf_addr = htole64(txsegs[i].ds_addr);
2217		if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) {
2218			/* Queue wraps! */
2219			desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END);
2220			prod = 0;
2221		} else
2222			SF_INC(prod, SF_TX_DLIST_CNT);
2223	}
2224	/* Update producer index. */
2225	sc->sf_cdata.sf_tx_prod = prod;
2226	sc->sf_cdata.sf_tx_cnt += nsegs + nskip;
2227
2228	desc = &sc->sf_rdata.sf_tx_ring[si];
2229	/* Check TCP/UDP checksum offload request. */
2230	if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0)
2231		desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP);
2232	desc->sf_tx_ctrl |=
2233	    htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16));
2234
2235	txd->tx_dmamap = map;
2236	txd->tx_m = m;
2237	txd->ndesc = nsegs + nskip;
2238
2239	return (0);
2240}
2241
2242static void
2243sf_start(struct ifnet *ifp)
2244{
2245	struct sf_softc		*sc;
2246
2247	sc = ifp->if_softc;
2248	SF_LOCK(sc);
2249	sf_start_locked(ifp);
2250	SF_UNLOCK(sc);
2251}
2252
2253static void
2254sf_start_locked(struct ifnet *ifp)
2255{
2256	struct sf_softc		*sc;
2257	struct mbuf		*m_head;
2258	int			enq;
2259
2260	sc = ifp->if_softc;
2261	SF_LOCK_ASSERT(sc);
2262
2263	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2264	    IFF_DRV_RUNNING || sc->sf_link == 0)
2265		return;
2266
2267	/*
2268	 * Since we don't know in advance where a descriptor wrap will
2269	 * occur, keep the number of active Tx descriptors below
2270	 * SF_TX_DLIST_CNT - SF_MAXTXSEGS so a full frame always fits.
2271	 */
2272	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2273	    sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) {
2274		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2275		if (m_head == NULL)
2276			break;
2277		/*
2278		 * Pack the data into the transmit ring. If we
2279		 * don't have room, set the OACTIVE flag and wait
2280		 * for the NIC to drain the ring.
2281		 */
2282		if (sf_encap(sc, &m_head)) {
2283			if (m_head == NULL)
2284				break;
2285			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2286			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2287			break;
2288		}
2289
2290		enq++;
2291		/*
2292		 * If there's a BPF listener, bounce a copy of this frame
2293		 * to him.
2294		 */
2295		ETHER_BPF_MTAP(ifp, m_head);
2296	}
2297
2298	if (enq > 0) {
2299		bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
2300		    sc->sf_cdata.sf_tx_ring_map,
2301		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2302		/* Kick transmit. */
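		/*
		 * The Tx producer index register counts in 8-byte units,
		 * hence the sizeof(struct sf_tx_rdesc) / 8 scaling.
		 */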
2303		csr_write_4(sc, SF_TXDQ_PRODIDX,
2304		    sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8));
2305
2306		/* Set a timeout in case the chip goes out to lunch. */
2307		sc->sf_watchdog_timer = 5;
2308	}
2309}
2310
2311static void
2312sf_stop(struct sf_softc *sc)
2313{
2314	struct sf_txdesc	*txd;
2315	struct sf_rxdesc	*rxd;
2316	struct ifnet		*ifp;
2317	int			i;
2318
2319	SF_LOCK_ASSERT(sc);
2320
2321	ifp = sc->sf_ifp;
2322
2323	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2324	sc->sf_link = 0;
2325	callout_stop(&sc->sf_co);
2326	sc->sf_watchdog_timer = 0;
2327
2328	/* Reading the ISR register clears all interrupts. */
2329	csr_read_4(sc, SF_ISR);
2330	/* Disable further interrupts. */
2331	csr_write_4(sc, SF_IMR, 0);
2332
2333	/* Disable the Tx/Rx engines. */
2334	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
2335
2336	csr_write_4(sc, SF_CQ_CONSIDX, 0);
2337	csr_write_4(sc, SF_CQ_PRODIDX, 0);
2338	csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
2339	csr_write_4(sc, SF_RXDQ_CTL_1, 0);
2340	csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
2341	csr_write_4(sc, SF_TXCQ_CTL, 0);
2342	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
2343	csr_write_4(sc, SF_TXDQ_CTL, 0);
2344
2345	/*
2346	 * Free RX and TX mbufs still in the queues.
2347	 */
2348	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
2349		rxd = &sc->sf_cdata.sf_rxdesc[i];
2350		if (rxd->rx_m != NULL) {
2351			bus_dmamap_sync(sc->sf_cdata.sf_rx_tag,
2352			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2353			bus_dmamap_unload(sc->sf_cdata.sf_rx_tag,
2354			    rxd->rx_dmamap);
2355			m_freem(rxd->rx_m);
2356			rxd->rx_m = NULL;
2357		}
2358	}
2359	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
2360		txd = &sc->sf_cdata.sf_txdesc[i];
2361		if (txd->tx_m != NULL) {
2362			bus_dmamap_sync(sc->sf_cdata.sf_tx_tag,
2363			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2364			bus_dmamap_unload(sc->sf_cdata.sf_tx_tag,
2365			    txd->tx_dmamap);
2366			m_freem(txd->tx_m);
2367			txd->tx_m = NULL;
2368			txd->ndesc = 0;
2369		}
2370	}
2371}
2372
2373static void
2374sf_tick(void *xsc)
2375{
2376	struct sf_softc		*sc;
2377	struct mii_data		*mii;
2378
2379	sc = xsc;
2380	SF_LOCK_ASSERT(sc);
2381	mii = device_get_softc(sc->sf_miibus);
2382	mii_tick(mii);
2383	sf_stats_update(sc);
2384	sf_watchdog(sc);
2385	callout_reset(&sc->sf_co, hz, sf_tick, sc);
2386}
2387
2388/*
2389 * Note: it is important that this function not be interrupted. We
2390 * use a two-stage register access scheme: if we are interrupted in
2391 * between setting the indirect address register and reading from the
2392 * indirect data register, the contents of the address register could
2393 * be changed out from under us.
2394 */
2395static void
2396sf_stats_update(struct sf_softc *sc)
2397{
2398	struct ifnet		*ifp;
2399	struct sf_stats		now, *stats, *nstats;
2400	int			i;
2401
2402	SF_LOCK_ASSERT(sc);
2403
2404	ifp = sc->sf_ifp;
2405	stats = &now;
2406
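	/*
	 * The hardware counters are zeroed right after this snapshot,
	 * so "now" holds the deltas since the last call; they are
	 * folded into the running totals in sf_statistics below.
	 */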
2407	stats->sf_tx_frames =
2408	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES);
2409	stats->sf_tx_single_colls =
2410	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL);
2411	stats->sf_tx_multi_colls =
2412	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL);
2413	stats->sf_tx_crcerrs =
2414	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS);
2415	stats->sf_tx_bytes =
2416	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES);
2417	stats->sf_tx_deferred =
2418	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED);
2419	stats->sf_tx_late_colls =
2420	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL);
2421	stats->sf_tx_pause_frames =
2422	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE);
2423	stats->sf_tx_control_frames =
2424	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME);
2425	stats->sf_tx_excess_colls =
2426	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL);
2427	stats->sf_tx_excess_defer =
2428	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF);
2429	stats->sf_tx_mcast_frames =
2430	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI);
2431	stats->sf_tx_bcast_frames =
2432	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST);
2433	stats->sf_tx_frames_lost =
2434	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST);
2435	stats->sf_rx_frames =
2436	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES);
2437	stats->sf_rx_crcerrs =
2438	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS);
2439	stats->sf_rx_alignerrs =
2440	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS);
2441	stats->sf_rx_bytes =
2442	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES);
2443	stats->sf_rx_pause_frames =
2444	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE);
2445	stats->sf_rx_control_frames =
2446	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME);
2447	stats->sf_rx_unsup_control_frames =
2448	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME);
2449	stats->sf_rx_giants =
2450	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS);
2451	stats->sf_rx_runts =
2452	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS);
2453	stats->sf_rx_jabbererrs =
2454	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER);
2455	stats->sf_rx_fragments =
2456	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS);
2457	stats->sf_rx_pkts_64 =
2458	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64);
2459	stats->sf_rx_pkts_65_127 =
2460	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127);
2461	stats->sf_rx_pkts_128_255 =
2462	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255);
2463	stats->sf_rx_pkts_256_511 =
2464	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511);
2465	stats->sf_rx_pkts_512_1023 =
2466	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023);
2467	stats->sf_rx_pkts_1024_1518 =
2468	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518);
2469	stats->sf_rx_frames_lost =
2470	    csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST);
2471	/* Only the lower 16 bits are valid. */
2472	stats->sf_tx_underruns =
2473	    (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff);
2474
2475	/* Empty stats counter registers. */
2476	for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t))
2477		csr_write_4(sc, i, 0);
2478
2479	ifp->if_opackets += (u_long)stats->sf_tx_frames;
2480
2481	ifp->if_collisions += (u_long)stats->sf_tx_single_colls +
2482	    (u_long)stats->sf_tx_multi_colls;
2483
2484	ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls +
2485	    (u_long)stats->sf_tx_excess_defer +
2486	    (u_long)stats->sf_tx_frames_lost;
2487
2488	ifp->if_ipackets += (u_long)stats->sf_rx_frames;
2489
2490	ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs +
2491	    (u_long)stats->sf_rx_alignerrs +
2492	    (u_long)stats->sf_rx_giants +
2493	    (u_long)stats->sf_rx_runts +
2494	    (u_long)stats->sf_rx_jabbererrs +
2495	    (u_long)stats->sf_rx_frames_lost;
2496
2497	nstats = &sc->sf_statistics;
2498
2499	nstats->sf_tx_frames += stats->sf_tx_frames;
2500	nstats->sf_tx_single_colls += stats->sf_tx_single_colls;
2501	nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls;
2502	nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs;
2503	nstats->sf_tx_bytes += stats->sf_tx_bytes;
2504	nstats->sf_tx_deferred += stats->sf_tx_deferred;
2505	nstats->sf_tx_late_colls += stats->sf_tx_late_colls;
2506	nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames;
2507	nstats->sf_tx_control_frames += stats->sf_tx_control_frames;
2508	nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls;
2509	nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer;
2510	nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames;
2511	nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames;
2512	nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost;
2513	nstats->sf_rx_frames += stats->sf_rx_frames;
2514	nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs;
2515	nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs;
2516	nstats->sf_rx_bytes += stats->sf_rx_bytes;
2517	nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames;
2518	nstats->sf_rx_control_frames += stats->sf_rx_control_frames;
2519	nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames;
2520	nstats->sf_rx_giants += stats->sf_rx_giants;
2521	nstats->sf_rx_runts += stats->sf_rx_runts;
2522	nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs;
2523	nstats->sf_rx_fragments += stats->sf_rx_fragments;
2524	nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64;
2525	nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127;
2526	nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255;
2527	nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511;
2528	nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023;
2529	nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518;
2530	nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost;
2531	nstats->sf_tx_underruns += stats->sf_tx_underruns;
2532}
2533
2534static void
2535sf_watchdog(struct sf_softc *sc)
2536{
2537	struct ifnet		*ifp;
2538
2539	SF_LOCK_ASSERT(sc);
2540
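	/*
	 * A timer value of 0 means the watchdog is disarmed; otherwise
	 * count down and time out only when it reaches zero.
	 */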
2541	if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer)
2542		return;
2543
2544	ifp = sc->sf_ifp;
2545
2546	ifp->if_oerrors++;
2547	if (sc->sf_link == 0) {
2548		if (bootverbose)
2549			if_printf(sc->sf_ifp, "watchdog timeout "
2550			   "(missed link)\n");
2551	} else
2552		if_printf(ifp, "watchdog timeout, %d Tx descs are active\n",
2553		    sc->sf_cdata.sf_tx_cnt);
2554
2555	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2556	sf_init_locked(sc);
2557
2558	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2559		sf_start_locked(ifp);
2560}
2561
2562static int
2563sf_shutdown(device_t dev)
2564{
2565	struct sf_softc		*sc;
2566
2567	sc = device_get_softc(dev);
2568
2569	SF_LOCK(sc);
2570	sf_stop(sc);
2571	SF_UNLOCK(sc);
2572
2573	return (0);
2574}
2575
2576static int
2577sf_suspend(device_t dev)
2578{
2579	struct sf_softc		*sc;
2580
2581	sc = device_get_softc(dev);
2582
2583	SF_LOCK(sc);
2584	sf_stop(sc);
2585	sc->sf_suspended = 1;
2586	bus_generic_suspend(dev);
2587	SF_UNLOCK(sc);
2588
2589	return (0);
2590}
2591
2592static int
2593sf_resume(device_t dev)
2594{
2595	struct sf_softc		*sc;
2596	struct ifnet		*ifp;
2597
2598	sc = device_get_softc(dev);
2599
2600	SF_LOCK(sc);
2601	bus_generic_resume(dev);
2602	ifp = sc->sf_ifp;
2603	if ((ifp->if_flags & IFF_UP) != 0)
2604		sf_init_locked(sc);
2605
2606	sc->sf_suspended = 0;
2607	SF_UNLOCK(sc);
2608
2609	return (0);
2610}
2611
2612static int
2613sf_sysctl_stats(SYSCTL_HANDLER_ARGS)
2614{
2615	struct sf_softc		*sc;
2616	struct sf_stats		*stats;
2617	int			error;
2618	int			result;
2619
2620	result = -1;
2621	error = sysctl_handle_int(oidp, &result, 0, req);
2622
2623	if (error != 0 || req->newptr == NULL)
2624		return (error);
2625
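	/* The statistics dump is triggered by writing 1 to this node. */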
2626	if (result != 1)
2627		return (error);
2628
2629	sc = (struct sf_softc *)arg1;
2630	stats = &sc->sf_statistics;
2631
2632	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
2633	printf("Transmit good frames : %ju\n",
2634	    (uintmax_t)stats->sf_tx_frames);
2635	printf("Transmit good octets : %ju\n",
2636	    (uintmax_t)stats->sf_tx_bytes);
2637	printf("Transmit single collisions : %u\n",
2638	    stats->sf_tx_single_colls);
2639	printf("Transmit multiple collisions : %u\n",
2640	    stats->sf_tx_multi_colls);
2641	printf("Transmit late collisions : %u\n",
2642	    stats->sf_tx_late_colls);
2643	printf("Transmit abort due to excessive collisions : %u\n",
2644	    stats->sf_tx_excess_colls);
2645	printf("Transmit CRC errors : %u\n",
2646	    stats->sf_tx_crcerrs);
2647	printf("Transmit deferrals : %u\n",
2648	    stats->sf_tx_deferred);
2649	printf("Transmit abort due to excessive deferrals : %u\n",
2650	    stats->sf_tx_excess_defer);
2651	printf("Transmit pause control frames : %u\n",
2652	    stats->sf_tx_pause_frames);
2653	printf("Transmit control frames : %u\n",
2654	    stats->sf_tx_control_frames);
2655	printf("Transmit good multicast frames : %u\n",
2656	    stats->sf_tx_mcast_frames);
2657	printf("Transmit good broadcast frames : %u\n",
2658	    stats->sf_tx_bcast_frames);
2659	printf("Transmit frames lost due to internal transmit errors : %u\n",
2660	    stats->sf_tx_frames_lost);
2661	printf("Transmit FIFO underflows : %u\n",
2662	    stats->sf_tx_underruns);
2663	printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall);
2664	printf("Receive good frames : %ju\n",
2665	    (uint64_t)stats->sf_rx_frames);
2666	printf("Receive good octets : %ju\n",
2667	    (uint64_t)stats->sf_rx_bytes);
2668	printf("Receive CRC errors : %u\n",
2669	    stats->sf_rx_crcerrs);
2670	printf("Receive alignment errors : %u\n",
2671	    stats->sf_rx_alignerrs);
2672	printf("Receive pause frames : %u\n",
2673	    stats->sf_rx_pause_frames);
2674	printf("Receive control frames : %u\n",
2675	    stats->sf_rx_control_frames);
2676	printf("Receive control frames with unsupported opcode : %u\n",
2677	    stats->sf_rx_unsup_control_frames);
2678	printf("Receive frames too long : %u\n",
2679	    stats->sf_rx_giants);
2680	printf("Receive frames too short : %u\n",
2681	    stats->sf_rx_runts);
2682	printf("Receive frames jabber errors : %u\n",
2683	    stats->sf_rx_jabbererrs);
2684	printf("Receive frames fragments : %u\n",
2685	    stats->sf_rx_fragments);
2686	printf("Receive packets 64 bytes : %ju\n",
2687	    (uint64_t)stats->sf_rx_pkts_64);
2688	printf("Receive packets 65 to 127 bytes : %ju\n",
2689	    (uint64_t)stats->sf_rx_pkts_65_127);
2690	printf("Receive packets 128 to 255 bytes : %ju\n",
2691	    (uint64_t)stats->sf_rx_pkts_128_255);
2692	printf("Receive packets 256 to 511 bytes : %ju\n",
2693	    (uint64_t)stats->sf_rx_pkts_256_511);
2694	printf("Receive packets 512 to 1023 bytes : %ju\n",
2695	    (uint64_t)stats->sf_rx_pkts_512_1023);
2696	printf("Receive packets 1024 to 1518 bytes : %ju\n",
2697	    (uint64_t)stats->sf_rx_pkts_1024_1518);
2698	printf("Receive frames lost due to internal receive errors : %u\n",
2699	    stats->sf_rx_frames_lost);
2700	printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall);
2701
2702	return (error);
2703}
2704
2705static int
2706sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2707{
2708	int error, value;
2709
2710	if (!arg1)
2711		return (EINVAL);
2712	value = *(int *)arg1;
2713	error = sysctl_handle_int(oidp, &value, 0, req);
2714	if (error || !req->newptr)
2715		return (error);
2716	if (value < low || value > high)
2717		return (EINVAL);
2718	*(int *)arg1 = value;
2719
2720	return (0);
2721}
2722
2723static int
2724sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
2725{
2726
2727	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX));
2728}
2729