/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sk/if_sk.c 176265 2008-02-14 01:25:01Z yongari $");

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
 *
 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The SysKonnect gigabit ethernet adapters consist of two main
 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support. Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers. This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#if 0
#define SK_USEIOSPACE
#endif

#include <dev/sk/if_skreg.h>
#include <dev/sk/xmaciireg.h>
#include <dev/sk/yukonreg.h>

MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/dev/sk/if_sk.c 176265 2008-02-14 01:25:01Z yongari $";
#endif

static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_A1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T_B1,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};

static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static int skc_shutdown(device_t);
static int skc_suspend(device_t);
static int skc_resume(device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
static void sk_yukon_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
static void sk_rxeof(struct sk_if_softc *);
static void sk_jumbo_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
static int sk_encap(struct sk_if_softc *, struct mbuf **);
static void sk_start(struct ifnet *);
static void sk_start_locked(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_locked(struct sk_if_softc *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(void *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
static int sk_newbuf(struct sk_if_softc *, int);
static int sk_jumbo_newbuf(struct sk_if_softc *, int);
static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int sk_dma_alloc(struct sk_if_softc *);
static int sk_dma_jumbo_alloc(struct sk_if_softc *);
static void sk_dma_free(struct sk_if_softc *);
static void sk_dma_jumbo_free(struct sk_if_softc *);
static int sk_init_rx_ring(struct sk_if_softc *);
static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);

static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);

static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
						int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);

static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
						int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);

static uint32_t sk_xmchash(const uint8_t *);
static uint32_t sk_gmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_setmulti(struct sk_if_softc *);
static void sk_setpromisc(struct sk_if_softc *);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);

/* Tunables. */
static int jumbo_disable = 0;
TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
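/*
 * The tunable is picked up from the loader environment, so setting
 * "hw.skc.jumbo_disable=1" in loader.conf(5) suppresses the 9K jumbo
 * receive buffer allocation; sk_ioctl() below then rejects any MTU
 * above SK_MAX_FRAMELEN through the per-interface sk_jumbo_disable
 * flag.
 */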

/*
 * It seems that the SK-NET GENESIS supports a very simple Tx checksum
 * offload capability, and I believe it can generate a 0 checksum value
 * for UDP packets in Tx because the hardware can't differentiate UDP
 * packets from TCP packets. A 0 checksum value for a UDP packet is
 * invalid, as it means the sender didn't perform the checksum
 * computation. For safety I have disabled UDP checksum offload for the
 * moment. Alternatively, we could introduce a LINK0/LINK1 flag as
 * hme(4) did in its Tx checksum offload routine.
 */
#define SK_CSUM_FEATURES	(CSUM_TCP)
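
/*
 * A sketch of the hme(4)-style alternative mentioned above, left under
 * #if 0 because this driver does not implement it: gate UDP checksum
 * offload on the link0 interface flag so the administrator must opt in
 * explicitly. The helper name and the use of IFF_LINK0 here are
 * illustrative assumptions, not part of this driver.
 */
#if 0
#define SK_CSUM_FEATURES_UDP	(CSUM_TCP | CSUM_UDP)

static __inline int
sk_csum_features(ifp)
	struct ifnet		*ifp;
{

	/* Trust the hardware with UDP checksums only when link0 is set. */
	if (ifp->if_flags & IFF_LINK0)
		return (SK_CSUM_FEATURES_UDP);
	return (SK_CSUM_FEATURES);
}
#endif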

/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs. We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY. It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
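/*
 * On a dual port board the resulting device hierarchy is therefore:
 *
 *	skc0 (GEnesis/Yukon controller)
 *	    sk0 (port A MAC) -> miibus0 -> PHY for port A
 *	    sk1 (port B MAC) -> miibus1 -> PHY for port B
 *
 * Single port boards simply omit the second sk/miibus pair.
 */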
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_suspend,	skc_suspend),
	DEVMETHOD(device_resume,	skc_resume),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec sk_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec sk_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)

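/*
 * The sk_win_* accessors below hide the two register access models.
 * In memory-mapped mode (the default) the full register file is
 * visible, so a register offset can be used directly.  With
 * SK_USEIOSPACE defined, a register is instead reached through a
 * window: the window containing it is selected via the Register
 * Address Port and the register is then accessed at its offset
 * within the window, i.e.:
 *
 *	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
 *	val = CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg));
 */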
static u_int32_t
sk_win_read_4(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_4(sc, reg));
#endif
}

static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}

static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}

static int
sk_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct sk_if_softc	*sc_if;
	int			v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_readreg(sc_if, phy, reg);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static int
sk_miibus_writereg(dev, phy, reg, val)
	device_t		dev;
	int			phy, reg, val;
{
	struct sk_if_softc	*sc_if;
	int			v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static void
sk_miibus_statchg(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_marv_miibus_statchg(sc_if);
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return;
}

static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);

	return(i);
}

static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return (ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timed out\n");

	return(0);
}

static void
sk_xmac_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
}

static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t		val;
	int			i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}

static int
sk_marv_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timeout\n");

	return(0);
}

static void
sk_marv_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	return;
}

#define HASH_BITS		6

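/*
 * Both hash functions below fold a multicast address into one of
 * 2^HASH_BITS = 64 bins; in sk_setmulti() bin h then sets bit
 * (h % 32) of hashes[h / 32] before the two words are programmed
 * into the MAC's 64-bit multicast hash table.
 */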
static u_int32_t
sk_xmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	return (~crc & ((1 << HASH_BITS) - 1));
}

/* gmchash is just a big endian crc */
static u_int32_t
sk_gmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	return (crc & ((1 << HASH_BITS) - 1));
}

static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	u_int16_t		*addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, addr[0]);
	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);

	return;
}

static void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = sc_if->sk_ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	u_int16_t		dummy[] = { 0, 0, 0 };
	u_int16_t		maddr[(ETHER_ADDR_LEN+1)/2];

	SK_IF_LOCK_ASSERT(sc_if);

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				sk_setfilt(sc_if, maddr, i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				h = sk_xmchash((const uint8_t *)maddr);
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				h = sk_gmchash((const uint8_t *)maddr);
				break;
			}
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		IF_ADDR_UNLOCK(ifp);
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
			       XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}

static void
sk_setpromisc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	switch(sc->sk_type) {
	case SK_GENESIS:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		} else {
			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		}
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		} else {
			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		}
		break;
	}

	return;
}

static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_rx_cons = 0;

	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1))
			addr = SK_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);
}

static int
sk_init_jumbo_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	bus_addr_t		addr;
	u_int32_t		csum_start;
	int			i;

	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;

	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_jumbo_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		if (sk_jumbo_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_JUMBO_RX_RING_CNT - 1))
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sk_init_tx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_ring_data	*rd;
	struct sk_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	rd = &sc_if->sk_rdata;
	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (i == (SK_TX_RING_CNT - 1))
			addr = SK_TX_RING_ADDR(sc_if, 0);
		else
			addr = SK_TX_RING_ADDR(sc_if, i + 1);
		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

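/*
 * The two discard helpers below let the receive path recycle a buffer
 * in place when its replacement cannot be allocated: rewriting sk_ctl
 * hands the existing mbuf straight back to the chip without touching
 * its DMA mapping, so the ring never loses a slot.
 */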
static __inline void
sk_discard_rxbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;

	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}

static __inline void
sk_discard_jumbo_rxbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;

	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}

static int
sk_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
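	/*
	 * Swap the just-loaded spare map into this ring slot and keep
	 * the slot's old map as the new spare; a load failure above
	 * therefore never leaves the slot without a valid mapping.
	 */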
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}

static int
sk_jumbo_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
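	/*
	 * Same spare map dance as in sk_newbuf(): swap in the loaded
	 * spare map and recycle the old one so the slot stays mapped.
	 */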
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}

/*
 * Set media options.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
sk_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static int
sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error, mask;
	struct mii_data		*mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (sc_if->sk_jumbo_disable != 0 &&
			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
				error = EINVAL;
			else {
				SK_IF_LOCK(sc_if);
				ifp->if_mtu = ifr->ifr_mtu;
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					sk_init_locked(sc_if);
				}
				SK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & IFF_PROMISC) {
					sk_setpromisc(sc_if);
					sk_setmulti(sc_if);
				}
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sk_setmulti(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = SK_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
	device_t		dev;
{
	struct sk_type		*t = sk_devs;

	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			/*
			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
			 * Rev. 3 is supported by re(4).
			 */
			if ((t->sk_vid == VENDORID_LINKSYS) &&
				(t->sk_did == DEVICEID_LINKSYS_EG1032) &&
				(pci_get_subdevice(dev) !=
				 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
				t++;
				continue;
			}
			device_set_desc(dev, t->sk_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
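	/*
	 * For example, with the default SK_IM_DEFAULT microsecond delay
	 * the init value written below works out to
	 * SK_IM_USECS(SK_IM_DEFAULT, sc->sk_int_ticks), i.e. the delay
	 * in microseconds scaled by the chip's tick rate from the
	 * SK_IMTIMER_TICKS_* constants chosen just below.
	 */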
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	if (bootverbose)
		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
	    sc->sk_int_ticks));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}

static int
sk_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * skc_attach() will create a second device instance
	 * for us.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	}

	return (BUS_PROBE_DEFAULT);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port, error;
	u_char			eaddr[6];

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}
	sk_dma_jumbo_alloc(sc_if);

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * SK_GENESIS has a bug in checksum offload - From linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_hwassist = SK_CSUM_FEATURES;
	} else {
		ifp->if_capabilities = 0;
		ifp->if_hwassist = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
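	/*
	 * Example: on a dual MAC board with 1MB of SRAM, chunk is
	 * 1MB / 4 = 256K, giving each port a 256K receive buffer and
	 * a 256K transmit buffer.  All start/end values below are in
	 * 8-byte (u_int64_t) units, which is how the RAM buffer
	 * registers are programmed.
	 */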
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'S') {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}

	SK_IF_UNLOCK(sc_if);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		device_printf(sc_if->sk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
skc_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			error = 0, *port;
	uint8_t			skrs;
	const char		*pname = NULL;
	char			*revstr;

	sc = device_get_softc(dev);
	sc->sk_dev = dev;

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources */
#ifdef SK_USEIOSPACE
	sc->sk_res_spec = sk_res_spec_io;
#else
	sc->sk_res_spec = sk_res_spec_mem;
#endif
	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
	if (error) {
		if (sc->sk_res_spec == sk_res_spec_mem)
			sc->sk_res_spec = sk_res_spec_io;
		else
			sc->sk_res_spec = sk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
			    "I/O");
			goto fail;
		}
	}

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
		"SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
		"int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			device_printf(dev, "unknown ram size: %d\n", skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	/*
	 * Determine whether to name it with the VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to frequently be bogus.
	 */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T_A1:
	case DEVICEID_DLINK_DGE530T_B1:
		/* Stay with VPD PN. */
		(void) pci_get_vpd_ident(dev, &pname);
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			(void) pci_get_vpd_ident(dev, &pname);
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
			"chipver=%02x, rev=%x\n",
			pci_get_vendor(dev), pci_get_device(dev),
			sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if present. */
	if (pname != NULL)
		device_printf(dev, "%s rev. %s(0x%x)\n",
			pname, revstr, sc->sk_rev);

	if (bootverbose) {
		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = sc_if->sk_ifp;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->sk_tick_ch);
		callout_drain(&sc_if->sk_watchdog_ch);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	if (ifp)
		if_free(ifp);
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	sk_dma_jumbo_free(sc_if);
	sk_dma_free(sc_if);
	SK_IF_UNLOCK(sc_if);

	return(0);
}

static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);

	mtx_destroy(&sc->sk_mii_mtx);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}

struct sk_dmamap_arg {
	bus_addr_t	sk_busaddr;
};

static void
sk_dmamap_cb(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	struct sk_dmamap_arg	*ctx;

	if (error != 0)
		return;

	ctx = arg;
	ctx->sk_busaddr = segs[0].ds_addr;
}

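/*
 * sk_dmamap_cb() is the bus_dmamap_load() callback used when loading
 * the descriptor rings: each ring fits in a single segment, so the
 * callback only has to capture its bus address.  A sketch of the
 * usage (tag, map and ring here are illustrative names):
 *
 *	struct sk_dmamap_arg	ctx;
 *
 *	error = bus_dmamap_load(tag, map, ring, size, sk_dmamap_cb,
 *	    &ctx, BUS_DMA_NOWAIT);
 *	ring_busaddr = ctx.sk_busaddr;
 */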
/*
 * Allocate jumbo buffer storage. The SysKonnect adapters support
 * "jumbograms" (9K frames), although SysKonnect doesn't currently
 * use them in their drivers. In order for us to use them, we need
 * large 9K receive buffers, however standard mbuf clusters are only
 * 2048 bytes in size. Consequently, we need to allocate and manage
 * our own jumbo buffer pool. Fortunately, this does not require an
 * excessive amount of additional code.
 */
static int
sk_dma_alloc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_dmamap_arg	ctx;
	struct sk_txdesc	*txd;
	struct sk_rxdesc	*rxd;
	int			error, i;

	/* create parent tag */
	/*
	 * XXX
	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
	 * However bz@ reported that it does not work on amd64 with > 4GB
	 * RAM. Until we have more clues of the breakage, disable DAC mode
	 * by limiting DMA address to be in 32bit address space.
	 */
	error = bus_dma_tag_create(
		    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}

	/* create tag for Tx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    SK_TX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    SK_TX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    SK_RX_RING_SZ,		/* maxsize */
		    1,				/* nsegments */
		    SK_RX_RING_SZ,		/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
		    SK_MAXTXSEGS,		/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MCLBYTES,			/* maxsize */
		    1,				/* nsegments */
		    MCLBYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate Rx DMA tag\n");
2033		goto fail;
2034	}
2035
2036	/* allocate DMA'able memory and load the DMA map for Tx ring */
2037	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2038	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2039	    &sc_if->sk_cdata.sk_tx_ring_map);
2040	if (error != 0) {
2041		device_printf(sc_if->sk_if_dev,
2042		    "failed to allocate DMA'able memory for Tx ring\n");
2043		goto fail;
2044	}
2045
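	/*
	 * With BUS_DMA_NOWAIT the load completes synchronously, so by the
	 * time bus_dmamap_load() returns, sk_dmamap_cb() has already
	 * recorded the ring's single segment address in ctx.
	 */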
2046	ctx.sk_busaddr = 0;
2047	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2048	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2049	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2050	if (error != 0) {
2051		device_printf(sc_if->sk_if_dev,
2052		    "failed to load DMA'able memory for Tx ring\n");
2053		goto fail;
2054	}
2055	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2056
2057	/* allocate DMA'able memory and load the DMA map for Rx ring */
2058	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2059	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2060	    &sc_if->sk_cdata.sk_rx_ring_map);
2061	if (error != 0) {
2062		device_printf(sc_if->sk_if_dev,
2063		    "failed to allocate DMA'able memory for Rx ring\n");
2064		goto fail;
2065	}
2066
2067	ctx.sk_busaddr = 0;
2068	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2069	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2070	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2071	if (error != 0) {
2072		device_printf(sc_if->sk_if_dev,
2073		    "failed to load DMA'able memory for Rx ring\n");
2074		goto fail;
2075	}
2076	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2077
2078	/* create DMA maps for Tx buffers */
2079	for (i = 0; i < SK_TX_RING_CNT; i++) {
2080		txd = &sc_if->sk_cdata.sk_txdesc[i];
2081		txd->tx_m = NULL;
2082		txd->tx_dmamap = NULL;
2083		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2084		    &txd->tx_dmamap);
2085		if (error != 0) {
2086			device_printf(sc_if->sk_if_dev,
2087			    "failed to create Tx dmamap\n");
2088			goto fail;
2089		}
2090	}
2091
2092	/* create DMA maps for Rx buffers */
2093	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2094	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2095		device_printf(sc_if->sk_if_dev,
2096		    "failed to create spare Rx dmamap\n");
2097		goto fail;
2098	}
2099	for (i = 0; i < SK_RX_RING_CNT; i++) {
2100		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2101		rxd->rx_m = NULL;
2102		rxd->rx_dmamap = NULL;
2103		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2104		    &rxd->rx_dmamap);
2105		if (error != 0) {
2106			device_printf(sc_if->sk_if_dev,
2107			    "failed to create Rx dmamap\n");
2108			goto fail;
2109		}
2110	}
2111
2112fail:
2113	return (error);
2114}
2115
2116static int
2117sk_dma_jumbo_alloc(sc_if)
2118	struct sk_if_softc	*sc_if;
2119{
2120	struct sk_dmamap_arg	ctx;
2121	struct sk_rxdesc	*jrxd;
2122	int			error, i;
2123
2124	if (jumbo_disable != 0) {
2125		device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2126		sc_if->sk_jumbo_disable = 1;
2127		return (0);
2128	}
2129	/* create tag for jumbo Rx ring */
2130	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2131		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
2132		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2133		    BUS_SPACE_MAXADDR,		/* highaddr */
2134		    NULL, NULL,			/* filter, filterarg */
2135		    SK_JUMBO_RX_RING_SZ,	/* maxsize */
2136		    1,				/* nsegments */
2137		    SK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2138		    0,				/* flags */
2139		    NULL, NULL,			/* lockfunc, lockarg */
2140		    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2141	if (error != 0) {
2142		device_printf(sc_if->sk_if_dev,
2143		    "failed to allocate jumbo Rx ring DMA tag\n");
2144		goto jumbo_fail;
2145	}
2146
2147	/* create tag for jumbo Rx buffers */
2148	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2149		    1, 0,			/* algnmnt, boundary */
2150		    BUS_SPACE_MAXADDR,		/* lowaddr */
2151		    BUS_SPACE_MAXADDR,		/* highaddr */
2152		    NULL, NULL,			/* filter, filterarg */
2153		    MJUM9BYTES,			/* maxsize */
2154		    1,				/* nsegments */
2155		    MJUM9BYTES,			/* maxsegsize */
2156		    0,				/* flags */
2157		    NULL, NULL,			/* lockfunc, lockarg */
2158		    &sc_if->sk_cdata.sk_jumbo_rx_tag);
2159	if (error != 0) {
2160		device_printf(sc_if->sk_if_dev,
2161		    "failed to allocate jumbo Rx DMA tag\n");
2162		goto jumbo_fail;
2163	}
2164
2165	/* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2166	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2167	    (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
2168	    BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2169	if (error != 0) {
2170		device_printf(sc_if->sk_if_dev,
2171		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2172		goto jumbo_fail;
2173	}
2174
2175	ctx.sk_busaddr = 0;
2176	error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2177	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2178	    sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2179	    &ctx, BUS_DMA_NOWAIT);
2180	if (error != 0) {
2181		device_printf(sc_if->sk_if_dev,
2182		    "failed to load DMA'able memory for jumbo Rx ring\n");
2183		goto jumbo_fail;
2184	}
2185	sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2186
2187	/* create DMA maps for jumbo Rx buffers */
2188	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2189	    &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2190		device_printf(sc_if->sk_if_dev,
2191		    "failed to create spare jumbo Rx dmamap\n");
2192		goto jumbo_fail;
2193	}
2194	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2195		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2196		jrxd->rx_m = NULL;
2197		jrxd->rx_dmamap = NULL;
2198		error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2199		    &jrxd->rx_dmamap);
2200		if (error != 0) {
2201			device_printf(sc_if->sk_if_dev,
2202			    "failed to create jumbo Rx dmamap\n");
2203			goto jumbo_fail;
2204		}
2205	}
2206
2207	return (0);
2208
2209jumbo_fail:
2210	sk_dma_jumbo_free(sc_if);
2211	device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2212	    "resource shortage\n");
2213	sc_if->sk_jumbo_disable = 1;
2214	return (0);
2215}
2216
2217static void
2218sk_dma_free(sc_if)
2219	struct sk_if_softc	*sc_if;
2220{
2221	struct sk_txdesc	*txd;
2222	struct sk_rxdesc	*rxd;
2223	int			i;
2224
2225	/* Tx ring */
2226	if (sc_if->sk_cdata.sk_tx_ring_tag) {
2227		if (sc_if->sk_cdata.sk_tx_ring_map)
2228			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2229			    sc_if->sk_cdata.sk_tx_ring_map);
2230		if (sc_if->sk_cdata.sk_tx_ring_map &&
2231		    sc_if->sk_rdata.sk_tx_ring)
2232			bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2233			    sc_if->sk_rdata.sk_tx_ring,
2234			    sc_if->sk_cdata.sk_tx_ring_map);
2235		sc_if->sk_rdata.sk_tx_ring = NULL;
2236		sc_if->sk_cdata.sk_tx_ring_map = NULL;
2237		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2238		sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2239	}
2240	/* Rx ring */
2241	if (sc_if->sk_cdata.sk_rx_ring_tag) {
2242		if (sc_if->sk_cdata.sk_rx_ring_map)
2243			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2244			    sc_if->sk_cdata.sk_rx_ring_map);
2245		if (sc_if->sk_cdata.sk_rx_ring_map &&
2246		    sc_if->sk_rdata.sk_rx_ring)
2247			bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2248			    sc_if->sk_rdata.sk_rx_ring,
2249			    sc_if->sk_cdata.sk_rx_ring_map);
2250		sc_if->sk_rdata.sk_rx_ring = NULL;
2251		sc_if->sk_cdata.sk_rx_ring_map = NULL;
2252		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2253		sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2254	}
2255	/* Tx buffers */
2256	if (sc_if->sk_cdata.sk_tx_tag) {
2257		for (i = 0; i < SK_TX_RING_CNT; i++) {
2258			txd = &sc_if->sk_cdata.sk_txdesc[i];
2259			if (txd->tx_dmamap) {
2260				bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2261				    txd->tx_dmamap);
2262				txd->tx_dmamap = NULL;
2263			}
2264		}
2265		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2266		sc_if->sk_cdata.sk_tx_tag = NULL;
2267	}
2268	/* Rx buffers */
2269	if (sc_if->sk_cdata.sk_rx_tag) {
2270		for (i = 0; i < SK_RX_RING_CNT; i++) {
2271			rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2272			if (rxd->rx_dmamap) {
2273				bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2274				    rxd->rx_dmamap);
2275				rxd->rx_dmamap = NULL;
2276			}
2277		}
2278		if (sc_if->sk_cdata.sk_rx_sparemap) {
2279			bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2280			    sc_if->sk_cdata.sk_rx_sparemap);
2281			sc_if->sk_cdata.sk_rx_sparemap = NULL;
2282		}
2283		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2284		sc_if->sk_cdata.sk_rx_tag = NULL;
2285	}
2286
2287	if (sc_if->sk_cdata.sk_parent_tag) {
2288		bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2289		sc_if->sk_cdata.sk_parent_tag = NULL;
2290	}
2291}
2292
2293static void
2294sk_dma_jumbo_free(sc_if)
2295	struct sk_if_softc	*sc_if;
2296{
2297	struct sk_rxdesc	*jrxd;
2298	int			i;
2299
2300	/* jumbo Rx ring */
2301	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2302		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
2303			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2304			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2305		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
2306		    sc_if->sk_rdata.sk_jumbo_rx_ring)
2307			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2308			    sc_if->sk_rdata.sk_jumbo_rx_ring,
2309			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2310		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2311		sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
2312		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2313		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2314	}
2315
2316	/* jumbo Rx buffers */
2317	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2318		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2319			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2320			if (jrxd->rx_dmamap) {
2321				bus_dmamap_destroy(
2322				    sc_if->sk_cdata.sk_jumbo_rx_tag,
2323				    jrxd->rx_dmamap);
2324				jrxd->rx_dmamap = NULL;
2325			}
2326		}
2327		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2328			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2329			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2330			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2331		}
2332		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2333		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2334	}
2335}
2336
2337static void
2338sk_txcksum(ifp, m, f)
2339	struct ifnet		*ifp;
2340	struct mbuf		*m;
2341	struct sk_tx_desc	*f;
2342{
2343	struct ip		*ip;
2344	u_int16_t		offset;
2345	u_int8_t 		*p;
2346
2347	offset = sizeof(struct ip) + ETHER_HDR_LEN;
2348	for(; m && m->m_len == 0; m = m->m_next)
2349		;
2350	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2351		if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2352		/* checksum may be corrupted */
2353		goto sendit;
2354	}
2355	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2356		if (m->m_len != ETHER_HDR_LEN) {
2357			if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2358			    __func__);
2359			/* checksum may be corrupted */
2360			goto sendit;
2361		}
2362		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2363			;
2364		if (m == NULL) {
2365			offset = sizeof(struct ip) + ETHER_HDR_LEN;
2366			/* checksum may be corrupted */
2367			goto sendit;
2368		}
2369		ip = mtod(m, struct ip *);
2370	} else {
2371		p = mtod(m, u_int8_t *);
2372		p += ETHER_HDR_LEN;
2373		ip = (struct ip *)p;
2374	}
2375	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2376
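	/*
	 * From the encoding below: the high 16 bits of sk_csum_start hold
	 * the offset at which the hardware starts summing, and the low 16
	 * bits (start offset + csum_data) hold the offset at which the
	 * computed checksum is to be stored.
	 */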
2377sendit:
2378	f->sk_csum_startval = 0;
2379	f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2380	    (offset << 16));
2381}
2382
2383static int
2384sk_encap(sc_if, m_head)
2385        struct sk_if_softc	*sc_if;
2386        struct mbuf		**m_head;
2387{
2388	struct sk_txdesc	*txd;
2389	struct sk_tx_desc	*f = NULL;
2390	struct mbuf		*m;
2391	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
2392	u_int32_t		cflags, frag, si, sk_ctl;
2393	int			error, i, nseg;
2394
2395	SK_IF_LOCK_ASSERT(sc_if);
2396
2397	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2398		return (ENOBUFS);
2399
2400	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2401	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
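	/*
	 * EFBIG means the chain has more segments than the Tx tag allows;
	 * linearize it with m_defrag() and retry the load once.
	 */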
2402	if (error == EFBIG) {
2403		m = m_defrag(*m_head, M_DONTWAIT);
2404		if (m == NULL) {
2405			m_freem(*m_head);
2406			*m_head = NULL;
2407			return (ENOMEM);
2408		}
2409		*m_head = m;
2410		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2411		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2412		if (error != 0) {
2413			m_freem(*m_head);
2414			*m_head = NULL;
2415			return (error);
2416		}
2417	} else if (error != 0)
2418		return (error);
2419	if (nseg == 0) {
2420		m_freem(*m_head);
2421		*m_head = NULL;
2422		return (EIO);
2423	}
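	/* Not enough free descriptors for this chain; unload and back off. */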
2424	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2425		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2426		return (ENOBUFS);
2427	}
2428
2429	m = *m_head;
2430	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2431		cflags = SK_OPCODE_CSUM;
2432	else
2433		cflags = SK_OPCODE_DEFAULT;
2434	si = frag = sc_if->sk_cdata.sk_tx_prod;
2435	for (i = 0; i < nseg; i++) {
2436		f = &sc_if->sk_rdata.sk_tx_ring[frag];
2437		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2438		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2439		sk_ctl = txsegs[i].ds_len | cflags;
2440		if (i == 0) {
2441			if (cflags == SK_OPCODE_CSUM)
2442				sk_txcksum(sc_if->sk_ifp, m, f);
2443			sk_ctl |= SK_TXCTL_FIRSTFRAG;
2444		} else
2445			sk_ctl |= SK_TXCTL_OWN;
2446		f->sk_ctl = htole32(sk_ctl);
2447		sc_if->sk_cdata.sk_tx_cnt++;
2448		SK_INC(frag, SK_TX_RING_CNT);
2449	}
2450	sc_if->sk_cdata.sk_tx_prod = frag;
2451
2452	/* Set EOF on the last descriptor. */
2453	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2454	f = &sc_if->sk_rdata.sk_tx_ring[frag];
2455	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2456
2457	/* Hand the NIC the first descriptor last, so it cannot start mid-build. */
2458	f = &sc_if->sk_rdata.sk_tx_ring[si];
2459	f->sk_ctl |= htole32(SK_TXCTL_OWN);
2460
2461	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2462	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2463	txd->tx_m = m;
2464
2465	/* sync descriptors */
2466	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2467	    BUS_DMASYNC_PREWRITE);
2468	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2469	    sc_if->sk_cdata.sk_tx_ring_map,
2470	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2471
2472	return (0);
2473}
2474
2475static void
2476sk_start(ifp)
2477	struct ifnet		*ifp;
2478{
2479	struct sk_if_softc *sc_if;
2480
2481	sc_if = ifp->if_softc;
2482
2483	SK_IF_LOCK(sc_if);
2484	sk_start_locked(ifp);
2485	SK_IF_UNLOCK(sc_if);
2486
2487	return;
2488}
2489
2490static void
2491sk_start_locked(ifp)
2492	struct ifnet		*ifp;
2493{
2494        struct sk_softc		*sc;
2495        struct sk_if_softc	*sc_if;
2496        struct mbuf		*m_head;
2497	int			enq;
2498
2499	sc_if = ifp->if_softc;
2500	sc = sc_if->sk_softc;
2501
2502	SK_IF_LOCK_ASSERT(sc_if);
2503
2504	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2505	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2506		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2507		if (m_head == NULL)
2508			break;
2509
2510		/*
2511		 * Pack the data into the transmit ring. If we
2512		 * don't have room, set the OACTIVE flag and wait
2513		 * for the NIC to drain the ring.
2514		 */
2515		if (sk_encap(sc_if, &m_head)) {
2516			if (m_head == NULL)
2517				break;
2518			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2519			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2520			break;
2521		}
2522
2523		enq++;
2524		/*
2525		 * If there's a BPF listener, bounce a copy of this frame
2526		 * to him.
2527		 */
2528		BPF_MTAP(ifp, m_head);
2529	}
2530
2531	if (enq > 0) {
2532		/* Transmit */
2533		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2534
2535		/* Set a timeout in case the chip goes out to lunch. */
2536		sc_if->sk_watchdog_timer = 5;
2537	}
2538}
2539
2540
2541static void
2542sk_watchdog(arg)
2543	void			*arg;
2544{
2545	struct sk_if_softc	*sc_if;
2546	struct ifnet		*ifp;
2547
2548	ifp = arg;
2549	sc_if = ifp->if_softc;
2550
2551	SK_IF_LOCK_ASSERT(sc_if);
2552
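	/* Timer disarmed (0) or still counting down: just rearm the callout. */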
2553	if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2554		goto done;
2555
2556	/*
2557	 * Reclaim first as there is a possibility of losing Tx completion
2558	 * interrupts.
2559	 */
2560	sk_txeof(sc_if);
2561	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2562		if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2563		ifp->if_oerrors++;
2564		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2565		sk_init_locked(sc_if);
2566	}
2567
2568done:
2569	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2570
2571	return;
2572}
2573
2574static int
2575skc_shutdown(dev)
2576	device_t		dev;
2577{
2578	struct sk_softc		*sc;
2579
2580	sc = device_get_softc(dev);
2581	SK_LOCK(sc);
2582
2583	/* Turn off the 'driver is loaded' LED. */
2584	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2585
2586	/*
2587	 * Reset the GEnesis controller. Doing this should also
2588	 * assert the resets on the attached XMAC(s).
2589	 */
2590	sk_reset(sc);
2591	SK_UNLOCK(sc);
2592
2593	return (0);
2594}
2595
2596static int
2597skc_suspend(dev)
2598	device_t		dev;
2599{
2600	struct sk_softc		*sc;
2601	struct sk_if_softc	*sc_if0, *sc_if1;
2602	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2603
2604	sc = device_get_softc(dev);
2605
2606	SK_LOCK(sc);
2607
2608	sc_if0 = sc->sk_if[SK_PORT_A];
2609	sc_if1 = sc->sk_if[SK_PORT_B];
2610	if (sc_if0 != NULL)
2611		ifp0 = sc_if0->sk_ifp;
2612	if (sc_if1 != NULL)
2613		ifp1 = sc_if1->sk_ifp;
2614	if (ifp0 != NULL)
2615		sk_stop(sc_if0);
2616	if (ifp1 != NULL)
2617		sk_stop(sc_if1);
2618	sc->sk_suspended = 1;
2619
2620	SK_UNLOCK(sc);
2621
2622	return (0);
2623}
2624
2625static int
2626skc_resume(dev)
2627	device_t		dev;
2628{
2629	struct sk_softc		*sc;
2630	struct sk_if_softc	*sc_if0, *sc_if1;
2631	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2632
2633	sc = device_get_softc(dev);
2634
2635	SK_LOCK(sc);
2636
2637	sc_if0 = sc->sk_if[SK_PORT_A];
2638	sc_if1 = sc->sk_if[SK_PORT_B];
2639	if (sc_if0 != NULL)
2640		ifp0 = sc_if0->sk_ifp;
2641	if (sc_if1 != NULL)
2642		ifp1 = sc_if1->sk_ifp;
2643	if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2644		sk_init_locked(sc_if0);
2645	if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2646		sk_init_locked(sc_if1);
2647	sc->sk_suspended = 0;
2648
2649	SK_UNLOCK(sc);
2650
2651	return (0);
2652}
2653
2654/*
2655 * According to the SK-NET GENESIS data sheet, the hardware can compute
2656 * two Rx checksums at the same time (each checksum start position is
2657 * programmed in the Rx descriptors). However, TCP/UDP checksumming does
2658 * not seem to work, at least on my Yukon hardware. I tried every way I
2659 * could think of to get a correct checksum value but never got one, so
2660 * TCP/UDP checksum offload is disabled for now and only IP checksum
2661 * offload is enabled.
2662 * As the normal IP header is only 20 bytes, this can't be expected to
2663 * increase throughput, but it doesn't appear to hurt performance in my
2664 * testing either. If you have more detailed information on the checksum
2665 * secrets of the hardware in question, please contact yongari@FreeBSD.org
2666 * so TCP/UDP checksum offload support can be added.
2667 */
2668static __inline void
2669sk_rxcksum(ifp, m, csum)
2670	struct ifnet		*ifp;
2671	struct mbuf		*m;
2672	u_int32_t		csum;
2673{
2674	struct ether_header	*eh;
2675	struct ip		*ip;
2676	int32_t			hlen, len, pktlen;
2677	u_int16_t		csum1, csum2, ipcsum;
2678
2679	pktlen = m->m_pkthdr.len;
2680	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2681		return;
2682	eh = mtod(m, struct ether_header *);
2683	if (eh->ether_type != htons(ETHERTYPE_IP))
2684		return;
2685	ip = (struct ip *)(eh + 1);
2686	if (ip->ip_v != IPVERSION)
2687		return;
2688	hlen = ip->ip_hl << 2;
2689	pktlen -= sizeof(struct ether_header);
2690	if (hlen < sizeof(struct ip))
2691		return;
2692	if (ntohs(ip->ip_len) < hlen)
2693		return;
2694	if (ntohs(ip->ip_len) != pktlen)
2695		return;
2696
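	/*
	 * The hardware returns two 16-bit one's-complement sums taken from
	 * the two programmed start positions. Adding csum1 to the one's
	 * complement of csum2 effectively subtracts the region both sums
	 * cover, which should leave just the sum over the IP header; a
	 * valid header checksum then folds to 0xffff.
	 */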
2697	csum1 = htons(csum & 0xffff);
2698	csum2 = htons((csum >> 16) & 0xffff);
2699	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
2700	/* checksum fixup for IP options */
2701	len = hlen - sizeof(struct ip);
2702	if (len > 0) {
2703		/*
2704		 * If the second checksum value were correct we could compute
2705		 * the IP checksum with simple math. Unfortunately the second
2706		 * checksum value comes back wrong, so we can't verify the
2707		 * checksum from it (it seems some magic is needed to obtain the
2708		 * correct value). If the second checksum value were correct, it
2709		 * would also give us the TCP/UDP checksum here, though a
2710		 * pseudo-header checksum calculation would still be needed due
2711		 * to hardware limitations.
2712		 */
2713		return;
2714	}
2715	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2716	if (ipcsum == 0xffff)
2717		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2718}
2719
2720static __inline int
2721sk_rxvalid(sc, stat, len)
2722	struct sk_softc		*sc;
2723	u_int32_t		stat, len;
2724{
2725
2726	if (sc->sk_type == SK_GENESIS) {
2727		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2728		    XM_RXSTAT_BYTES(stat) != len)
2729			return (0);
2730	} else {
2731		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2732		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2733		    YU_RXSTAT_JABBER)) != 0 ||
2734		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2735		    YU_RXSTAT_BYTES(stat) != len)
2736			return (0);
2737	}
2738
2739	return (1);
2740}
2741
2742static void
2743sk_rxeof(sc_if)
2744	struct sk_if_softc	*sc_if;
2745{
2746	struct sk_softc		*sc;
2747	struct mbuf		*m;
2748	struct ifnet		*ifp;
2749	struct sk_rx_desc	*cur_rx;
2750	struct sk_rxdesc	*rxd;
2751	int			cons, prog;
2752	u_int32_t		csum, rxstat, sk_ctl;
2753
2754	sc = sc_if->sk_softc;
2755	ifp = sc_if->sk_ifp;
2756
2757	SK_IF_LOCK_ASSERT(sc_if);
2758
2759	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2760	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2761
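	/*
	 * Walk the ring from the current consumer index, stopping at the
	 * first descriptor the NIC still owns or after one full pass.
	 */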
2762	prog = 0;
2763	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2764	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2765		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2766		sk_ctl = le32toh(cur_rx->sk_ctl);
2767		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2768			break;
2769		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2770		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2771
2772		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2773		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2774		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2775		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2776		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2777		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2778			ifp->if_ierrors++;
2779			sk_discard_rxbuf(sc_if, cons);
2780			continue;
2781		}
2782
2783		m = rxd->rx_m;
2784		csum = le32toh(cur_rx->sk_csum);
2785		if (sk_newbuf(sc_if, cons) != 0) {
2786			ifp->if_iqdrops++;
2787			/* reuse old buffer */
2788			sk_discard_rxbuf(sc_if, cons);
2789			continue;
2790		}
2791		m->m_pkthdr.rcvif = ifp;
2792		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2793		ifp->if_ipackets++;
2794		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2795			sk_rxcksum(ifp, m, csum);
2796		SK_IF_UNLOCK(sc_if);
2797		(*ifp->if_input)(ifp, m);
2798		SK_IF_LOCK(sc_if);
2799	}
2800
2801	if (prog > 0) {
2802		sc_if->sk_cdata.sk_rx_cons = cons;
2803		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2804		    sc_if->sk_cdata.sk_rx_ring_map,
2805		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2806	}
2807}
2808
2809static void
2810sk_jumbo_rxeof(sc_if)
2811	struct sk_if_softc	*sc_if;
2812{
2813	struct sk_softc		*sc;
2814	struct mbuf		*m;
2815	struct ifnet		*ifp;
2816	struct sk_rx_desc	*cur_rx;
2817	struct sk_rxdesc	*jrxd;
2818	int			cons, prog;
2819	u_int32_t		csum, rxstat, sk_ctl;
2820
2821	sc = sc_if->sk_softc;
2822	ifp = sc_if->sk_ifp;
2823
2824	SK_IF_LOCK_ASSERT(sc_if);
2825
2826	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2827	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2828
2829	prog = 0;
2830	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2831	    prog < SK_JUMBO_RX_RING_CNT;
2832	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2833		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2834		sk_ctl = le32toh(cur_rx->sk_ctl);
2835		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2836			break;
2837		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2838		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2839
2840		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2841		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2842		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2843		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2844		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2845		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2846			ifp->if_ierrors++;
2847			sk_discard_jumbo_rxbuf(sc_if, cons);
2848			continue;
2849		}
2850
2851		m = jrxd->rx_m;
2852		csum = le32toh(cur_rx->sk_csum);
2853		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2854			ifp->if_iqdrops++;
2855			/* reuse old buffer */
2856			sk_discard_jumbo_rxbuf(sc_if, cons);
2857			continue;
2858		}
2859		m->m_pkthdr.rcvif = ifp;
2860		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2861		ifp->if_ipackets++;
2862		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2863			sk_rxcksum(ifp, m, csum);
2864		SK_IF_UNLOCK(sc_if);
2865		(*ifp->if_input)(ifp, m);
2866		SK_IF_LOCK(sc_if);
2867	}
2868
2869	if (prog > 0) {
2870		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2871		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2872		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2873		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2874	}
2875}
2876
2877static void
2878sk_txeof(sc_if)
2879	struct sk_if_softc	*sc_if;
2880{
2881	struct sk_softc		*sc;
2882	struct sk_txdesc	*txd;
2883	struct sk_tx_desc	*cur_tx;
2884	struct ifnet		*ifp;
2885	u_int32_t		idx, sk_ctl;
2886
2887	sc = sc_if->sk_softc;
2888	ifp = sc_if->sk_ifp;
2889
2890	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2891	if (txd == NULL)
2892		return;
2893	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2894	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2895	/*
2896	 * Go through our tx ring and free mbufs for those
2897	 * frames that have been sent.
2898	 */
2899	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2900		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2901			break;
2902		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2903		sk_ctl = le32toh(cur_tx->sk_ctl);
2904		if (sk_ctl & SK_TXCTL_OWN)
2905			break;
2906		sc_if->sk_cdata.sk_tx_cnt--;
2907		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2908		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2909			continue;
2910		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2911		    BUS_DMASYNC_POSTWRITE);
2912		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2913
2914		ifp->if_opackets++;
2915		m_freem(txd->tx_m);
2916		txd->tx_m = NULL;
2917		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2918		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2919		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2920	}
2921	sc_if->sk_cdata.sk_tx_cons = idx;
2922	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2923
2924	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2925	    sc_if->sk_cdata.sk_tx_ring_map,
2926	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2927}
2928
2929static void
2930sk_tick(xsc_if)
2931	void			*xsc_if;
2932{
2933	struct sk_if_softc	*sc_if;
2934	struct mii_data		*mii;
2935	struct ifnet		*ifp;
2936	int			i;
2937
2938	sc_if = xsc_if;
2939	ifp = sc_if->sk_ifp;
2940	mii = device_get_softc(sc_if->sk_miibus);
2941
2942	if (!(ifp->if_flags & IFF_UP))
2943		return;
2944
2945	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2946		sk_intr_bcom(sc_if);
2947		return;
2948	}
2949
2950	/*
2951	 * According to SysKonnect, the correct way to verify that
2952	 * the link has come back up is to poll bit 0 of the GPIO
2953	 * register three times. This pin has the signal from the
2954	 * link_sync pin connected to it; if we read the same link
2955	 * state 3 times in a row, we know the link is up.
2956	 */
2957	for (i = 0; i < 3; i++) {
2958		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2959			break;
2960	}
2961
2962	if (i != 3) {
2963		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2964		return;
2965	}
2966
2967	/* Turn the GP0 interrupt back on. */
2968	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2969	SK_XM_READ_2(sc_if, XM_ISR);
2970	mii_tick(mii);
2971	callout_stop(&sc_if->sk_tick_ch);
2972}
2973
2974static void
2975sk_yukon_tick(xsc_if)
2976	void			*xsc_if;
2977{
2978	struct sk_if_softc	*sc_if;
2979	struct mii_data		*mii;
2980
2981	sc_if = xsc_if;
2982	mii = device_get_softc(sc_if->sk_miibus);
2983
2984	mii_tick(mii);
2985	callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2986}
2987
2988static void
2989sk_intr_bcom(sc_if)
2990	struct sk_if_softc	*sc_if;
2991{
2992	struct mii_data		*mii;
2993	struct ifnet		*ifp;
2994	int			status;
2995	mii = device_get_softc(sc_if->sk_miibus);
2996	ifp = sc_if->sk_ifp;
2997
2998	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2999
3000	/*
3001	 * Read the PHY interrupt register to make sure
3002	 * we clear any pending interrupts.
3003	 */
3004	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3005
3006	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3007		sk_init_xmac(sc_if);
3008		return;
3009	}
3010
3011	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3012		int			lstat;
3013		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3014		    BRGPHY_MII_AUXSTS);
3015
3016		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3017			mii_mediachg(mii);
3018			/* Turn off the link LED. */
3019			SK_IF_WRITE_1(sc_if, 0,
3020			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
3021			sc_if->sk_link = 0;
3022		} else if (status & BRGPHY_ISR_LNK_CHG) {
3023			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3024	    		    BRGPHY_MII_IMR, 0xFF00);
3025			mii_tick(mii);
3026			sc_if->sk_link = 1;
3027			/* Turn on the link LED. */
3028			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3029			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3030			    SK_LINKLED_BLINK_OFF);
3031		} else {
3032			mii_tick(mii);
3033			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3034		}
3035	}
3036
3037	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3038
3039	return;
3040}
3041
3042static void
3043sk_intr_xmac(sc_if)
3044	struct sk_if_softc	*sc_if;
3045{
3046	struct sk_softc		*sc;
3047	u_int16_t		status;
3048
3049	sc = sc_if->sk_softc;
3050	status = SK_XM_READ_2(sc_if, XM_ISR);
3051
3052	/*
3053	 * Link has gone down. Start MII tick timeout to
3054	 * watch for link resync.
3055	 */
3056	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3057		if (status & XM_ISR_GP0_SET) {
3058			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3059			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3060		}
3061
3062		if (status & XM_ISR_AUTONEG_DONE) {
3063			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3064		}
3065	}
3066
3067	if (status & XM_IMR_TX_UNDERRUN)
3068		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3069
3070	if (status & XM_IMR_RX_OVERRUN)
3071		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3072
3073	status = SK_XM_READ_2(sc_if, XM_ISR);
3074
3075	return;
3076}
3077
3078static void
3079sk_intr_yukon(sc_if)
3080	struct sk_if_softc	*sc_if;
3081{
3082	u_int8_t status;
3083
3084	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3085	/* RX overrun */
3086	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3087		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3088		    SK_RFCTL_RX_FIFO_OVER);
3089	}
3090	/* TX underrun */
3091	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3092		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
3093		    SK_TFCTL_TX_FIFO_UNDER);
3094	}
3095}
3096
3097static void
3098sk_intr(xsc)
3099	void			*xsc;
3100{
3101	struct sk_softc		*sc = xsc;
3102	struct sk_if_softc	*sc_if0, *sc_if1;
3103	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
3104	u_int32_t		status;
3105
3106	SK_LOCK(sc);
3107
3108	status = CSR_READ_4(sc, SK_ISSR);
3109	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3110		goto done_locked;
3111
3112	sc_if0 = sc->sk_if[SK_PORT_A];
3113	sc_if1 = sc->sk_if[SK_PORT_B];
3114
3115	if (sc_if0 != NULL)
3116		ifp0 = sc_if0->sk_ifp;
3117	if (sc_if1 != NULL)
3118		ifp1 = sc_if1->sk_ifp;
3119
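	/*
	 * Service and acknowledge interrupt sources, re-reading the ISR at
	 * the bottom of the loop, until no enabled source is asserted.
	 */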
3120	for (; (status &= sc->sk_intrmask) != 0;) {
3121		/* Handle receive interrupts first. */
3122		if (status & SK_ISR_RX1_EOF) {
3123			if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3124				sk_jumbo_rxeof(sc_if0);
3125			else
3126				sk_rxeof(sc_if0);
3127			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3128			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3129		}
3130		if (status & SK_ISR_RX2_EOF) {
3131			if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3132				sk_jumbo_rxeof(sc_if1);
3133			else
3134				sk_rxeof(sc_if1);
3135			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3136			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3137		}
3138
3139		/* Then transmit interrupts. */
3140		if (status & SK_ISR_TX1_S_EOF) {
3141			sk_txeof(sc_if0);
3142			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3143		}
3144		if (status & SK_ISR_TX2_S_EOF) {
3145			sk_txeof(sc_if1);
3146			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3147		}
3148
3149		/* Then MAC interrupts. */
3150		if (status & SK_ISR_MAC1 &&
3151		    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3152			if (sc->sk_type == SK_GENESIS)
3153				sk_intr_xmac(sc_if0);
3154			else
3155				sk_intr_yukon(sc_if0);
3156		}
3157
3158		if (status & SK_ISR_MAC2 &&
3159		    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3160			if (sc->sk_type == SK_GENESIS)
3161				sk_intr_xmac(sc_if1);
3162			else
3163				sk_intr_yukon(sc_if1);
3164		}
3165
3166		if (status & SK_ISR_EXTERNAL_REG) {
3167			if (ifp0 != NULL &&
3168			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3169				sk_intr_bcom(sc_if0);
3170			if (ifp1 != NULL &&
3171			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3172				sk_intr_bcom(sc_if1);
3173		}
3174		status = CSR_READ_4(sc, SK_ISSR);
3175	}
3176
3177	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3178
3179	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3180		sk_start_locked(ifp0);
3181	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3182		sk_start_locked(ifp1);
3183
3184done_locked:
3185	SK_UNLOCK(sc);
3186}
3187
3188static void
3189sk_init_xmac(sc_if)
3190	struct sk_if_softc	*sc_if;
3191{
3192	struct sk_softc		*sc;
3193	struct ifnet		*ifp;
3194	u_int16_t		eaddr[(ETHER_ADDR_LEN+1)/2];
3195	struct sk_bcom_hack	bhack[] = {
3196	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3197	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3198	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3199	{ 0, 0 } };
3200
3201	SK_IF_LOCK_ASSERT(sc_if);
3202
3203	sc = sc_if->sk_softc;
3204	ifp = sc_if->sk_ifp;
3205
3206	/* Unreset the XMAC. */
3207	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3208	DELAY(1000);
3209
3210	/* Reset the XMAC's internal state. */
3211	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3212
3213	/* Save the XMAC II revision */
3214	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3215
3216	/*
3217	 * Perform additional initialization for external PHYs,
3218	 * namely for the 1000baseTX cards that use the XMAC's
3219	 * GMII mode.
3220	 */
3221	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3222		int			i = 0;
3223		u_int32_t		val;
3224
3225		/* Take PHY out of reset. */
3226		val = sk_win_read_4(sc, SK_GPIO);
3227		if (sc_if->sk_port == SK_PORT_A)
3228			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3229		else
3230			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3231		sk_win_write_4(sc, SK_GPIO, val);
3232
3233		/* Enable GMII mode on the XMAC. */
3234		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3235
3236		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3237		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3238		DELAY(10000);
3239		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3240		    BRGPHY_MII_IMR, 0xFFF0);
3241
3242		/*
3243		 * Early versions of the BCM5400 apparently have
3244		 * a bug that requires them to have their reserved
3245		 * registers initialized to some magic values. I don't
3246		 * know what the numbers do, I'm just the messenger.
3247		 */
3248		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3249		    == 0x6041) {
3250			while(bhack[i].reg) {
3251				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3252				    bhack[i].reg, bhack[i].val);
3253				i++;
3254			}
3255		}
3256	}
3257
3258	/* Set station address */
3259	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3260	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3261	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3262	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3263	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3264
3265	if (ifp->if_flags & IFF_BROADCAST) {
3266		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3267	} else {
3268		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3269	}
3270
3271	/* We don't need the FCS appended to the packet. */
3272	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3273
3274	/* We want short frames padded to 60 bytes. */
3275	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3276
3277	/*
3278	 * Enable the reception of all error frames. This is
3279	 * a necessary evil due to the design of the XMAC. The
3280	 * XMAC's receive FIFO is only 8K in size, however jumbo
3281	 * frames can be up to 9000 bytes in length. When bad
3282	 * frame filtering is enabled, the XMAC's RX FIFO operates
3283	 * in 'store and forward' mode. For this to work, the
3284	 * entire frame has to fit into the FIFO, but that means
3285	 * that jumbo frames larger than 8192 bytes will be
3286	 * truncated. Disabling all bad frame filtering causes
3287	 * the RX FIFO to operate in streaming mode, in which
3288	 * case the XMAC will start transferring frames out of the
3289	 * RX FIFO as soon as the FIFO threshold is reached.
3290	 */
3291	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3292		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3293		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3294		    XM_MODE_RX_INRANGELEN);
3295		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3296	} else
3297		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3298
3299	/*
3300	 * Bump up the transmit threshold. This helps hold off transmit
3301	 * underruns when we're blasting traffic from both ports at once.
3302	 */
3303	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3304
3305	/* Set promiscuous mode */
3306	sk_setpromisc(sc_if);
3307
3308	/* Set multicast filter */
3309	sk_setmulti(sc_if);
3310
3311	/* Clear and enable interrupts */
3312	SK_XM_READ_2(sc_if, XM_ISR);
3313	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3314		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3315	else
3316		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3317
3318	/* Configure MAC arbiter */
3319	switch(sc_if->sk_xmac_rev) {
3320	case XM_XMAC_REV_B2:
3321		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3322		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3323		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3324		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3325		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3326		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3327		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3328		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3329		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3330		break;
3331	case XM_XMAC_REV_C1:
3332		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3333		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3334		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3335		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3336		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3337		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3338		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3339		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3340		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3341		break;
3342	default:
3343		break;
3344	}
3345	sk_win_write_2(sc, SK_MACARB_CTL,
3346	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3347
3348	sc_if->sk_link = 1;
3349
3350	return;
3351}
3352
3353static void
3354sk_init_yukon(sc_if)
3355	struct sk_if_softc	*sc_if;
3356{
3357	u_int32_t		phy, v;
3358	u_int16_t		reg;
3359	struct sk_softc		*sc;
3360	struct ifnet		*ifp;
3361	int			i;
3362
3363	SK_IF_LOCK_ASSERT(sc_if);
3364
3365	sc = sc_if->sk_softc;
3366	ifp = sc_if->sk_ifp;
3367
3368	if (sc->sk_type == SK_YUKON_LITE &&
3369	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3370		/*
3371		 * Workaround for COMA mode: assert PHY reset.
3372		 * Otherwise the chip will not come out of power-down
3373		 * (COMA) mode correctly.
3374		 */
3375		v = sk_win_read_4(sc, SK_GPIO);
3376		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3377		sk_win_write_4(sc, SK_GPIO, v);
3378	}
3379
3380	/* GMAC and GPHY Reset */
3381	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3382	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3383	DELAY(1000);
3384
3385	if (sc->sk_type == SK_YUKON_LITE &&
3386	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3387		/*
3388		 * Workaround for COMA mode: deassert PHY reset.
3389		 */
3390		v = sk_win_read_4(sc, SK_GPIO);
3391		v |= SK_GPIO_DIR9;
3392		v &= ~SK_GPIO_DAT9;
3393		sk_win_write_4(sc, SK_GPIO, v);
3394	}
3395
3396	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3397		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3398
3399	if (sc->sk_coppertype)
3400		phy |= SK_GPHY_COPPER;
3401	else
3402		phy |= SK_GPHY_FIBER;
3403
3404	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3405	DELAY(1000);
3406	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3407	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3408		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3409
3410	/* unused read of the interrupt source register */
3411	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3412
3413	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3414
3415	/* MIB Counter Clear Mode set */
3416	reg |= YU_PAR_MIB_CLR;
3417	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3418
3419	/* MIB Counter Clear Mode clear */
3420	reg &= ~YU_PAR_MIB_CLR;
3421	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3422
3423	/* receive control reg */
3424	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3425
3426	/* transmit parameter register */
3427	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3428		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3429
3430	/* serial mode register */
3431	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3432	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3433		reg |= YU_SMR_MFL_JUMBO;
3434	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3435
3436	/* Setup Yukon's address */
3437	for (i = 0; i < 3; i++) {
3438		/* Write Source Address 1 (unicast filter) */
3439		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3440			      IF_LLADDR(sc_if->sk_ifp)[i * 2] |
3441			      IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
3442	}
3443
3444	for (i = 0; i < 3; i++) {
3445		reg = sk_win_read_2(sc_if->sk_softc,
3446				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
3447		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
3448	}
3449
3450	/* Set promiscuous mode */
3451	sk_setpromisc(sc_if);
3452
3453	/* Set multicast filter */
3454	sk_setmulti(sc_if);
3455
3456	/* enable interrupt mask for counter overflows */
3457	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3458	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3459	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3460
3461	/* Configure RX MAC FIFO Flush Mask */
3462	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3463	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3464	    YU_RXSTAT_JABBER;
3465	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3466
3467	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3468	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3469		v = SK_TFCTL_OPERATION_ON;
3470	else
3471		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3472	/* Configure RX MAC FIFO */
3473	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3474	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3475
3476	/* Increase the flush threshold to 64 bytes. */
3477	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3478	    SK_RFCTL_FIFO_THRESHOLD + 1);
3479
3480	/* Configure TX MAC FIFO */
3481	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3482	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3483}
3484
3485/*
3486 * Note that to properly initialize any part of the GEnesis chip,
3487 * you first have to take it out of reset mode.
3488 */
3489static void
3490sk_init(xsc)
3491	void			*xsc;
3492{
3493	struct sk_if_softc	*sc_if = xsc;
3494
3495	SK_IF_LOCK(sc_if);
3496	sk_init_locked(sc_if);
3497	SK_IF_UNLOCK(sc_if);
3498
3499	return;
3500}
3501
3502static void
3503sk_init_locked(sc_if)
3504	struct sk_if_softc	*sc_if;
3505{
3506	struct sk_softc		*sc;
3507	struct ifnet		*ifp;
3508	struct mii_data		*mii;
3509	u_int16_t		reg;
3510	u_int32_t		imr;
3511	int			error;
3512
3513	SK_IF_LOCK_ASSERT(sc_if);
3514
3515	ifp = sc_if->sk_ifp;
3516	sc = sc_if->sk_softc;
3517	mii = device_get_softc(sc_if->sk_miibus);
3518
3519	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3520		return;
3521
3522	/* Cancel pending I/O and free all RX/TX buffers. */
3523	sk_stop(sc_if);
3524
3525	if (sc->sk_type == SK_GENESIS) {
3526		/* Configure LINK_SYNC LED */
3527		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3528		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3529			SK_LINKLED_LINKSYNC_ON);
3530
3531		/* Configure RX LED */
3532		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3533			SK_RXLEDCTL_COUNTER_START);
3534
3535		/* Configure TX LED */
3536		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3537			SK_TXLEDCTL_COUNTER_START);
3538	}
3539
3540	/*
3541	 * Configure descriptor poll timer
3542	 *
3543	 * The SK-NET GENESIS data sheet says the Start transmit command can
3544	 * be lost under certain conditions due to CPU/cache-related interim
3545	 * storage problems. The document recommends a polling mechanism that
3546	 * regularly sends a Start transmit command to initiate transfer of
3547	 * ready descriptors. To cope with this, sk(4) now enables the
3548	 * descriptor poll timer to initiate descriptor processing
3549	 * periodically, as defined by SK_DPT_TIMER_MAX. However, sk(4)
3550	 * still issues SK_TXBMU_TX_START to the Tx BMU to get fast
3551	 * execution of the Tx command instead of waiting for the next
3552	 * descriptor polling time. The same rule may apply to the Rx side
3553	 * too, but it seems that is not needed at the moment.
3554	 * Since sk(4) uses descriptor polling only as a last resort, there
3555	 * is no need to set a polling interval shorter than the maximum one.
3556	 */
3557	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3558
3559	/* Configure I2C registers */
3560
3561	/* Configure XMAC(s) */
3562	switch (sc->sk_type) {
3563	case SK_GENESIS:
3564		sk_init_xmac(sc_if);
3565		break;
3566	case SK_YUKON:
3567	case SK_YUKON_LITE:
3568	case SK_YUKON_LP:
3569		sk_init_yukon(sc_if);
3570		break;
3571	}
3572	mii_mediachg(mii);
3573
3574	if (sc->sk_type == SK_GENESIS) {
3575		/* Configure MAC FIFOs */
3576		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3577		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3578		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3579
3580		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3581		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3582		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3583	}
3584
3585	/* Configure transmit arbiter(s) */
3586	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3587	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3588
3589	/* Configure RAMbuffers */
3590	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3591	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3592	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3593	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3594	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3595	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3596
3597	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3598	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3599	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3600	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3601	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3602	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3603	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3604
3605	/* Configure BMUs */
3606	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3607	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3608		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3609		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3610		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3611		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3612	} else {
3613		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3614		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3615		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3616		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3617	}
3618
3619	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3620	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3621	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3622	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3623	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3624
3625	/* Init descriptors */
3626	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3627		error = sk_init_jumbo_rx_ring(sc_if);
3628	else
3629		error = sk_init_rx_ring(sc_if);
3630	if (error != 0) {
3631		device_printf(sc_if->sk_if_dev,
3632		    "initialization failed: no memory for rx buffers\n");
3633		sk_stop(sc_if);
3634		return;
3635	}
3636	sk_init_tx_ring(sc_if);
3637
3638	/* Set interrupt moderation if changed via sysctl. */
3639	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3640	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3641		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3642		    sc->sk_int_ticks));
3643		if (bootverbose)
3644			device_printf(sc_if->sk_if_dev,
3645			    "interrupt moderation is %d us.\n",
3646			    sc->sk_int_mod);
3647	}
3648
3649	/* Configure interrupt handling */
3650	CSR_READ_4(sc, SK_ISSR);
3651	if (sc_if->sk_port == SK_PORT_A)
3652		sc->sk_intrmask |= SK_INTRS1;
3653	else
3654		sc->sk_intrmask |= SK_INTRS2;
3655
3656	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3657
3658	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3659
3660	/* Start BMUs. */
3661	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3662
3663	switch(sc->sk_type) {
3664	case SK_GENESIS:
3665		/* Enable XMACs TX and RX state machines */
3666		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3667		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3668		break;
3669	case SK_YUKON:
3670	case SK_YUKON_LITE:
3671	case SK_YUKON_LP:
3672		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3673		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3674#if 0
3675		/* XXX disable 100Mbps and full duplex mode? */
3676		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3677#endif
3678		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3679	}
3680
3681	/* Activate descriptor polling timer */
3682	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3683	/* start transfer of Tx descriptors */
3684	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3685
3686	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3687	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3688
3689	switch (sc->sk_type) {
3690	case SK_YUKON:
3691	case SK_YUKON_LITE:
3692	case SK_YUKON_LP:
3693		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3694		break;
3695	}
3696
3697	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3698
3699	return;
3700}
3701
3702static void
3703sk_stop(sc_if)
3704	struct sk_if_softc	*sc_if;
3705{
3706	int			i;
3707	struct sk_softc		*sc;
3708	struct sk_txdesc	*txd;
3709	struct sk_rxdesc	*rxd;
3710	struct sk_rxdesc	*jrxd;
3711	struct ifnet		*ifp;
3712	u_int32_t		val;
3713
3714	SK_IF_LOCK_ASSERT(sc_if);
3715	sc = sc_if->sk_softc;
3716	ifp = sc_if->sk_ifp;
3717
3718	callout_stop(&sc_if->sk_tick_ch);
3719	callout_stop(&sc_if->sk_watchdog_ch);
3720
3721	/* stop Tx descriptor polling timer */
3722	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3723	/* stop transfer of Tx descriptors */
3724	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3725	for (i = 0; i < SK_TIMEOUT; i++) {
3726		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3727		if ((val & SK_TXBMU_TX_STOP) == 0)
3728			break;
3729		DELAY(1);
3730	}
3731	if (i == SK_TIMEOUT)
3732		device_printf(sc_if->sk_if_dev,
3733		    "can not stop transfer of Tx descriptor\n");
3734	/* stop transfer of Rx descriptors */
3735	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3736	for (i = 0; i < SK_TIMEOUT; i++) {
3737		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3738		if ((val & SK_RXBMU_RX_STOP) == 0)
3739			break;
3740		DELAY(1);
3741	}
3742	if (i == SK_TIMEOUT)
3743		device_printf(sc_if->sk_if_dev,
3744		    "cannot stop transfer of Rx descriptors\n");
3745
3746	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3747		/* Put PHY back into reset. */
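		/*
		 * Drive the pin low: set its GPIO direction bit (output)
		 * and clear its data bit.
		 */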
3748		val = sk_win_read_4(sc, SK_GPIO);
3749		if (sc_if->sk_port == SK_PORT_A) {
3750			val |= SK_GPIO_DIR0;
3751			val &= ~SK_GPIO_DAT0;
3752		} else {
3753			val |= SK_GPIO_DIR2;
3754			val &= ~SK_GPIO_DAT2;
3755		}
3756		sk_win_write_4(sc, SK_GPIO, val);
3757	}
3758
3759	/* Turn off various components of this interface. */
3760	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3761	switch (sc->sk_type) {
3762	case SK_GENESIS:
3763		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3764		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3765		break;
3766	case SK_YUKON:
3767	case SK_YUKON_LITE:
3768	case SK_YUKON_LP:
3769		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3770		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3771		break;
3772	}
3773	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3774	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3775	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3776	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3777	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3778	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
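	/* XXX the Rx LED counter-stop value is reused for the Tx LED register. */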
3779	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3780	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3781	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3782
3783	/* Disable interrupts */
3784	if (sc_if->sk_port == SK_PORT_A)
3785		sc->sk_intrmask &= ~SK_INTRS1;
3786	else
3787		sc->sk_intrmask &= ~SK_INTRS2;
3788	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3789
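	/*
	 * Quiesce the XMAC: the ISR read acks any latched events, and
	 * writing all ones to XM_IMR masks every source (assuming set
	 * bits disable, as is usual for mask registers).
	 */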
3790	SK_XM_READ_2(sc_if, XM_ISR);
3791	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3792
3793	/* Free RX and TX mbufs still in the queues. */
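	/* Sync and unload each loaded DMA map before freeing its mbuf. */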
3794	for (i = 0; i < SK_RX_RING_CNT; i++) {
3795		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3796		if (rxd->rx_m != NULL) {
3797			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3798			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3799			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3800			    rxd->rx_dmamap);
3801			m_freem(rxd->rx_m);
3802			rxd->rx_m = NULL;
3803		}
3804	}
3805	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3806		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3807		if (jrxd->rx_m != NULL) {
3808			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3809			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3810			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3811			    jrxd->rx_dmamap);
3812			m_freem(jrxd->rx_m);
3813			jrxd->rx_m = NULL;
3814		}
3815	}
3816	for (i = 0; i < SK_TX_RING_CNT; i++) {
3817		txd = &sc_if->sk_cdata.sk_txdesc[i];
3818		if (txd->tx_m != NULL) {
3819			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3820			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3821			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3822			    txd->tx_dmamap);
3823			m_freem(txd->tx_m);
3824			txd->tx_m = NULL;
3825		}
3826	}
3827
3828	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3829
3830	return;
3831}
3832
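/*
 * Bounds-checked integer sysctl helper: report the current value and
 * accept a new value only if it lies within [low, high].
 */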
3833static int
3834sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3835{
3836	int error, value;
3837
3838	if (arg1 == NULL)
3839		return (EINVAL);
3840	value = *(int *)arg1;
3841	error = sysctl_handle_int(oidp, &value, 0, req);
3842	if (error != 0 || req->newptr == NULL)
3843		return (error);
3844	if (value < low || value > high)
3845		return (EINVAL);
3846	*(int *)arg1 = value;
3847	return (0);
3848}
3849
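/* Interrupt moderation must lie within [SK_IM_MIN, SK_IM_MAX] microseconds. */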
3850static int
3851sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3852{
3853	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3854}
3855