if_sk.c revision 213893
154359Sroberto/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
254359Sroberto
354359Sroberto/*-
454359Sroberto * Copyright (c) 1997, 1998, 1999, 2000
554359Sroberto *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
682498Sroberto *
754359Sroberto * Redistribution and use in source and binary forms, with or without
854359Sroberto * modification, are permitted provided that the following conditions
954359Sroberto * are met:
1054359Sroberto * 1. Redistributions of source code must retain the above copyright
1154359Sroberto *    notice, this list of conditions and the following disclaimer.
1254359Sroberto * 2. Redistributions in binary form must reproduce the above copyright
1354359Sroberto *    notice, this list of conditions and the following disclaimer in the
1454359Sroberto *    documentation and/or other materials provided with the distribution.
1554359Sroberto * 3. All advertising materials mentioning features or use of this software
1654359Sroberto *    must display the following acknowledgement:
1754359Sroberto *	This product includes software developed by Bill Paul.
1854359Sroberto * 4. Neither the name of the author nor the names of any co-contributors
1954359Sroberto *    may be used to endorse or promote products derived from this software
2054359Sroberto *    without specific prior written permission.
2154359Sroberto *
2254359Sroberto * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
2354359Sroberto * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2454359Sroberto * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2554359Sroberto * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
2654359Sroberto * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2754359Sroberto * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2854359Sroberto * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2954359Sroberto * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3054359Sroberto * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3154359Sroberto * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
3254359Sroberto * THE POSSIBILITY OF SUCH DAMAGE.
3354359Sroberto */
3454359Sroberto/*-
3554359Sroberto * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
3654359Sroberto *
3754359Sroberto * Permission to use, copy, modify, and distribute this software for any
3854359Sroberto * purpose with or without fee is hereby granted, provided that the above
3954359Sroberto * copyright notice and this permission notice appear in all copies.
4054359Sroberto *
4154359Sroberto * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
4254359Sroberto * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
4354359Sroberto * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
4454359Sroberto * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
4554359Sroberto * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
4654359Sroberto * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
4754359Sroberto * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
4854359Sroberto */
4954359Sroberto
5054359Sroberto#include <sys/cdefs.h>
5154359Sroberto__FBSDID("$FreeBSD: head/sys/dev/sk/if_sk.c 213893 2010-10-15 14:52:11Z marius $");
5254359Sroberto
5354359Sroberto/*
5454359Sroberto * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
5554359Sroberto * the SK-984x series adapters, both single port and dual port.
5654359Sroberto * References:
5754359Sroberto * 	The XaQti XMAC II datasheet,
5854359Sroberto *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
5954359Sroberto *	The SysKonnect GEnesis manual, http://www.syskonnect.com
6054359Sroberto *
6154359Sroberto * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
6254359Sroberto * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
6354359Sroberto * convenience to others until Vitesse corrects this problem:
6454359Sroberto *
6554359Sroberto * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
6654359Sroberto *
6754359Sroberto * Written by Bill Paul <wpaul@ee.columbia.edu>
6854359Sroberto * Department of Electrical Engineering
6954359Sroberto * Columbia University, New York City
7054359Sroberto */
7154359Sroberto/*
7254359Sroberto * The SysKonnect gigabit ethernet adapters consist of two main
7354359Sroberto * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
7454359Sroberto * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
7554359Sroberto * components and a PHY while the GEnesis controller provides a PCI
7654359Sroberto * interface with DMA support. Each card may have between 512K and
7754359Sroberto * 2MB of SRAM on board depending on the configuration.
7854359Sroberto *
7954359Sroberto * The SysKonnect GEnesis controller can have either one or two XMAC
8054359Sroberto * chips connected to it, allowing single or dual port NIC configurations.
8154359Sroberto * SysKonnect has the distinction of being the only vendor on the market
8254359Sroberto * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
8354359Sroberto * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
8454359Sroberto * XMAC registers. This driver takes advantage of these features to allow
8554359Sroberto * both XMACs to operate as independent interfaces.
8654359Sroberto */
8754359Sroberto
8854359Sroberto#include <sys/param.h>
8954359Sroberto#include <sys/systm.h>
9054359Sroberto#include <sys/bus.h>
9154359Sroberto#include <sys/endian.h>
9254359Sroberto#include <sys/mbuf.h>
9354359Sroberto#include <sys/malloc.h>
9454359Sroberto#include <sys/kernel.h>
9554359Sroberto#include <sys/module.h>
9654359Sroberto#include <sys/socket.h>
9754359Sroberto#include <sys/sockio.h>
9854359Sroberto#include <sys/queue.h>
9954359Sroberto#include <sys/sysctl.h>
10054359Sroberto
10154359Sroberto#include <net/bpf.h>
10254359Sroberto#include <net/ethernet.h>
10354359Sroberto#include <net/if.h>
10454359Sroberto#include <net/if_arp.h>
10554359Sroberto#include <net/if_dl.h>
10654359Sroberto#include <net/if_media.h>
10754359Sroberto#include <net/if_types.h>
10854359Sroberto#include <net/if_vlan_var.h>
10954359Sroberto
11054359Sroberto#include <netinet/in.h>
11154359Sroberto#include <netinet/in_systm.h>
11254359Sroberto#include <netinet/ip.h>
11354359Sroberto
11454359Sroberto#include <machine/bus.h>
11554359Sroberto#include <machine/in_cksum.h>
11654359Sroberto#include <machine/resource.h>
11754359Sroberto#include <sys/rman.h>
11854359Sroberto
11954359Sroberto#include <dev/mii/mii.h>
12054359Sroberto#include <dev/mii/miivar.h>
12154359Sroberto#include <dev/mii/brgphyreg.h>
12254359Sroberto
12354359Sroberto#include <dev/pci/pcireg.h>
12454359Sroberto#include <dev/pci/pcivar.h>
12554359Sroberto
12654359Sroberto#if 0
12754359Sroberto#define SK_USEIOSPACE
12854359Sroberto#endif
12954359Sroberto
13054359Sroberto#include <dev/sk/if_skreg.h>
13154359Sroberto#include <dev/sk/xmaciireg.h>
13254359Sroberto#include <dev/sk/yukonreg.h>
13354359Sroberto
13454359SrobertoMODULE_DEPEND(sk, pci, 1, 1, 1);
13554359SrobertoMODULE_DEPEND(sk, ether, 1, 1, 1);
13654359SrobertoMODULE_DEPEND(sk, miibus, 1, 1, 1);
13754359Sroberto
13854359Sroberto/* "device miibus" required.  See GENERIC if you get errors here. */
13954359Sroberto#include "miibus_if.h"
14054359Sroberto
14154359Sroberto#ifndef lint
14254359Srobertostatic const char rcsid[] =
14354359Sroberto  "$FreeBSD: head/sys/dev/sk/if_sk.c 213893 2010-10-15 14:52:11Z marius $";
14454359Sroberto#endif
14554359Sroberto
14654359Srobertostatic struct sk_type sk_devs[] = {
14754359Sroberto	{
14854359Sroberto		VENDORID_SK,
14954359Sroberto		DEVICEID_SK_V1,
15054359Sroberto		"SysKonnect Gigabit Ethernet (V1.0)"
15154359Sroberto	},
15254359Sroberto	{
15354359Sroberto		VENDORID_SK,
15454359Sroberto		DEVICEID_SK_V2,
15554359Sroberto		"SysKonnect Gigabit Ethernet (V2.0)"
15654359Sroberto	},
15754359Sroberto	{
15854359Sroberto		VENDORID_MARVELL,
15954359Sroberto		DEVICEID_SK_V2,
16054359Sroberto		"Marvell Gigabit Ethernet"
16154359Sroberto	},
16254359Sroberto	{
16354359Sroberto		VENDORID_MARVELL,
16454359Sroberto		DEVICEID_BELKIN_5005,
16554359Sroberto		"Belkin F5D5005 Gigabit Ethernet"
16654359Sroberto	},
16754359Sroberto	{
16854359Sroberto		VENDORID_3COM,
16954359Sroberto		DEVICEID_3COM_3C940,
17054359Sroberto		"3Com 3C940 Gigabit Ethernet"
17154359Sroberto	},
17254359Sroberto	{
17354359Sroberto		VENDORID_LINKSYS,
17454359Sroberto		DEVICEID_LINKSYS_EG1032,
17554359Sroberto		"Linksys EG1032 Gigabit Ethernet"
17654359Sroberto	},
17754359Sroberto	{
17854359Sroberto		VENDORID_DLINK,
17954359Sroberto		DEVICEID_DLINK_DGE530T_A1,
18054359Sroberto		"D-Link DGE-530T Gigabit Ethernet"
18154359Sroberto	},
18254359Sroberto	{
18354359Sroberto		VENDORID_DLINK,
18454359Sroberto		DEVICEID_DLINK_DGE530T_B1,
18554359Sroberto		"D-Link DGE-530T Gigabit Ethernet"
18654359Sroberto	},
18754359Sroberto	{ 0, 0, NULL }
18854359Sroberto};
18954359Sroberto
19054359Srobertostatic int skc_probe(device_t);
19154359Srobertostatic int skc_attach(device_t);
19254359Srobertostatic int skc_detach(device_t);
19354359Srobertostatic int skc_shutdown(device_t);
19454359Srobertostatic int skc_suspend(device_t);
19554359Srobertostatic int skc_resume(device_t);
19654359Srobertostatic int sk_detach(device_t);
19754359Srobertostatic int sk_probe(device_t);
19854359Srobertostatic int sk_attach(device_t);
19954359Srobertostatic void sk_tick(void *);
20054359Srobertostatic void sk_yukon_tick(void *);
20154359Srobertostatic void sk_intr(void *);
20254359Srobertostatic void sk_intr_xmac(struct sk_if_softc *);
20354359Srobertostatic void sk_intr_bcom(struct sk_if_softc *);
20454359Srobertostatic void sk_intr_yukon(struct sk_if_softc *);
20554359Srobertostatic __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
20654359Srobertostatic __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
20754359Srobertostatic void sk_rxeof(struct sk_if_softc *);
20854359Srobertostatic void sk_jumbo_rxeof(struct sk_if_softc *);
20954359Srobertostatic void sk_txeof(struct sk_if_softc *);
21054359Srobertostatic void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
21154359Srobertostatic int sk_encap(struct sk_if_softc *, struct mbuf **);
21254359Srobertostatic void sk_start(struct ifnet *);
21354359Srobertostatic void sk_start_locked(struct ifnet *);
21454359Srobertostatic int sk_ioctl(struct ifnet *, u_long, caddr_t);
21554359Srobertostatic void sk_init(void *);
21654359Srobertostatic void sk_init_locked(struct sk_if_softc *);
21754359Srobertostatic void sk_init_xmac(struct sk_if_softc *);
21854359Srobertostatic void sk_init_yukon(struct sk_if_softc *);
21954359Srobertostatic void sk_stop(struct sk_if_softc *);
22054359Srobertostatic void sk_watchdog(void *);
22154359Srobertostatic int sk_ifmedia_upd(struct ifnet *);
22254359Srobertostatic void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
22354359Srobertostatic void sk_reset(struct sk_softc *);
22454359Srobertostatic __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
22554359Srobertostatic __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
226static int sk_newbuf(struct sk_if_softc *, int);
227static int sk_jumbo_newbuf(struct sk_if_softc *, int);
228static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
229static int sk_dma_alloc(struct sk_if_softc *);
230static int sk_dma_jumbo_alloc(struct sk_if_softc *);
231static void sk_dma_free(struct sk_if_softc *);
232static void sk_dma_jumbo_free(struct sk_if_softc *);
233static int sk_init_rx_ring(struct sk_if_softc *);
234static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
235static void sk_init_tx_ring(struct sk_if_softc *);
236static u_int32_t sk_win_read_4(struct sk_softc *, int);
237static u_int16_t sk_win_read_2(struct sk_softc *, int);
238static u_int8_t sk_win_read_1(struct sk_softc *, int);
239static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
240static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
241static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
242
243static int sk_miibus_readreg(device_t, int, int);
244static int sk_miibus_writereg(device_t, int, int, int);
245static void sk_miibus_statchg(device_t);
246
247static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
248static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
249						int);
250static void sk_xmac_miibus_statchg(struct sk_if_softc *);
251
252static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
253static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
254						int);
255static void sk_marv_miibus_statchg(struct sk_if_softc *);
256
257static uint32_t sk_xmchash(const uint8_t *);
258static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
259static void sk_rxfilter(struct sk_if_softc *);
260static void sk_rxfilter_genesis(struct sk_if_softc *);
261static void sk_rxfilter_yukon(struct sk_if_softc *);
262
263static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
264static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
265
266/* Tunables. */
267static int jumbo_disable = 0;
268TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
269
/*
 * It seems that SK-NET GENESIS supports very simple checksum offload
 * capability for Tx and I believe it can generate 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets. A 0 checksum value for a UDP packet is an invalid one as
 * it means the sender didn't perform checksum computation. For safety I
 * disabled UDP checksum offload capability at the moment. Alternatively
 * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
 * offload routine.
 */
280#define SK_CSUM_FEATURES	(CSUM_TCP)
281
282/*
283 * Note that we have newbus methods for both the GEnesis controller
284 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
285 * the miibus code is a child of the XMACs. We need to do it this way
286 * so that the miibus drivers can access the PHY registers on the
287 * right PHY. It's not quite what I had in mind, but it's the only
288 * design that achieves the desired effect.
289 */
/* newbus method table for the skc controller device. */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_suspend,	skc_suspend),
	DEVMETHOD(device_resume,	skc_resume),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/* newbus method table for the sk network interface child device. */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Driver hierarchy: skc on pci, sk on skc, miibus on sk. */
DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

/* Resources claimed in skc_attach(): one BAR plus a shareable IRQ. */
static struct resource_spec sk_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec sk_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};
356
/* Read-modify-write helpers for setting/clearing register bits. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

/* As above, but through the windowed access routines. */
#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
374
375static u_int32_t
376sk_win_read_4(sc, reg)
377	struct sk_softc		*sc;
378	int			reg;
379{
380#ifdef SK_USEIOSPACE
381	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
382	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
383#else
384	return(CSR_READ_4(sc, reg));
385#endif
386}
387
388static u_int16_t
389sk_win_read_2(sc, reg)
390	struct sk_softc		*sc;
391	int			reg;
392{
393#ifdef SK_USEIOSPACE
394	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
395	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
396#else
397	return(CSR_READ_2(sc, reg));
398#endif
399}
400
401static u_int8_t
402sk_win_read_1(sc, reg)
403	struct sk_softc		*sc;
404	int			reg;
405{
406#ifdef SK_USEIOSPACE
407	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
408	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
409#else
410	return(CSR_READ_1(sc, reg));
411#endif
412}
413
414static void
415sk_win_write_4(sc, reg, val)
416	struct sk_softc		*sc;
417	int			reg;
418	u_int32_t		val;
419{
420#ifdef SK_USEIOSPACE
421	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
422	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
423#else
424	CSR_WRITE_4(sc, reg, val);
425#endif
426	return;
427}
428
429static void
430sk_win_write_2(sc, reg, val)
431	struct sk_softc		*sc;
432	int			reg;
433	u_int32_t		val;
434{
435#ifdef SK_USEIOSPACE
436	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
437	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
438#else
439	CSR_WRITE_2(sc, reg, val);
440#endif
441	return;
442}
443
444static void
445sk_win_write_1(sc, reg, val)
446	struct sk_softc		*sc;
447	int			reg;
448	u_int32_t		val;
449{
450#ifdef SK_USEIOSPACE
451	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
452	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
453#else
454	CSR_WRITE_1(sc, reg, val);
455#endif
456	return;
457}
458
459static int
460sk_miibus_readreg(dev, phy, reg)
461	device_t		dev;
462	int			phy, reg;
463{
464	struct sk_if_softc	*sc_if;
465	int			v;
466
467	sc_if = device_get_softc(dev);
468
469	SK_IF_MII_LOCK(sc_if);
470	switch(sc_if->sk_softc->sk_type) {
471	case SK_GENESIS:
472		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
473		break;
474	case SK_YUKON:
475	case SK_YUKON_LITE:
476	case SK_YUKON_LP:
477		v = sk_marv_miibus_readreg(sc_if, phy, reg);
478		break;
479	default:
480		v = 0;
481		break;
482	}
483	SK_IF_MII_UNLOCK(sc_if);
484
485	return (v);
486}
487
488static int
489sk_miibus_writereg(dev, phy, reg, val)
490	device_t		dev;
491	int			phy, reg, val;
492{
493	struct sk_if_softc	*sc_if;
494	int			v;
495
496	sc_if = device_get_softc(dev);
497
498	SK_IF_MII_LOCK(sc_if);
499	switch(sc_if->sk_softc->sk_type) {
500	case SK_GENESIS:
501		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
502		break;
503	case SK_YUKON:
504	case SK_YUKON_LITE:
505	case SK_YUKON_LP:
506		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
507		break;
508	default:
509		v = 0;
510		break;
511	}
512	SK_IF_MII_UNLOCK(sc_if);
513
514	return (v);
515}
516
517static void
518sk_miibus_statchg(dev)
519	device_t		dev;
520{
521	struct sk_if_softc	*sc_if;
522
523	sc_if = device_get_softc(dev);
524
525	SK_IF_MII_LOCK(sc_if);
526	switch(sc_if->sk_softc->sk_type) {
527	case SK_GENESIS:
528		sk_xmac_miibus_statchg(sc_if);
529		break;
530	case SK_YUKON:
531	case SK_YUKON_LITE:
532	case SK_YUKON_LP:
533		sk_marv_miibus_statchg(sc_if);
534		break;
535	}
536	SK_IF_MII_UNLOCK(sc_if);
537
538	return;
539}
540
/*
 * Read a PHY register through the XMAC's MII interface.  The register
 * select write plus a dummy data read start the MII transaction; for
 * non-XMAC (external GMII) PHYs we must then poll XM_MMUCMD until
 * PHYDATARDY indicates the data register is valid.  Returns the
 * register value, or 0 if the PHY never becomes ready.
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	/* Select the register; the throwaway read kicks off the cycle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: wait for the data-ready handshake. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	/* Fetch the latched register contents. */
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);

	return(i);
}
568
/*
 * Write a PHY register through the XMAC's MII interface.  Waits for
 * the MII machinery to go idle both before loading the data register
 * and after, so back-to-back writes cannot collide.  Returns 0 on
 * success or ETIMEDOUT if the interface never becomes ready; a
 * timeout on the post-write wait is only logged.
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	/* Select the target register and wait for the bus to be free. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return (ETIMEDOUT);
	}

	/* Writing the data register starts the MII write cycle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timed out\n");

	return(0);
}
598
599static void
600sk_xmac_miibus_statchg(sc_if)
601	struct sk_if_softc	*sc_if;
602{
603	struct mii_data		*mii;
604
605	mii = device_get_softc(sc_if->sk_miibus);
606
607	/*
608	 * If this is a GMII PHY, manually set the XMAC's
609	 * duplex mode accordingly.
610	 */
611	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
612		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
613			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
614		} else {
615			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
616		}
617	}
618}
619
/*
 * Read a PHY register through the Yukon GMAC's SMI interface.  Only
 * the Marvell copper/fiber PHY types are supported; anything else
 * reads as 0.  Issues a read command via YUKON_SMICR, polls for
 * READ_VALID, then fetches the data from YUKON_SMIDR.  Returns the
 * register value, or 0 on timeout.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t		val;
	int			i;

	if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
		return(0);
	}

	/* Kick off the SMI read cycle for this PHY/register pair. */
        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}
652
653static int
654sk_marv_miibus_writereg(sc_if, phy, reg, val)
655	struct sk_if_softc	*sc_if;
656	int			phy, reg, val;
657{
658	int			i;
659
660	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
661	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
662		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
663
664	for (i = 0; i < SK_TIMEOUT; i++) {
665		DELAY(1);
666		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
667			break;
668	}
669	if (i == SK_TIMEOUT)
670		if_printf(sc_if->sk_ifp, "phy write timeout\n");
671
672	return(0);
673}
674
/*
 * Link status change handler for the Yukon/Marvell PHYs.  Unlike the
 * XMAC, the GMAC needs no manual duplex fixup here, so this is
 * intentionally a no-op.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
}
681
682#define HASH_BITS		6
683
684static u_int32_t
685sk_xmchash(addr)
686	const uint8_t *addr;
687{
688	uint32_t crc;
689
690	/* Compute CRC for the address value. */
691	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
692
693	return (~crc & ((1 << HASH_BITS) - 1));
694}
695
696static void
697sk_setfilt(sc_if, addr, slot)
698	struct sk_if_softc	*sc_if;
699	u_int16_t		*addr;
700	int			slot;
701{
702	int			base;
703
704	base = XM_RXFILT_ENTRY(slot);
705
706	SK_XM_WRITE_2(sc_if, base, addr[0]);
707	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
708	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
709
710	return;
711}
712
713static void
714sk_rxfilter(sc_if)
715	struct sk_if_softc	*sc_if;
716{
717	struct sk_softc		*sc;
718
719	SK_IF_LOCK_ASSERT(sc_if);
720
721	sc = sc_if->sk_softc;
722	if (sc->sk_type == SK_GENESIS)
723		sk_rxfilter_genesis(sc_if);
724	else
725		sk_rxfilter_yukon(sc_if);
726}
727
/*
 * Program the GEnesis (XMAC) receive filter.  The XMAC offers a
 * small bank of perfect-match filters plus a 64-bin hash table.
 * Multicast groups are placed into perfect-filter slots 1..
 * XM_RXFILT_MAX-1 first (slot 0 holds the station address), and any
 * overflow falls back to the hash table.  PROMISC/ALLMULTI bypass
 * the table entirely.  Caller must hold the interface lock.
 */
static void
sk_rxfilter_genesis(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp = sc_if->sk_ifp;
	u_int32_t		hashes[2] = { 0, 0 }, mode;
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	u_int16_t		dummy[] = { 0, 0, 0 };
	u_int16_t		maddr[(ETHER_ADDR_LEN+1)/2];

	SK_IF_LOCK_ASSERT(sc_if);

	mode = SK_XM_READ_4(sc_if, XM_MODE);
	mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
	    XM_MODE_RX_USE_PERFECT);
	/* First, zot all the existing perfect filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, dummy, i);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		if (ifp->if_flags & IFF_ALLMULTI)
			mode |= XM_MODE_RX_USE_HASH;
		if (ifp->if_flags & IFF_PROMISC)
			mode |= XM_MODE_RX_PROMISC;
		/* Accept everything: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		if_maddr_rlock(ifp);
		/* Walk in reverse so the oldest groups get perfect slots. */
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
		    ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter.
			 */
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    maddr, ETHER_ADDR_LEN);
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, maddr, i);
				mode |= XM_MODE_RX_USE_PERFECT;
				i++;
				continue;
			}
			/* Perfect filters exhausted: use the hash table. */
			h = sk_xmchash((const uint8_t *)maddr);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
			mode |= XM_MODE_RX_USE_HASH;
		}
		if_maddr_runlock(ifp);
	}

	SK_XM_WRITE_4(sc_if, XM_MODE, mode);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
}
789
/*
 * Program the Yukon (GMAC) receive filter.  The GMAC uses a 64-bin
 * multicast hash split across four 16-bit registers (MCAH1..MCAH4)
 * indexed by the low 6 bits of the big-endian CRC-32 of the address.
 * UFLEN/MUFLEN in the receive control register enable unicast and
 * multicast filtering respectively; clearing both gives promiscuous
 * mode.  Caller must hold the interface lock.
 */
static void
sk_rxfilter_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp;
	u_int32_t		crc, hashes[2] = { 0, 0 }, mode;
	struct ifmultiaddr	*ifma;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
		/* Accept all multicast: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		mode |= YU_RCR_UFLEN;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			hashes[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		/* Only enable multicast filtering if any group joined. */
		if (hashes[0] != 0 || hashes[1] != 0)
			mode |= YU_RCR_MUFLEN;
	}

	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
}
832
833static int
834sk_init_rx_ring(sc_if)
835	struct sk_if_softc	*sc_if;
836{
837	struct sk_ring_data	*rd;
838	bus_addr_t		addr;
839	u_int32_t		csum_start;
840	int			i;
841
842	sc_if->sk_cdata.sk_rx_cons = 0;
843
844	csum_start = (ETHER_HDR_LEN + sizeof(struct ip))  << 16 |
845	    ETHER_HDR_LEN;
846	rd = &sc_if->sk_rdata;
847	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
848	for (i = 0; i < SK_RX_RING_CNT; i++) {
849		if (sk_newbuf(sc_if, i) != 0)
850			return (ENOBUFS);
851		if (i == (SK_RX_RING_CNT - 1))
852			addr = SK_RX_RING_ADDR(sc_if, 0);
853		else
854			addr = SK_RX_RING_ADDR(sc_if, i + 1);
855		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
856		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
857	}
858
859	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
860	    sc_if->sk_cdata.sk_rx_ring_map,
861	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
862
863	return(0);
864}
865
866static int
867sk_init_jumbo_rx_ring(sc_if)
868	struct sk_if_softc	*sc_if;
869{
870	struct sk_ring_data	*rd;
871	bus_addr_t		addr;
872	u_int32_t		csum_start;
873	int			i;
874
875	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
876
877	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
878	    ETHER_HDR_LEN;
879	rd = &sc_if->sk_rdata;
880	bzero(rd->sk_jumbo_rx_ring,
881	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
882	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
883		if (sk_jumbo_newbuf(sc_if, i) != 0)
884			return (ENOBUFS);
885		if (i == (SK_JUMBO_RX_RING_CNT - 1))
886			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
887		else
888			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
889		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
890		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
891	}
892
893	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
894	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
895	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
896
897	return (0);
898}
899
900static void
901sk_init_tx_ring(sc_if)
902	struct sk_if_softc	*sc_if;
903{
904	struct sk_ring_data	*rd;
905	struct sk_txdesc	*txd;
906	bus_addr_t		addr;
907	int			i;
908
909	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
910	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
911
912	sc_if->sk_cdata.sk_tx_prod = 0;
913	sc_if->sk_cdata.sk_tx_cons = 0;
914	sc_if->sk_cdata.sk_tx_cnt = 0;
915
916	rd = &sc_if->sk_rdata;
917	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
918	for (i = 0; i < SK_TX_RING_CNT; i++) {
919		if (i == (SK_TX_RING_CNT - 1))
920			addr = SK_TX_RING_ADDR(sc_if, 0);
921		else
922			addr = SK_TX_RING_ADDR(sc_if, i + 1);
923		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
924		txd = &sc_if->sk_cdata.sk_txdesc[i];
925		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
926	}
927
928	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
929	    sc_if->sk_cdata.sk_tx_ring_map,
930	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
931}
932
933static __inline void
934sk_discard_rxbuf(sc_if, idx)
935	struct sk_if_softc	*sc_if;
936	int			idx;
937{
938	struct sk_rx_desc	*r;
939	struct sk_rxdesc	*rxd;
940	struct mbuf		*m;
941
942
943	r = &sc_if->sk_rdata.sk_rx_ring[idx];
944	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
945	m = rxd->rx_m;
946	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
947}
948
949static __inline void
950sk_discard_jumbo_rxbuf(sc_if, idx)
951	struct sk_if_softc	*sc_if;
952	int			idx;
953{
954	struct sk_rx_desc	*r;
955	struct sk_rxdesc	*rxd;
956	struct mbuf		*m;
957
958	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
959	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
960	m = rxd->rx_m;
961	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
962}
963
/*
 * Attach a fresh mbuf cluster to RX slot 'idx' and initialize its
 * descriptor.  The new buffer is loaded into the spare DMA map first
 * so the slot's previous mapping remains intact until the allocation
 * and load are known good; only then are the maps swapped and the old
 * mapping torn down.  Returns 0 on success or ENOBUFS.
 */
static int
sk_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int 			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Align the IP header on a longword boundary. */
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Unload the mapping of the buffer being replaced, if any. */
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Hand the buffer to the hardware. */
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
1008
/*
 * Allocate a new jumbo (9K) RX buffer for jumbo ring slot 'idx',
 * DMA-map it and program the descriptor.  Mirrors sk_newbuf() but
 * uses the jumbo tag/ring/spare map.  Returns 0 or ENOBUFS; on
 * failure the old buffer (if any) is left in place.
 */
static int
sk_jumbo_newbuf(sc_if, idx)
	struct sk_if_softc	*sc_if;
	int			idx;
{
	struct sk_rx_desc	*r;
	struct sk_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	/* Make sure we really got external (jumbo cluster) storage. */
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	/* Map into the spare map first so the old mapping stays valid. */
	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the mapping of the buffer previously in this slot. */
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	/* Swap the now-loaded spare map into the slot; old map becomes spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	/* Hand the buffer's bus address and length to the descriptor. */
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
1063
1064/*
1065 * Set media options.
1066 */
1067static int
1068sk_ifmedia_upd(ifp)
1069	struct ifnet		*ifp;
1070{
1071	struct sk_if_softc	*sc_if = ifp->if_softc;
1072	struct mii_data		*mii;
1073
1074	mii = device_get_softc(sc_if->sk_miibus);
1075	sk_init(sc_if);
1076	mii_mediachg(mii);
1077
1078	return(0);
1079}
1080
1081/*
1082 * Report current media status.
1083 */
1084static void
1085sk_ifmedia_sts(ifp, ifmr)
1086	struct ifnet		*ifp;
1087	struct ifmediareq	*ifmr;
1088{
1089	struct sk_if_softc	*sc_if;
1090	struct mii_data		*mii;
1091
1092	sc_if = ifp->if_softc;
1093	mii = device_get_softc(sc_if->sk_miibus);
1094
1095	mii_pollstat(mii);
1096	ifmr->ifm_active = mii->mii_media_active;
1097	ifmr->ifm_status = mii->mii_media_status;
1098
1099	return;
1100}
1101
/*
 * Handle interface ioctl requests.  MTU, flag, multicast, media and
 * capability changes are processed here; anything else is delegated
 * to ether_ioctl().  Returns 0 or an errno value.
 */
static int
sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error, mask;
	struct mii_data		*mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * MTUs above SK_MAX_FRAMELEN need the jumbo RX
			 * ring; refuse them if jumbo support is disabled.
			 */
			if (sc_if->sk_jumbo_disable != 0 &&
			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
				error = EINVAL;
			else {
				SK_IF_LOCK(sc_if);
				ifp->if_mtu = ifr->ifr_mtu;
				/* Reinitialize to pick up the new buffer size. */
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					sk_init_locked(sc_if);
				}
				SK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the RX
				 * filter when PROMISC/ALLMULTI changed.
				 */
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI))
					sk_rxfilter(sc_if);
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags for the next delta comparison. */
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sk_rxfilter(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		/* GENESIS checksum offload is broken; see sk_attach(). */
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			/* Keep if_hwassist in sync with the TXCSUM toggle. */
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1188
1189/*
1190 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1191 * IDs against our list and return a device name if we find a match.
1192 */
1193static int
1194skc_probe(dev)
1195	device_t		dev;
1196{
1197	struct sk_type		*t = sk_devs;
1198
1199	while(t->sk_name != NULL) {
1200		if ((pci_get_vendor(dev) == t->sk_vid) &&
1201		    (pci_get_device(dev) == t->sk_did)) {
1202			/*
1203			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1204			 * Rev. 3 is supported by re(4).
1205			 */
1206			if ((t->sk_vid == VENDORID_LINKSYS) &&
1207				(t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1208				(pci_get_subdevice(dev) !=
1209				 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1210				t++;
1211				continue;
1212			}
1213			device_set_desc(dev, t->sk_name);
1214			return (BUS_PROBE_DEFAULT);
1215		}
1216		t++;
1217	}
1218
1219	return(ENXIO);
1220}
1221
1222/*
1223 * Force the GEnesis into reset, then bring it out of reset.
1224 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{

	/* Assert software and master reset; Yukon also needs link reset. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in the reverse order, with settle delays in between. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	if (bootverbose)
		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
	    sc->sk_int_ticks));
	/* Moderate only TX/RX EOF interrupts, then start the timer. */
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1282
1283static int
1284sk_probe(dev)
1285	device_t		dev;
1286{
1287	struct sk_softc		*sc;
1288
1289	sc = device_get_softc(device_get_parent(dev));
1290
1291	/*
1292	 * Not much to do here. We always know there will be
1293	 * at least one XMAC present, and if there are two,
1294	 * skc_attach() will create a second device instance
1295	 * for us.
1296	 */
1297	switch (sc->sk_type) {
1298	case SK_GENESIS:
1299		device_set_desc(dev, "XaQti Corp. XMAC II");
1300		break;
1301	case SK_YUKON:
1302	case SK_YUKON_LITE:
1303	case SK_YUKON_LP:
1304		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1305		break;
1306	}
1307
1308	return (BUS_PROBE_DEFAULT);
1309}
1310
1311/*
1312 * Each XMAC chip is attached as a separate logical IP interface.
1313 * Single port cards will have only one logical interface of course.
1314 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			error, i, phy, port;
	u_char			eaddr[6];

	if (dev == NULL)
		return(EINVAL);

	/* Link this port's softc to the parent controller softc. */
	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	/* Each port has its own synchronous transmit BMU CSR. */
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}
	/*
	 * NOTE(review): return value ignored — presumably jumbo DMA
	 * allocation failure is recorded via sk_jumbo_disable inside
	 * sk_dma_jumbo_alloc(); confirm there.
	 */
	sk_dma_jumbo_alloc(sc_if);

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * SK_GENESIS has a bug in checksum offload - From linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
		ifp->if_hwassist = 0;
	} else {
		ifp->if_capabilities = 0;
		ifp->if_hwassist = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Some revision of Yukon controller generates corrupted
	 * frame when TX checksum offloading is enabled.  The
	 * frame has a valid checksum value so payload might be
	 * modified during TX checksum calculation. Disable TX
	 * checksum offloading but give users chance to enable it
	 * when they know their controller works without problems
	 * with TX checksum offloading.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split the whole buffer between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, split again for RX/TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		/* GENESIS: only XMAC-internal and Broadcom PHYs supported. */
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'S') {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 *
	 */
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        ifp->if_capenable |= IFCAP_VLAN_MTU;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	phy = MII_PHY_ANY;
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
			phy = 0;
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		phy = 0;
		break;
	}

	/* mii_attach() must be called without the softc lock held. */
	SK_IF_UNLOCK(sc_if);
	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
1528
1529/*
1530 * Attach the interface. Allocate softc structures, do ifmedia
1531 * setup and ethernet/BPF attach.
1532 */
static int
skc_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			error = 0, *port;
	uint8_t			skrs;
	const char		*pname = NULL;
	char			*revstr;

	sc = device_get_softc(dev);
	sc->sk_dev = dev;

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources */
#ifdef SK_USEIOSPACE
	sc->sk_res_spec = sk_res_spec_io;
#else
	sc->sk_res_spec = sk_res_spec_mem;
#endif
	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
	if (error) {
		/* Fall back to the other resource type before giving up. */
		if (sc->sk_res_spec == sk_res_spec_mem)
			sc->sk_res_spec = sk_res_spec_io;
		else
			sc->sk_res_spec = sk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
			    "I/O");
			goto fail;
		}
	}

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Expose the moderation setting as a per-device sysctl. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
		"SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
		"int_mod", &sc->sk_int_mod);
	if (error == 0) {
		/* Clamp out-of-range tunable values back to the default. */
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			device_printf(dev, "unknown ram size: %d\n", skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		/* Yukon encodes the RAM size in 4KB units (0 == 128KB). */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		 sc->sk_coppertype = 1;
	 else
		 sc->sk_coppertype = 0;

	/*
	 * Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to frequently be bogus.
	 */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T_A1:
	case DEVICEID_DLINK_DGE530T_B1:
		/* Stay with VPD PN. */
		(void) pci_get_vpd_ident(dev, &pname);
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			(void) pci_get_vpd_ident(dev, &pname);
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
			"chipver=%02x, rev=%x\n",
			pci_get_vendor(dev), pci_get_device(dev),
			sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	if (pname != NULL)
		device_printf(dev, "%s rev. %s(0x%x)\n",
			pname, revstr, sc->sk_rev);

	if (bootverbose) {
		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	/* Create the child device for port A; the port number is its ivar. */
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	/* Dual-MAC cards get a second child device for port B. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}
1800
1801/*
1802 * Shutdown hardware and free up resources. This can be called any
1803 * time after the mutex has been initialized. It is called in both
1804 * the error case in attach and the normal detach case so it needs
1805 * to be careful about only freeing resources that have actually been
1806 * allocated.
1807 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = sc_if->sk_ifp;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		/* Drain callouts after stop so they cannot reschedule work. */
		callout_drain(&sc_if->sk_tick_ch);
		callout_drain(&sc_if->sk_watchdog_ch);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/* ifp may be NULL when attach failed before if_alloc(). */
	if (ifp)
		if_free(ifp);
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	/* Release all DMA resources allocated in sk_attach(). */
	sk_dma_jumbo_free(sc_if);
	sk_dma_free(sc_if);
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1849
/*
 * Detach the controller: delete both port child devices (freeing the
 * port-number ivars allocated in skc_attach()), tear down the
 * interrupt handler, and release bus resources and mutexes.  Safe to
 * call from the skc_attach() failure path.
 */
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			/* Free the ivar before deleting the child that owns it. */
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);

	mtx_destroy(&sc->sk_mii_mtx);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1880
/* Argument for sk_dmamap_cb(): receives the loaded bus address. */
struct sk_dmamap_arg {
	bus_addr_t	sk_busaddr;	/* filled in by sk_dmamap_cb() */
};
1884
1885static void
1886sk_dmamap_cb(arg, segs, nseg, error)
1887	void			*arg;
1888	bus_dma_segment_t	*segs;
1889	int			nseg;
1890	int			error;
1891{
1892	struct sk_dmamap_arg	*ctx;
1893
1894	if (error != 0)
1895		return;
1896
1897	ctx = arg;
1898	ctx->sk_busaddr = segs[0].ds_addr;
1899}
1900
1901/*
1902 * Allocate jumbo buffer storage. The SysKonnect adapters support
1903 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1904 * use them in their drivers. In order for us to use them, we need
1905 * large 9K receive buffers, however standard mbuf clusters are only
1906 * 2048 bytes in size. Consequently, we need to allocate and manage
1907 * our own jumbo buffer pool. Fortunately, this does not require an
1908 * excessive amount of additional code.
1909 */
1910static int
1911sk_dma_alloc(sc_if)
1912	struct sk_if_softc	*sc_if;
1913{
1914	struct sk_dmamap_arg	ctx;
1915	struct sk_txdesc	*txd;
1916	struct sk_rxdesc	*rxd;
1917	int			error, i;
1918
1919	/* create parent tag */
1920	/*
1921	 * XXX
1922	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1923	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1924	 * However bz@ reported that it does not work on amd64 with > 4GB
1925	 * RAM. Until we have more clues of the breakage, disable DAC mode
1926	 * by limiting DMA address to be in 32bit address space.
1927	 */
1928	error = bus_dma_tag_create(
1929		    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1930		    1, 0,			/* algnmnt, boundary */
1931		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1932		    BUS_SPACE_MAXADDR,		/* highaddr */
1933		    NULL, NULL,			/* filter, filterarg */
1934		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1935		    0,				/* nsegments */
1936		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1937		    0,				/* flags */
1938		    NULL, NULL,			/* lockfunc, lockarg */
1939		    &sc_if->sk_cdata.sk_parent_tag);
1940	if (error != 0) {
1941		device_printf(sc_if->sk_if_dev,
1942		    "failed to create parent DMA tag\n");
1943		goto fail;
1944	}
1945
1946	/* create tag for Tx ring */
1947	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1948		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1949		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1950		    BUS_SPACE_MAXADDR,		/* highaddr */
1951		    NULL, NULL,			/* filter, filterarg */
1952		    SK_TX_RING_SZ,		/* maxsize */
1953		    1,				/* nsegments */
1954		    SK_TX_RING_SZ,		/* maxsegsize */
1955		    0,				/* flags */
1956		    NULL, NULL,			/* lockfunc, lockarg */
1957		    &sc_if->sk_cdata.sk_tx_ring_tag);
1958	if (error != 0) {
1959		device_printf(sc_if->sk_if_dev,
1960		    "failed to allocate Tx ring DMA tag\n");
1961		goto fail;
1962	}
1963
1964	/* create tag for Rx ring */
1965	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1966		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1967		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1968		    BUS_SPACE_MAXADDR,		/* highaddr */
1969		    NULL, NULL,			/* filter, filterarg */
1970		    SK_RX_RING_SZ,		/* maxsize */
1971		    1,				/* nsegments */
1972		    SK_RX_RING_SZ,		/* maxsegsize */
1973		    0,				/* flags */
1974		    NULL, NULL,			/* lockfunc, lockarg */
1975		    &sc_if->sk_cdata.sk_rx_ring_tag);
1976	if (error != 0) {
1977		device_printf(sc_if->sk_if_dev,
1978		    "failed to allocate Rx ring DMA tag\n");
1979		goto fail;
1980	}
1981
1982	/* create tag for Tx buffers */
1983	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1984		    1, 0,			/* algnmnt, boundary */
1985		    BUS_SPACE_MAXADDR,		/* lowaddr */
1986		    BUS_SPACE_MAXADDR,		/* highaddr */
1987		    NULL, NULL,			/* filter, filterarg */
1988		    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
1989		    SK_MAXTXSEGS,		/* nsegments */
1990		    MCLBYTES,			/* maxsegsize */
1991		    0,				/* flags */
1992		    NULL, NULL,			/* lockfunc, lockarg */
1993		    &sc_if->sk_cdata.sk_tx_tag);
1994	if (error != 0) {
1995		device_printf(sc_if->sk_if_dev,
1996		    "failed to allocate Tx DMA tag\n");
1997		goto fail;
1998	}
1999
2000	/* create tag for Rx buffers */
2001	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2002		    1, 0,			/* algnmnt, boundary */
2003		    BUS_SPACE_MAXADDR,		/* lowaddr */
2004		    BUS_SPACE_MAXADDR,		/* highaddr */
2005		    NULL, NULL,			/* filter, filterarg */
2006		    MCLBYTES,			/* maxsize */
2007		    1,				/* nsegments */
2008		    MCLBYTES,			/* maxsegsize */
2009		    0,				/* flags */
2010		    NULL, NULL,			/* lockfunc, lockarg */
2011		    &sc_if->sk_cdata.sk_rx_tag);
2012	if (error != 0) {
2013		device_printf(sc_if->sk_if_dev,
2014		    "failed to allocate Rx DMA tag\n");
2015		goto fail;
2016	}
2017
2018	/* allocate DMA'able memory and load the DMA map for Tx ring */
2019	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2020	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2021	    &sc_if->sk_cdata.sk_tx_ring_map);
2022	if (error != 0) {
2023		device_printf(sc_if->sk_if_dev,
2024		    "failed to allocate DMA'able memory for Tx ring\n");
2025		goto fail;
2026	}
2027
2028	ctx.sk_busaddr = 0;
2029	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2030	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2031	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2032	if (error != 0) {
2033		device_printf(sc_if->sk_if_dev,
2034		    "failed to load DMA'able memory for Tx ring\n");
2035		goto fail;
2036	}
2037	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2038
2039	/* allocate DMA'able memory and load the DMA map for Rx ring */
2040	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2041	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
2042	    &sc_if->sk_cdata.sk_rx_ring_map);
2043	if (error != 0) {
2044		device_printf(sc_if->sk_if_dev,
2045		    "failed to allocate DMA'able memory for Rx ring\n");
2046		goto fail;
2047	}
2048
2049	ctx.sk_busaddr = 0;
2050	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2051	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2052	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2053	if (error != 0) {
2054		device_printf(sc_if->sk_if_dev,
2055		    "failed to load DMA'able memory for Rx ring\n");
2056		goto fail;
2057	}
2058	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2059
2060	/* create DMA maps for Tx buffers */
2061	for (i = 0; i < SK_TX_RING_CNT; i++) {
2062		txd = &sc_if->sk_cdata.sk_txdesc[i];
2063		txd->tx_m = NULL;
2064		txd->tx_dmamap = NULL;
2065		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2066		    &txd->tx_dmamap);
2067		if (error != 0) {
2068			device_printf(sc_if->sk_if_dev,
2069			    "failed to create Tx dmamap\n");
2070			goto fail;
2071		}
2072	}
2073
2074	/* create DMA maps for Rx buffers */
2075	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2076	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2077		device_printf(sc_if->sk_if_dev,
2078		    "failed to create spare Rx dmamap\n");
2079		goto fail;
2080	}
2081	for (i = 0; i < SK_RX_RING_CNT; i++) {
2082		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2083		rxd->rx_m = NULL;
2084		rxd->rx_dmamap = NULL;
2085		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2086		    &rxd->rx_dmamap);
2087		if (error != 0) {
2088			device_printf(sc_if->sk_if_dev,
2089			    "failed to create Rx dmamap\n");
2090			goto fail;
2091		}
2092	}
2093
2094fail:
2095	return (error);
2096}
2097
/*
 * Allocate bus_dma resources for the jumbo Rx path: a tag and DMA'able
 * memory for the jumbo Rx descriptor ring, a buffer tag for 9k clusters,
 * and one DMA map per ring slot plus a spare.  On resource shortage this
 * does not fail the attach; it releases what was allocated and disables
 * jumbo frame support instead, so it always returns 0.
 */
static int
sk_dma_jumbo_alloc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_dmamap_arg	ctx;
	struct sk_rxdesc	*jrxd;
	int			error, i;

	/* Honor the tunable that disables jumbo frames outright. */
	if (jumbo_disable != 0) {
		device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
		sc_if->sk_jumbo_disable = 1;
		return (0);
	}
	/* create tag for jumbo Rx ring */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    SK_JUMBO_RX_RING_SZ,	/* maxsize */
		    1,				/* nsegments */
		    SK_JUMBO_RX_RING_SZ,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}

	/* create tag for jumbo Rx buffers: one MJUM9BYTES cluster, 1 segment */
	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
		    1, 0,			/* algnmnt, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    MJUM9BYTES,			/* maxsize */
		    1,				/* nsegments */
		    MJUM9BYTES,			/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &sc_if->sk_cdata.sk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}

	/* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring,
	    BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}

	/* sk_dmamap_cb deposits the ring's bus address in ctx.sk_busaddr. */
	ctx.sk_busaddr = 0;
	error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;

	/* create DMA maps for jumbo Rx buffers (spare map first) */
	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
	    &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->sk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->sk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}

	return (0);

jumbo_fail:
	/* Tear down partial allocations and fall back to standard frames. */
	sk_dma_jumbo_free(sc_if);
	device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc_if->sk_jumbo_disable = 1;
	return (0);
}
2198
/*
 * Release all non-jumbo DMA resources created by sk_dma_alloc():
 * Tx/Rx descriptor rings (unload map, free memory, destroy tag),
 * per-buffer Tx/Rx maps, the spare Rx map, and finally the parent tag.
 * Safe to call on a partially initialized softc; every step is guarded
 * by a NULL check and pointers are cleared after release.
 */
static void
sk_dma_free(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_txdesc	*txd;
	struct sk_rxdesc	*rxd;
	int			i;

	/* Tx ring */
	if (sc_if->sk_cdata.sk_tx_ring_tag) {
		/* Unload the map before freeing the ring memory. */
		if (sc_if->sk_cdata.sk_tx_ring_map)
			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
			    sc_if->sk_cdata.sk_tx_ring_map);
		if (sc_if->sk_cdata.sk_tx_ring_map &&
		    sc_if->sk_rdata.sk_tx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
			    sc_if->sk_rdata.sk_tx_ring,
			    sc_if->sk_cdata.sk_tx_ring_map);
		sc_if->sk_rdata.sk_tx_ring = NULL;
		sc_if->sk_cdata.sk_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
		sc_if->sk_cdata.sk_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc_if->sk_cdata.sk_rx_ring_tag) {
		if (sc_if->sk_cdata.sk_rx_ring_map)
			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
			    sc_if->sk_cdata.sk_rx_ring_map);
		if (sc_if->sk_cdata.sk_rx_ring_map &&
		    sc_if->sk_rdata.sk_rx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
			    sc_if->sk_rdata.sk_rx_ring,
			    sc_if->sk_cdata.sk_rx_ring_map);
		sc_if->sk_rdata.sk_rx_ring = NULL;
		sc_if->sk_cdata.sk_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
		sc_if->sk_cdata.sk_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy the per-descriptor maps, then the tag. */
	if (sc_if->sk_cdata.sk_tx_tag) {
		for (i = 0; i < SK_TX_RING_CNT; i++) {
			txd = &sc_if->sk_cdata.sk_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
		sc_if->sk_cdata.sk_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc_if->sk_cdata.sk_rx_tag) {
		for (i = 0; i < SK_RX_RING_CNT; i++) {
			rxd = &sc_if->sk_cdata.sk_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->sk_cdata.sk_rx_sparemap) {
			bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
			    sc_if->sk_cdata.sk_rx_sparemap);
			sc_if->sk_cdata.sk_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
		sc_if->sk_cdata.sk_rx_tag = NULL;
	}

	/* Parent tag goes last, after all child tags are destroyed. */
	if (sc_if->sk_cdata.sk_parent_tag) {
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
		sc_if->sk_cdata.sk_parent_tag = NULL;
	}
}
2274
/*
 * Release the jumbo Rx DMA resources created by sk_dma_jumbo_alloc():
 * the jumbo descriptor ring, the per-buffer maps, the spare map and the
 * buffer tag.  Tolerates partial allocation (also used as the error
 * unwind path of sk_dma_jumbo_alloc()).
 */
static void
sk_dma_jumbo_free(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_rxdesc	*jrxd;
	int			i;

	/* jumbo Rx ring: unload, free memory, destroy tag — in that order. */
	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map)
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		if (sc_if->sk_cdata.sk_jumbo_rx_ring_map &&
		    sc_if->sk_rdata.sk_jumbo_rx_ring)
			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
			    sc_if->sk_rdata.sk_jumbo_rx_ring,
			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
		sc_if->sk_cdata.sk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
	}

	/* jumbo Rx buffers: per-descriptor maps, spare map, then the tag. */
	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->sk_cdata.sk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
	}
}
2318
2319static void
2320sk_txcksum(ifp, m, f)
2321	struct ifnet		*ifp;
2322	struct mbuf		*m;
2323	struct sk_tx_desc	*f;
2324{
2325	struct ip		*ip;
2326	u_int16_t		offset;
2327	u_int8_t 		*p;
2328
2329	offset = sizeof(struct ip) + ETHER_HDR_LEN;
2330	for(; m && m->m_len == 0; m = m->m_next)
2331		;
2332	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2333		if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2334		/* checksum may be corrupted */
2335		goto sendit;
2336	}
2337	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2338		if (m->m_len != ETHER_HDR_LEN) {
2339			if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2340			    __func__);
2341			/* checksum may be corrupted */
2342			goto sendit;
2343		}
2344		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2345			;
2346		if (m == NULL) {
2347			offset = sizeof(struct ip) + ETHER_HDR_LEN;
2348			/* checksum may be corrupted */
2349			goto sendit;
2350		}
2351		ip = mtod(m, struct ip *);
2352	} else {
2353		p = mtod(m, u_int8_t *);
2354		p += ETHER_HDR_LEN;
2355		ip = (struct ip *)p;
2356	}
2357	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2358
2359sendit:
2360	f->sk_csum_startval = 0;
2361	f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2362	    (offset << 16));
2363}
2364
/*
 * Map *m_head for DMA and build its chain of Tx descriptors.
 * May replace *m_head (m_defrag) or free it and set it to NULL on
 * unrecoverable mapping errors.  Returns 0 on success, ENOBUFS when no
 * free Tx descriptor/ring slots are available, or the mapping error.
 * Called with the interface lock held.
 */
static int
sk_encap(sc_if, m_head)
        struct sk_if_softc	*sc_if;
        struct mbuf		**m_head;
{
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
	u_int32_t		cflags, frag, si, sk_ctl;
	int			error, i, nseg;

	SK_IF_LOCK_ASSERT(sc_if);

	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	/* Not enough ring slots left; undo the mapping and tell the caller. */
	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
		cflags = SK_OPCODE_CSUM;
	else
		cflags = SK_OPCODE_DEFAULT;
	/* si remembers the first descriptor; its OWN bit is set last. */
	si = frag = sc_if->sk_cdata.sk_tx_prod;
	for (i = 0; i < nseg; i++) {
		f = &sc_if->sk_rdata.sk_tx_ring[frag];
		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
		sk_ctl = txsegs[i].ds_len | cflags;
		if (i == 0) {
			if (cflags == SK_OPCODE_CSUM)
				sk_txcksum(sc_if->sk_ifp, m, f);
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		} else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		sc_if->sk_cdata.sk_tx_cnt++;
		SK_INC(frag, SK_TX_RING_CNT);
	}
	sc_if->sk_cdata.sk_tx_prod = frag;

	/* set EOF on the last descriptor */
	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
	f = &sc_if->sk_rdata.sk_tx_ring[frag];
	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);

	/* turn the first descriptor ownership to NIC */
	f = &sc_if->sk_rdata.sk_tx_ring[si];
	f->sk_ctl |= htole32(SK_TXCTL_OWN);

	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* sync descriptors */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
2456
2457static void
2458sk_start(ifp)
2459	struct ifnet		*ifp;
2460{
2461	struct sk_if_softc *sc_if;
2462
2463	sc_if = ifp->if_softc;
2464
2465	SK_IF_LOCK(sc_if);
2466	sk_start_locked(ifp);
2467	SK_IF_UNLOCK(sc_if);
2468
2469	return;
2470}
2471
/*
 * Drain the interface send queue into the Tx ring while slots remain.
 * Kicks the Tx BMU and arms the watchdog only if at least one frame was
 * queued.  Must be called with the interface lock held.
 */
static void
sk_start_locked(ifp)
	struct ifnet		*ifp;
{
        struct sk_softc		*sc;
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head;
	int			enq;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, &m_head)) {
			/* m_head == NULL means sk_encap consumed the mbuf. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->sk_watchdog_timer = 5;
	}
}
2521
2522
/*
 * Per-second Tx watchdog callout.  The timer only "fires" when it
 * counts down from 1 to 0; a value of 0 means the watchdog is idle.
 * On expiry, reclaim completed transmissions first — if descriptors
 * are still outstanding the chip is considered wedged and the
 * interface is reinitialized.
 */
static void
sk_watchdog(arg)
	void			*arg;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	ifp = arg;
	sc_if = ifp->if_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Idle (== 0) or still counting down (--timer nonzero): no timeout. */
	if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
		goto done;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	sk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		if_printf(sc_if->sk_ifp, "watchdog timeout\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		sk_init_locked(sc_if);
	}

done:
	/* Rearm ourselves for the next second. */
	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

	return;
}
2555
2556static int
2557skc_shutdown(dev)
2558	device_t		dev;
2559{
2560	struct sk_softc		*sc;
2561
2562	sc = device_get_softc(dev);
2563	SK_LOCK(sc);
2564
2565	/* Turn off the 'driver is loaded' LED. */
2566	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2567
2568	/*
2569	 * Reset the GEnesis controller. Doing this should also
2570	 * assert the resets on the attached XMAC(s).
2571	 */
2572	sk_reset(sc);
2573	SK_UNLOCK(sc);
2574
2575	return (0);
2576}
2577
2578static int
2579skc_suspend(dev)
2580	device_t		dev;
2581{
2582	struct sk_softc		*sc;
2583	struct sk_if_softc	*sc_if0, *sc_if1;
2584	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2585
2586	sc = device_get_softc(dev);
2587
2588	SK_LOCK(sc);
2589
2590	sc_if0 = sc->sk_if[SK_PORT_A];
2591	sc_if1 = sc->sk_if[SK_PORT_B];
2592	if (sc_if0 != NULL)
2593		ifp0 = sc_if0->sk_ifp;
2594	if (sc_if1 != NULL)
2595		ifp1 = sc_if1->sk_ifp;
2596	if (ifp0 != NULL)
2597		sk_stop(sc_if0);
2598	if (ifp1 != NULL)
2599		sk_stop(sc_if1);
2600	sc->sk_suspended = 1;
2601
2602	SK_UNLOCK(sc);
2603
2604	return (0);
2605}
2606
2607static int
2608skc_resume(dev)
2609	device_t		dev;
2610{
2611	struct sk_softc		*sc;
2612	struct sk_if_softc	*sc_if0, *sc_if1;
2613	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2614
2615	sc = device_get_softc(dev);
2616
2617	SK_LOCK(sc);
2618
2619	sc_if0 = sc->sk_if[SK_PORT_A];
2620	sc_if1 = sc->sk_if[SK_PORT_B];
2621	if (sc_if0 != NULL)
2622		ifp0 = sc_if0->sk_ifp;
2623	if (sc_if1 != NULL)
2624		ifp1 = sc_if1->sk_ifp;
2625	if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2626		sk_init_locked(sc_if0);
2627	if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2628		sk_init_locked(sc_if1);
2629	sc->sk_suspended = 0;
2630
2631	SK_UNLOCK(sc);
2632
2633	return (0);
2634}
2635
2636/*
2637 * According to the data sheet from SK-NET GENESIS the hardware can compute
2638 * two Rx checksums at the same time(Each checksum start position is
2639 * programmed in Rx descriptors). However it seems that TCP/UDP checksum
2640 * does not work at least on my Yukon hardware. I tried every possible ways
2641 * to get correct checksum value but couldn't get correct one. So TCP/UDP
2642 * checksum offload was disabled at the moment and only IP checksum offload
2643 * was enabled.
 * As the normal IP header size is 20 bytes I can't expect it would give an
2645 * increase in throughput. However it seems it doesn't hurt performance in
2646 * my testing. If there is a more detailed information for checksum secret
2647 * of the hardware in question please contact yongari@FreeBSD.org to add
2648 * TCP/UDP checksum offload support.
2649 */
/*
 * Validate the hardware-computed Rx checksum for a plain IPv4 frame and
 * set CSUM_IP_CHECKED/CSUM_IP_VALID on the mbuf accordingly.  'csum'
 * holds the two 16-bit checksum values the chip computed (see the block
 * comment above).  Frames that are not simple IPv4, or that carry IP
 * options, are left unmarked and fall back to software checksumming.
 */
static __inline void
sk_rxcksum(ifp, m, csum)
	struct ifnet		*ifp;
	struct mbuf		*m;
	u_int32_t		csum;
{
	struct ether_header	*eh;
	struct ip		*ip;
	int32_t			hlen, len, pktlen;
	u_int16_t		csum1, csum2, ipcsum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;
	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	/* Sanity-check the IP header length against the frame length. */
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;

	/* Low word: first checksum; high word: second checksum. */
	csum1 = htons(csum & 0xffff);
	csum2 = htons((csum >> 16) & 0xffff);
	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * If the second checksum value is correct we can compute IP
		 * checksum with simple math. Unfortunately the second checksum
		 * value is wrong so we can't verify the checksum from the
		 * value(It seems there is some magic here to get correct
		 * value). If the second checksum value is correct it also
		 * means we can get TCP/UDP checksum) here. However, it still
		 * needs pseudo header checksum calculation due to hardware
		 * limitations.
		 */
		return;
	}
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
	if (ipcsum == 0xffff)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
}
2701
2702static __inline int
2703sk_rxvalid(sc, stat, len)
2704	struct sk_softc		*sc;
2705	u_int32_t		stat, len;
2706{
2707
2708	if (sc->sk_type == SK_GENESIS) {
2709		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2710		    XM_RXSTAT_BYTES(stat) != len)
2711			return (0);
2712	} else {
2713		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2714		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2715		    YU_RXSTAT_JABBER)) != 0 ||
2716		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2717		    YU_RXSTAT_BYTES(stat) != len)
2718			return (0);
2719	}
2720
2721	return (1);
2722}
2723
/*
 * Standard-MTU receive handler.  Walks the Rx ring from the current
 * consumer index until a descriptor still owned by the NIC is found.
 * Good frames are handed to if_input() (with the lock dropped around
 * the call); bad frames, and frames for which a replacement buffer
 * cannot be allocated, recycle their buffer via sk_discard_rxbuf().
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*rxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Make the NIC's descriptor writes visible to the CPU. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Descriptor still owned by the NIC: nothing more to do. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/*
		 * Each frame must be complete in one descriptor (FIRST and
		 * LAST set), sized within bounds, and pass the MAC status
		 * check; otherwise count an error and recycle the buffer.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			ifp->if_ierrors++;
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}

		m = rxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Refill the slot first; on failure keep the old buffer. */
		if (sk_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* reuse old buffer */
			sk_discard_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock while the stack processes the packet. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		sc_if->sk_cdata.sk_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
		    sc_if->sk_cdata.sk_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
2790
/*
 * Jumbo-frame receive handler.  Identical in structure to sk_rxeof()
 * but operates on the jumbo Rx ring, allows frames up to
 * SK_JUMBO_FRAMELEN, and refills slots with sk_jumbo_newbuf().
 */
static void
sk_jumbo_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_rx_desc	*cur_rx;
	struct sk_rxdesc	*jrxd;
	int			cons, prog;
	u_int32_t		csum, rxstat, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	/* Make the NIC's descriptor writes visible to the CPU. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
	    prog < SK_JUMBO_RX_RING_CNT;
	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
		sk_ctl = le32toh(cur_rx->sk_ctl);
		/* Descriptor still owned by the NIC: stop here. */
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
		rxstat = le32toh(cur_rx->sk_xmac_rxstat);

		/* Same validity checks as sk_rxeof(), with the jumbo bound. */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
			ifp->if_ierrors++;
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}

		m = jrxd->rx_m;
		csum = le32toh(cur_rx->sk_csum);
		/* Refill the slot first; on failure keep the old buffer. */
		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* reuse old buffer */
			sk_discard_jumbo_rxbuf(sc_if, cons);
			continue;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
		ifp->if_ipackets++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			sk_rxcksum(ifp, m, csum);
		/* Drop the lock while the stack processes the packet. */
		SK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		SK_IF_LOCK(sc_if);
	}

	if (prog > 0) {
		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
2858
/*
 * Reclaim transmitted descriptors: walk the Tx ring from the consumer
 * index, freeing the mbuf and unloading the DMA map whenever the last
 * fragment of a frame has been completed, and moving its txdesc back to
 * the free queue.  Clears OACTIVE as slots free up and disarms the
 * watchdog once the ring is empty.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx, sk_ctl;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	if (txd == NULL)
		return;
	/* Pick up the NIC's completion writes before inspecting sk_ctl. */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
			break;
		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
		sk_ctl = le32toh(cur_tx->sk_ctl);
		/* Still owned by the NIC: nothing beyond this is done. */
		if (sk_ctl & SK_TXCTL_OWN)
			break;
		sc_if->sk_cdata.sk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Intermediate fragment: frame not fully reclaimed yet. */
		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
			continue;
		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
	}
	sc_if->sk_cdata.sk_tx_cons = idx;
	/* Keep the watchdog armed only while frames remain outstanding. */
	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2910
/*
 * XMAC link-poll callout: watches for link resync after a link-down
 * event by sampling the GPIO link_sync pin, per the procedure described
 * below.  Reschedules itself until the link is stable, then re-enables
 * the GP0 interrupt and stops polling.  Broadcom PHYs are handled by
 * sk_intr_bcom() instead.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	ifp = sc_if->sk_ifp;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP))
		return;

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not yet stable: poll again in one second. */
	if (i != 3) {
		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Reading the ISR clears any pending interrupt. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	callout_stop(&sc_if->sk_tick_ch);
}
2955
2956static void
2957sk_yukon_tick(xsc_if)
2958	void			*xsc_if;
2959{
2960	struct sk_if_softc	*sc_if;
2961	struct mii_data		*mii;
2962
2963	sc_if = xsc_if;
2964	mii = device_get_softc(sc_if->sk_miibus);
2965
2966	mii_tick(mii);
2967	callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2968}
2969
/*
 * Service a Broadcom PHY interrupt: ack the PHY ISR, then on link or
 * autonegotiation changes update link state, the link LED and the MII
 * layer.  Rx/Tx are disabled in the MMU for the duration and re-enabled
 * at the end.  Also invoked from sk_tick() for BCOM PHYs.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = sc_if->sk_ifp;

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface not running: just reinitialize the XMAC and bail. */
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and mark link down. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)established: unmask PHY interrupts. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
	    		    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* No link yet: keep polling via sk_tick(). */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	/* Re-enable the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
3023
/*
 * Service an XMAC interrupt: on GP0 (link down) mask GP0 and start the
 * sk_tick() link poll; flush the Tx/Rx FIFOs on underrun/overrun.  The
 * ISR reads are significant — reading XM_ISR clears pending bits, so
 * the order of operations here must not be changed.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() sees a stable link. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Final read clears anything that became pending meanwhile. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
3059
3060static void
3061sk_intr_yukon(sc_if)
3062	struct sk_if_softc	*sc_if;
3063{
3064	u_int8_t status;
3065
3066	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3067	/* RX overrun */
3068	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3069		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3070		    SK_RFCTL_RX_FIFO_OVER);
3071	}
3072	/* TX underrun */
3073	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3074		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3075		    SK_TFCTL_TX_FIFO_UNDER);
3076	}
3077}
3078
3079static void
3080sk_intr(xsc)
3081	void			*xsc;
3082{
3083	struct sk_softc		*sc = xsc;
3084	struct sk_if_softc	*sc_if0, *sc_if1;
3085	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
3086	u_int32_t		status;
3087
3088	SK_LOCK(sc);
3089
3090	status = CSR_READ_4(sc, SK_ISSR);
3091	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3092		goto done_locked;
3093
3094	sc_if0 = sc->sk_if[SK_PORT_A];
3095	sc_if1 = sc->sk_if[SK_PORT_B];
3096
3097	if (sc_if0 != NULL)
3098		ifp0 = sc_if0->sk_ifp;
3099	if (sc_if1 != NULL)
3100		ifp1 = sc_if1->sk_ifp;
3101
3102	for (; (status &= sc->sk_intrmask) != 0;) {
3103		/* Handle receive interrupts first. */
3104		if (status & SK_ISR_RX1_EOF) {
3105			if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3106				sk_jumbo_rxeof(sc_if0);
3107			else
3108				sk_rxeof(sc_if0);
3109			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3110			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3111		}
3112		if (status & SK_ISR_RX2_EOF) {
3113			if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3114				sk_jumbo_rxeof(sc_if1);
3115			else
3116				sk_rxeof(sc_if1);
3117			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3118			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3119		}
3120
3121		/* Then transmit interrupts. */
3122		if (status & SK_ISR_TX1_S_EOF) {
3123			sk_txeof(sc_if0);
3124			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3125		}
3126		if (status & SK_ISR_TX2_S_EOF) {
3127			sk_txeof(sc_if1);
3128			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3129		}
3130
3131		/* Then MAC interrupts. */
3132		if (status & SK_ISR_MAC1 &&
3133		    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3134			if (sc->sk_type == SK_GENESIS)
3135				sk_intr_xmac(sc_if0);
3136			else
3137				sk_intr_yukon(sc_if0);
3138		}
3139
3140		if (status & SK_ISR_MAC2 &&
3141		    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3142			if (sc->sk_type == SK_GENESIS)
3143				sk_intr_xmac(sc_if1);
3144			else
3145				sk_intr_yukon(sc_if1);
3146		}
3147
3148		if (status & SK_ISR_EXTERNAL_REG) {
3149			if (ifp0 != NULL &&
3150			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3151				sk_intr_bcom(sc_if0);
3152			if (ifp1 != NULL &&
3153			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3154				sk_intr_bcom(sc_if1);
3155		}
3156		status = CSR_READ_4(sc, SK_ISSR);
3157	}
3158
3159	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3160
3161	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3162		sk_start_locked(ifp0);
3163	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3164		sk_start_locked(ifp1);
3165
3166done_locked:
3167	SK_UNLOCK(sc);
3168}
3169
/*
 * Bring the XMAC (GENESIS) MAC out of reset and program it: station
 * address, broadcast/FCS/padding behavior, jumbo-frame FIFO mode,
 * RX filter, interrupt mask and the MAC arbiter.  Also performs
 * BCM5400 PHY workarounds when an external Broadcom PHY is present.
 */
static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		eaddr[(ETHER_ADDR_LEN+1)/2];
	/* Magic register/value pairs for the early-BCM5400 errata below. */
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then mask off most PHY interrupts. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor (or suppress) broadcast reception per interface flags. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
		    XM_MODE_RX_INRANGELEN);
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	} else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set Rx filter */
	sk_rxfilter_genesis(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupts. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): uses the B2 recovery value; confirm intent. */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
3331
/*
 * Bring the GMAC/GPHY (Yukon) out of reset and program it: PHY mode
 * (copper/fiber), MIB counter clear, receive/transmit parameters,
 * station address, RX filter and the RX/TX MAC FIFOs.  Includes the
 * COMA-mode workaround for Yukon-Lite rev A3 and later.
 */
static void
sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	int			i;

	SK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Build the GPHY control word: autoneg, no sleep, pause enabled. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	/* Pulse the PHY reset with the new mode bits, then release it. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > SK_MAX_FRAMELEN)
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      IF_LLADDR(sc_if->sk_ifp)[i * 2] |
			      IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
	}

	/* Write Source Address 2 from the per-port window registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set Rx filter */
	sk_rxfilter_yukon(sc_if);

	/*
	 * Counter overflow interrupt masks are written as 0 here.
	 * NOTE(review): original comment said "enable interrupt mask";
	 * writing 0 looks like it masks all counter overflow interrupts —
	 * confirm mask polarity against the GMAC documentation.
	 */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}
3460
3461/*
3462 * Note that to properly initialize any part of the GEnesis chip,
3463 * you first have to take it out of reset mode.
3464 */
3465static void
3466sk_init(xsc)
3467	void			*xsc;
3468{
3469	struct sk_if_softc	*sc_if = xsc;
3470
3471	SK_IF_LOCK(sc_if);
3472	sk_init_locked(sc_if);
3473	SK_IF_UNLOCK(sc_if);
3474
3475	return;
3476}
3477
/*
 * Main hardware initialization path (lock held).  Stops the interface,
 * programs LEDs, the descriptor poll timer, the MAC (XMAC or GMAC),
 * FIFOs, transmit arbiter, RAM buffers and BMUs, initializes the
 * RX/TX descriptor rings, sets interrupt moderation and masks, and
 * finally enables the MAC state machines and starts the timers.
 */
static void
sk_init_locked(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;
	u_int32_t		imr;
	int			error;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Already up — nothing to do. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * SK-NET GENESIS data sheet says that possibility of losing Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism to send a Start transmit command to initiate transfer
	 * of ready descriptors regulary. To cope with this issue sk(4) now
	 * enables descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
	 * command instead of waiting for next descriptor polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* TX RAM buffer runs in store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	/* Point the RX BMU at the (jumbo or standard) ring, per MTU. */
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
	} else {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
	}

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));

	/* Init descriptors */
	if (ifp->if_mtu > SK_MAX_FRAMELEN)
		error = sk_init_jumbo_rx_ring(sc_if);
	else
		error = sk_init_rx_ring(sc_if);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "initialization failed: no memory for rx buffers\n");
		sk_stop(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Set interrupt moderation if changed via sysctl. */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
		    sc->sk_int_ticks));
		if (bootverbose)
			device_printf(sc_if->sk_if_dev,
			    "interrupt moderation is %d us.\n",
			    sc->sk_int_mod);
	}

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Yukon needs a periodic tick to drain MIB counters. */
	switch (sc->sk_type) {
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
		break;
	}

	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

	return;
}
3677
/*
 * Stop the interface (lock held): cancel timers, halt the TX/RX BMUs,
 * put the PHY and MAC blocks back into reset, disable this port's
 * interrupts and free every mbuf still held by the RX, jumbo-RX and
 * TX descriptor rings.
 */
static void
sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct sk_txdesc	*txd;
	struct sk_rxdesc	*rxd;
	struct sk_rxdesc	*jrxd;
	struct ifnet		*ifp;
	u_int32_t		val;

	SK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	callout_stop(&sc_if->sk_tick_ch);
	callout_stop(&sc_if->sk_watchdog_ch);

	/* stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
	/* stop transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	/* Spin until the BMU acknowledges the stop (bit clears). */
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if ((val & SK_TXBMU_TX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "can not stop transfer of Tx descriptor\n");
	/* stop transfer of Rx descriptors */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if ((val & SK_RXBMU_RX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "can not stop transfer of Rx descriptor\n");

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the XM_GPIO write below (and the XM_ISR/XM_IMR
	 * accesses further down) run unconditionally, even on Yukon
	 * where the switch handles the reset — confirm they are
	 * harmless no-ops on non-GENESIS hardware.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Drain and mask all XMAC interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	return;
}
3808
3809static int
3810sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3811{
3812	int error, value;
3813
3814	if (!arg1)
3815		return (EINVAL);
3816	value = *(int *)arg1;
3817	error = sysctl_handle_int(oidp, &value, 0, req);
3818	if (error || !req->newptr)
3819		return (error);
3820	if (value < low || value > high)
3821		return (EINVAL);
3822	*(int *)arg1 = value;
3823	return (0);
3824}
3825
3826static int
3827sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3828{
3829	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3830}
3831