if_sk.c revision 146734
1/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2
3/*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34/*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/dev/sk/if_sk.c 146734 2005-05-29 04:42:30Z nyan $");
52
53/*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * 	The XaQti XMAC II datasheet,
58 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71/*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88#include <sys/param.h>
89#include <sys/systm.h>
90#include <sys/sockio.h>
91#include <sys/mbuf.h>
92#include <sys/malloc.h>
93#include <sys/kernel.h>
94#include <sys/module.h>
95#include <sys/socket.h>
96#include <sys/queue.h>
97#include <sys/sysctl.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104
105#include <net/bpf.h>
106
107#include <vm/vm.h>              /* for vtophys */
108#include <vm/pmap.h>            /* for vtophys */
109#include <machine/bus.h>
110#include <machine/resource.h>
111#include <sys/bus.h>
112#include <sys/rman.h>
113
114#include <dev/mii/mii.h>
115#include <dev/mii/miivar.h>
116#include <dev/mii/brgphyreg.h>
117
118#include <dev/pci/pcireg.h>
119#include <dev/pci/pcivar.h>
120
121#if 0
122#define SK_USEIOSPACE
123#endif
124
125#include <pci/if_skreg.h>
126#include <pci/xmaciireg.h>
127#include <pci/yukonreg.h>
128
129MODULE_DEPEND(sk, pci, 1, 1, 1);
130MODULE_DEPEND(sk, ether, 1, 1, 1);
131MODULE_DEPEND(sk, miibus, 1, 1, 1);
132
133/* "controller miibus0" required.  See GENERIC if you get errors here. */
134#include "miibus_if.h"
135
136#ifndef lint
137static const char rcsid[] =
138  "$FreeBSD: head/sys/dev/sk/if_sk.c 146734 2005-05-29 04:42:30Z nyan $";
139#endif
140
141static struct sk_type sk_devs[] = {
142	{
143		VENDORID_SK,
144		DEVICEID_SK_V1,
145		"SysKonnect Gigabit Ethernet (V1.0)"
146	},
147	{
148		VENDORID_SK,
149		DEVICEID_SK_V2,
150		"SysKonnect Gigabit Ethernet (V2.0)"
151	},
152	{
153		VENDORID_MARVELL,
154		DEVICEID_SK_V2,
155		"Marvell Gigabit Ethernet"
156	},
157	{
158		VENDORID_MARVELL,
159		DEVICEID_BELKIN_5005,
160		"Belkin F5D5005 Gigabit Ethernet"
161	},
162	{
163		VENDORID_3COM,
164		DEVICEID_3COM_3C940,
165		"3Com 3C940 Gigabit Ethernet"
166	},
167	{
168		VENDORID_LINKSYS,
169		DEVICEID_LINKSYS_EG1032,
170		"Linksys EG1032 Gigabit Ethernet"
171	},
172	{
173		VENDORID_DLINK,
174		DEVICEID_DLINK_DGE530T,
175		"D-Link DGE-530T Gigabit Ethernet"
176	},
177	{ 0, 0, NULL }
178};
179
180static int skc_probe(device_t);
181static int skc_attach(device_t);
182static int skc_detach(device_t);
183static void skc_shutdown(device_t);
184static int sk_detach(device_t);
185static int sk_probe(device_t);
186static int sk_attach(device_t);
187static void sk_tick(void *);
188static void sk_intr(void *);
189static void sk_intr_xmac(struct sk_if_softc *);
190static void sk_intr_bcom(struct sk_if_softc *);
191static void sk_intr_yukon(struct sk_if_softc *);
192static void sk_rxeof(struct sk_if_softc *);
193static void sk_txeof(struct sk_if_softc *);
194static int sk_encap(struct sk_if_softc *, struct mbuf *,
195					u_int32_t *);
196static void sk_start(struct ifnet *);
197static int sk_ioctl(struct ifnet *, u_long, caddr_t);
198static void sk_init(void *);
199static void sk_init_xmac(struct sk_if_softc *);
200static void sk_init_yukon(struct sk_if_softc *);
201static void sk_stop(struct sk_if_softc *);
202static void sk_watchdog(struct ifnet *);
203static int sk_ifmedia_upd(struct ifnet *);
204static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
205static void sk_reset(struct sk_softc *);
206static int sk_newbuf(struct sk_if_softc *,
207					struct sk_chain *, struct mbuf *);
208static int sk_alloc_jumbo_mem(struct sk_if_softc *);
209static void sk_free_jumbo_mem(struct sk_if_softc *);
210static void *sk_jalloc(struct sk_if_softc *);
211static void sk_jfree(void *, void *);
212static int sk_init_rx_ring(struct sk_if_softc *);
213static void sk_init_tx_ring(struct sk_if_softc *);
214static u_int32_t sk_win_read_4(struct sk_softc *, int);
215static u_int16_t sk_win_read_2(struct sk_softc *, int);
216static u_int8_t sk_win_read_1(struct sk_softc *, int);
217static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
218static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
219static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
220static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
221static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
222static void sk_vpd_read(struct sk_softc *);
223
224static int sk_miibus_readreg(device_t, int, int);
225static int sk_miibus_writereg(device_t, int, int, int);
226static void sk_miibus_statchg(device_t);
227
228static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
229static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
230						int);
231static void sk_xmac_miibus_statchg(struct sk_if_softc *);
232
233static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
234static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
235						int);
236static void sk_marv_miibus_statchg(struct sk_if_softc *);
237
238static uint32_t sk_xmchash(const uint8_t *);
239static uint32_t sk_gmchash(const uint8_t *);
240static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
241static void sk_setmulti(struct sk_if_softc *);
242static void sk_setpromisc(struct sk_if_softc *);
243
244static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
245static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
246
247#ifdef SK_USEIOSPACE
248#define SK_RES		SYS_RES_IOPORT
249#define SK_RID		SK_PCI_LOIO
250#else
251#define SK_RES		SYS_RES_MEMORY
252#define SK_RID		SK_PCI_LOMEM
253#endif
254
255/*
256 * Note that we have newbus methods for both the GEnesis controller
257 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
258 * the miibus code is a child of the XMACs. We need to do it this way
259 * so that the miibus drivers can access the PHY registers on the
260 * right PHY. It's not quite what I had in mind, but it's the only
261 * design that achieves the desired effect.
262 */
263static device_method_t skc_methods[] = {
264	/* Device interface */
265	DEVMETHOD(device_probe,		skc_probe),
266	DEVMETHOD(device_attach,	skc_attach),
267	DEVMETHOD(device_detach,	skc_detach),
268	DEVMETHOD(device_shutdown,	skc_shutdown),
269
270	/* bus interface */
271	DEVMETHOD(bus_print_child,	bus_generic_print_child),
272	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
273
274	{ 0, 0 }
275};
276
277static driver_t skc_driver = {
278	"skc",
279	skc_methods,
280	sizeof(struct sk_softc)
281};
282
283static devclass_t skc_devclass;
284
285static device_method_t sk_methods[] = {
286	/* Device interface */
287	DEVMETHOD(device_probe,		sk_probe),
288	DEVMETHOD(device_attach,	sk_attach),
289	DEVMETHOD(device_detach,	sk_detach),
290	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
291
292	/* bus interface */
293	DEVMETHOD(bus_print_child,	bus_generic_print_child),
294	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
295
296	/* MII interface */
297	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
298	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
299	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
300
301	{ 0, 0 }
302};
303
304static driver_t sk_driver = {
305	"sk",
306	sk_methods,
307	sizeof(struct sk_if_softc)
308};
309
310static devclass_t sk_devclass;
311
312DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
313DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
314DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
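/*
 * Attachment chain: skc attaches to the PCI bus, each sk interface
 * attaches to skc, and miibus attaches to sk, mirroring the hierarchy
 * described in the comment above skc_methods.
 */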
315
316#define SK_SETBIT(sc, reg, x)		\
317	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
318
319#define SK_CLRBIT(sc, reg, x)		\
320	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
321
322#define SK_WIN_SETBIT_4(sc, reg, x)	\
323	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
324
325#define SK_WIN_CLRBIT_4(sc, reg, x)	\
326	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
327
328#define SK_WIN_SETBIT_2(sc, reg, x)	\
329	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
330
331#define SK_WIN_CLRBIT_2(sc, reg, x)	\
332	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
333
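/*
 * Register access helpers.  With SK_USEIOSPACE defined, the chip is
 * reached through an I/O window: SK_WIN(reg) is first written to the
 * RAP register to select the window, and the access then goes to
 * SK_WIN_BASE + SK_REG(reg).  In the default memory-mapped case
 * (SK_USEIOSPACE is compiled out above) registers are read and
 * written directly.
 */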
334static u_int32_t
335sk_win_read_4(sc, reg)
336	struct sk_softc		*sc;
337	int			reg;
338{
339#ifdef SK_USEIOSPACE
340	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
341	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
342#else
343	return(CSR_READ_4(sc, reg));
344#endif
345}
346
347static u_int16_t
348sk_win_read_2(sc, reg)
349	struct sk_softc		*sc;
350	int			reg;
351{
352#ifdef SK_USEIOSPACE
353	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
354	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
355#else
356	return(CSR_READ_2(sc, reg));
357#endif
358}
359
360static u_int8_t
361sk_win_read_1(sc, reg)
362	struct sk_softc		*sc;
363	int			reg;
364{
365#ifdef SK_USEIOSPACE
366	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
367	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
368#else
369	return(CSR_READ_1(sc, reg));
370#endif
371}
372
373static void
374sk_win_write_4(sc, reg, val)
375	struct sk_softc		*sc;
376	int			reg;
377	u_int32_t		val;
378{
379#ifdef SK_USEIOSPACE
380	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
381	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
382#else
383	CSR_WRITE_4(sc, reg, val);
384#endif
385	return;
386}
387
388static void
389sk_win_write_2(sc, reg, val)
390	struct sk_softc		*sc;
391	int			reg;
392	u_int32_t		val;
393{
394#ifdef SK_USEIOSPACE
395	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
396	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
397#else
398	CSR_WRITE_2(sc, reg, val);
399#endif
400	return;
401}
402
403static void
404sk_win_write_1(sc, reg, val)
405	struct sk_softc		*sc;
406	int			reg;
407	u_int32_t		val;
408{
409#ifdef SK_USEIOSPACE
410	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
411	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
412#else
413	CSR_WRITE_1(sc, reg, val);
414#endif
415	return;
416}
417
418/*
419 * The VPD EEPROM contains Vital Product Data, as suggested in
420 * the PCI 2.1 specification. The VPD data is separated into areas
421 * denoted by resource IDs. The SysKonnect VPD contains an ID string
422 * resource (the name of the adapter), a read-only area resource
423 * containing various key/data fields and a read/write area which
424 * can be used to store asset management information or log messages.
425 * We read the ID string and the read-only area into buffers attached to
426 * the controller softc structure for later use. At the moment,
427 * we only use the ID string during skc_attach().
428 */
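/*
 * A single VPD byte is fetched with a simple handshake: write the byte
 * address to SK_PCI_VPD_ADDR, poll (up to SK_TIMEOUT iterations) for
 * the SK_VPD_FLAG completion bit, then read the byte from
 * SK_PCI_VPD_DATA.  A read that times out silently returns 0.
 */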
429static u_int8_t
430sk_vpd_readbyte(sc, addr)
431	struct sk_softc		*sc;
432	int			addr;
433{
434	int			i;
435
436	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
437	for (i = 0; i < SK_TIMEOUT; i++) {
438		DELAY(1);
439		if (sk_win_read_2(sc,
440		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
441			break;
442	}
443
444	if (i == SK_TIMEOUT)
445		return(0);
446
447	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
448}
449
450static void
451sk_vpd_read_res(sc, res, addr)
452	struct sk_softc		*sc;
453	struct vpd_res		*res;
454	int			addr;
455{
456	int			i;
457	u_int8_t		*ptr;
458
459	ptr = (u_int8_t *)res;
460	for (i = 0; i < sizeof(struct vpd_res); i++)
461		ptr[i] = sk_vpd_readbyte(sc, i + addr);
462
463	return;
464}
465
466static void
467sk_vpd_read(sc)
468	struct sk_softc		*sc;
469{
470	int			pos = 0, i;
471	struct vpd_res		res;
472
473	if (sc->sk_vpd_prodname != NULL)
474		free(sc->sk_vpd_prodname, M_DEVBUF);
475	if (sc->sk_vpd_readonly != NULL)
476		free(sc->sk_vpd_readonly, M_DEVBUF);
477	sc->sk_vpd_prodname = NULL;
478	sc->sk_vpd_readonly = NULL;
479	sc->sk_vpd_readonly_len = 0;
480
481	sk_vpd_read_res(sc, &res, pos);
482
483	/*
484	 * Bail out quietly if the eeprom appears to be missing or empty.
485	 */
486	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
487		return;
488
489	if (res.vr_id != VPD_RES_ID) {
490		printf("skc%d: bad VPD resource id: expected %x got %x\n",
491		    sc->sk_unit, VPD_RES_ID, res.vr_id);
492		return;
493	}
494
495	pos += sizeof(res);
496	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
497	if (sc->sk_vpd_prodname != NULL) {
498		for (i = 0; i < res.vr_len; i++)
499			sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
500		sc->sk_vpd_prodname[i] = '\0';
501	}
502	pos += res.vr_len;
503
504	sk_vpd_read_res(sc, &res, pos);
505
506	if (res.vr_id != VPD_RES_READ) {
507		printf("skc%d: bad VPD resource id: expected %x got %x\n",
508		    sc->sk_unit, VPD_RES_READ, res.vr_id);
509		return;
510	}
511
512	pos += sizeof(res);
513	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		return;
514	for (i = 0; i < res.vr_len; i++)
515		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
516	sc->sk_vpd_readonly_len = res.vr_len;
517
518	return;
519}
520
521static int
522sk_miibus_readreg(dev, phy, reg)
523	device_t		dev;
524	int			phy, reg;
525{
526	struct sk_if_softc	*sc_if;
527
528	sc_if = device_get_softc(dev);
529
530	switch(sc_if->sk_softc->sk_type) {
531	case SK_GENESIS:
532		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
533	case SK_YUKON:
534	case SK_YUKON_LITE:
535	case SK_YUKON_LP:
536		return(sk_marv_miibus_readreg(sc_if, phy, reg));
537	}
538
539	return(0);
540}
541
542static int
543sk_miibus_writereg(dev, phy, reg, val)
544	device_t		dev;
545	int			phy, reg, val;
546{
547	struct sk_if_softc	*sc_if;
548
549	sc_if = device_get_softc(dev);
550
551	switch(sc_if->sk_softc->sk_type) {
552	case SK_GENESIS:
553		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
554	case SK_YUKON:
555	case SK_YUKON_LITE:
556	case SK_YUKON_LP:
557		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
558	}
559
560	return(0);
561}
562
563static void
564sk_miibus_statchg(dev)
565	device_t		dev;
566{
567	struct sk_if_softc	*sc_if;
568
569	sc_if = device_get_softc(dev);
570
571	switch(sc_if->sk_softc->sk_type) {
572	case SK_GENESIS:
573		sk_xmac_miibus_statchg(sc_if);
574		break;
575	case SK_YUKON:
576	case SK_YUKON_LITE:
577	case SK_YUKON_LP:
578		sk_marv_miibus_statchg(sc_if);
579		break;
580	}
581
582	return;
583}
584
585static int
586sk_xmac_miibus_readreg(sc_if, phy, reg)
587	struct sk_if_softc	*sc_if;
588	int			phy, reg;
589{
590	int			i;
591
592	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
593		return(0);
594
595	SK_IF_LOCK(sc_if);
596	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
597	SK_XM_READ_2(sc_if, XM_PHY_DATA);
598	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
599		for (i = 0; i < SK_TIMEOUT; i++) {
600			DELAY(1);
601			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
602			    XM_MMUCMD_PHYDATARDY)
603				break;
604		}
605
606		if (i == SK_TIMEOUT) {
607			printf("sk%d: phy failed to come ready\n",
608			    sc_if->sk_unit);
609			SK_IF_UNLOCK(sc_if);
610			return(0);
611		}
612	}
613	DELAY(1);
614	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
615	SK_IF_UNLOCK(sc_if);
616	return(i);
617}
618
619static int
620sk_xmac_miibus_writereg(sc_if, phy, reg, val)
621	struct sk_if_softc	*sc_if;
622	int			phy, reg, val;
623{
624	int			i;
625
626	SK_IF_LOCK(sc_if);
627	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
628	for (i = 0; i < SK_TIMEOUT; i++) {
629		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
630			break;
631	}
632
633	if (i == SK_TIMEOUT) {
634		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
635		SK_IF_UNLOCK(sc_if);
636		return(ETIMEDOUT);
637	}
638
639	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
640	for (i = 0; i < SK_TIMEOUT; i++) {
641		DELAY(1);
642		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
643			break;
644	}
645	SK_IF_UNLOCK(sc_if);
646	if (i == SK_TIMEOUT)
647		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
648
649	return(0);
650}
651
652static void
653sk_xmac_miibus_statchg(sc_if)
654	struct sk_if_softc	*sc_if;
655{
656	struct mii_data		*mii;
657
658	mii = device_get_softc(sc_if->sk_miibus);
659
660	SK_IF_LOCK(sc_if);
661	/*
662	 * If this is a GMII PHY, manually set the XMAC's
663	 * duplex mode accordingly.
664	 */
665	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
666		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
667			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
668		} else {
669			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
670		}
671	}
672	SK_IF_UNLOCK(sc_if);
673
674	return;
675}
676
677static int
678sk_marv_miibus_readreg(sc_if, phy, reg)
679	struct sk_if_softc	*sc_if;
680	int			phy, reg;
681{
682	u_int16_t		val;
683	int			i;
684
685	if (phy != 0 ||
686	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
687	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
688		return(0);
689	}
690
691	SK_IF_LOCK(sc_if);
692        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
693		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
694
695	for (i = 0; i < SK_TIMEOUT; i++) {
696		DELAY(1);
697		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
698		if (val & YU_SMICR_READ_VALID)
699			break;
700	}
701
702	if (i == SK_TIMEOUT) {
703		printf("sk%d: phy failed to come ready\n",
704		    sc_if->sk_unit);
705		SK_IF_UNLOCK(sc_if);
706		return(0);
707	}
708
709	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
710	SK_IF_UNLOCK(sc_if);
711
712	return(val);
713}
714
715static int
716sk_marv_miibus_writereg(sc_if, phy, reg, val)
717	struct sk_if_softc	*sc_if;
718	int			phy, reg, val;
719{
720	int			i;
721
722	SK_IF_LOCK(sc_if);
723	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
724	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
725		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
726
727	for (i = 0; i < SK_TIMEOUT; i++) {
728		DELAY(1);
729		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
730			break;
731	}
732	SK_IF_UNLOCK(sc_if);
733
734	return(0);
735}
736
737static void
738sk_marv_miibus_statchg(sc_if)
739	struct sk_if_softc	*sc_if;
740{
741	return;
742}
743
744#define HASH_BITS		6
745
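/*
 * Multicast hash helpers: the XMAC hash is the low HASH_BITS bits of
 * the inverted little-endian CRC32 of the address, while the GMAC
 * (Yukon) hash below is the low bits of the big-endian CRC32.
 */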
746static u_int32_t
747sk_xmchash(addr)
748	const uint8_t *addr;
749{
750	uint32_t crc;
751
752	/* Compute CRC for the address value. */
753	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
754
755	return (~crc & ((1 << HASH_BITS) - 1));
756}
757
758/* gmchash is just a big endian crc */
759static u_int32_t
760sk_gmchash(addr)
761	const uint8_t *addr;
762{
763	uint32_t crc;
764
765	/* Compute CRC for the address value. */
766	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
767
768	return (crc & ((1 << HASH_BITS) - 1));
769}
770
771static void
772sk_setfilt(sc_if, addr, slot)
773	struct sk_if_softc	*sc_if;
774	caddr_t			addr;
775	int			slot;
776{
777	int			base;
778
779	base = XM_RXFILT_ENTRY(slot);
780
781	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
782	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
783	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
784
785	return;
786}
787
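/*
 * Program the multicast filter.  On GEnesis the first XM_RXFILT_MAX - 1
 * groups are loaded into the XMAC's exact-match filter slots (slot 0 is
 * skipped) and any remaining groups fall back to the 64-bit hash table;
 * the Yukon parts have only the hash table, split across four 16-bit
 * registers.
 */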
788static void
789sk_setmulti(sc_if)
790	struct sk_if_softc	*sc_if;
791{
792	struct sk_softc		*sc = sc_if->sk_softc;
793	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
794	u_int32_t		hashes[2] = { 0, 0 };
795	int			h = 0, i;
796	struct ifmultiaddr	*ifma;
797	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
798
799
800	/* First, zot all the existing filters. */
801	switch(sc->sk_type) {
802	case SK_GENESIS:
803		for (i = 1; i < XM_RXFILT_MAX; i++)
804			sk_setfilt(sc_if, (caddr_t)&dummy, i);
805
806		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
807		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
808		break;
809	case SK_YUKON:
810	case SK_YUKON_LITE:
811	case SK_YUKON_LP:
812		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
813		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
814		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
815		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
816		break;
817	}
818
819	/* Now program new ones. */
820	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
821		hashes[0] = 0xFFFFFFFF;
822		hashes[1] = 0xFFFFFFFF;
823	} else {
824		i = 1;
825		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
826			if (ifma->ifma_addr->sa_family != AF_LINK)
827				continue;
828			/*
829			 * Program the first XM_RXFILT_MAX multicast groups
830			 * into the perfect filter. For all others,
831			 * use the hash table.
832			 */
833			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
834				sk_setfilt(sc_if,
835			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
836				i++;
837				continue;
838			}
839
840			switch(sc->sk_type) {
841			case SK_GENESIS:
842				h = sk_xmchash(
843					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
844				break;
845			case SK_YUKON:
846			case SK_YUKON_LITE:
847			case SK_YUKON_LP:
848				h = sk_gmchash(
849					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
850				break;
851			}
852			if (h < 32)
853				hashes[0] |= (1 << h);
854			else
855				hashes[1] |= (1 << (h - 32));
856		}
857	}
858
859	switch(sc->sk_type) {
860	case SK_GENESIS:
861		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
862			       XM_MODE_RX_USE_PERFECT);
863		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
864		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
865		break;
866	case SK_YUKON:
867	case SK_YUKON_LITE:
868	case SK_YUKON_LP:
869		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
870		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
871		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
872		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
873		break;
874	}
875
876	return;
877}
878
879static void
880sk_setpromisc(sc_if)
881	struct sk_if_softc	*sc_if;
882{
883	struct sk_softc		*sc = sc_if->sk_softc;
884	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
885
886	switch(sc->sk_type) {
887	case SK_GENESIS:
888		if (ifp->if_flags & IFF_PROMISC) {
889			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
890		} else {
891			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
892		}
893		break;
894	case SK_YUKON:
895	case SK_YUKON_LITE:
896	case SK_YUKON_LP:
897		if (ifp->if_flags & IFF_PROMISC) {
898			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
899			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
900		} else {
901			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
902			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
903		}
904		break;
905	}
906
907	return;
908}
909
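/*
 * The RX and TX rings are fixed descriptor arrays linked into a ring:
 * each descriptor's sk_next field holds the physical (vtophys) address
 * of the next descriptor, and the last entry points back at the first.
 * sk_init_rx_ring() also attaches a jumbo receive buffer to every slot
 * via sk_newbuf().
 */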
910static int
911sk_init_rx_ring(sc_if)
912	struct sk_if_softc	*sc_if;
913{
914	struct sk_chain_data	*cd = &sc_if->sk_cdata;
915	struct sk_ring_data	*rd = sc_if->sk_rdata;
916	int			i;
917
918	bzero((char *)rd->sk_rx_ring,
919	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
920
921	for (i = 0; i < SK_RX_RING_CNT; i++) {
922		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
923		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
924			return(ENOBUFS);
925		if (i == (SK_RX_RING_CNT - 1)) {
926			cd->sk_rx_chain[i].sk_next =
927			    &cd->sk_rx_chain[0];
928			rd->sk_rx_ring[i].sk_next =
929			    vtophys(&rd->sk_rx_ring[0]);
930		} else {
931			cd->sk_rx_chain[i].sk_next =
932			    &cd->sk_rx_chain[i + 1];
933			rd->sk_rx_ring[i].sk_next =
934			    vtophys(&rd->sk_rx_ring[i + 1]);
935		}
936	}
937
938	sc_if->sk_cdata.sk_rx_prod = 0;
939	sc_if->sk_cdata.sk_rx_cons = 0;
940
941	return(0);
942}
943
944static void
945sk_init_tx_ring(sc_if)
946	struct sk_if_softc	*sc_if;
947{
948	struct sk_chain_data	*cd = &sc_if->sk_cdata;
949	struct sk_ring_data	*rd = sc_if->sk_rdata;
950	int			i;
951
952	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
953	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
954
955	for (i = 0; i < SK_TX_RING_CNT; i++) {
956		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
957		if (i == (SK_TX_RING_CNT - 1)) {
958			cd->sk_tx_chain[i].sk_next =
959			    &cd->sk_tx_chain[0];
960			rd->sk_tx_ring[i].sk_next =
961			    vtophys(&rd->sk_tx_ring[0]);
962		} else {
963			cd->sk_tx_chain[i].sk_next =
964			    &cd->sk_tx_chain[i + 1];
965			rd->sk_tx_ring[i].sk_next =
966			    vtophys(&rd->sk_tx_ring[i + 1]);
967		}
968	}
969
970	sc_if->sk_cdata.sk_tx_prod = 0;
971	sc_if->sk_cdata.sk_tx_cons = 0;
972	sc_if->sk_cdata.sk_tx_cnt = 0;
973
974	return;
975}
976
977static int
978sk_newbuf(sc_if, c, m)
979	struct sk_if_softc	*sc_if;
980	struct sk_chain		*c;
981	struct mbuf		*m;
982{
983	struct mbuf		*m_new = NULL;
984	struct sk_rx_desc	*r;
985
986	if (m == NULL) {
987		caddr_t			*buf = NULL;
988
989		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
990		if (m_new == NULL)
991			return(ENOBUFS);
992
993		/* Allocate the jumbo buffer */
994		buf = sk_jalloc(sc_if);
995		if (buf == NULL) {
996			m_freem(m_new);
997#ifdef SK_VERBOSE
998			printf("sk%d: jumbo allocation failed "
999			    "-- packet dropped!\n", sc_if->sk_unit);
1000#endif
1001			return(ENOBUFS);
1002		}
1003
1004		/* Attach the buffer to the mbuf */
1005		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
1006		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
1007		m_new->m_data = (void *)buf;
1008		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
1009	} else {
1010		/*
1011	 	 * We're re-using a previously allocated mbuf;
1012		 * be sure to re-init pointers and lengths to
1013		 * default values.
1014		 */
1015		m_new = m;
1016		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
1017		m_new->m_data = m_new->m_ext.ext_buf;
1018	}
1019
1020	/*
1021	 * Adjust alignment so packet payload begins on a
1022	 * longword boundary. Mandatory for Alpha, useful on
1023	 * x86 too.
1024	 */
1025	m_adj(m_new, ETHER_ALIGN);
1026
1027	r = c->sk_desc;
1028	c->sk_mbuf = m_new;
1029	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1030	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1031
1032	return(0);
1033}
1034
1035/*
1036 * Allocate jumbo buffer storage. The SysKonnect adapters support
1037 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1038 * use them in their drivers. In order for us to use them, we need
1039 * large 9K receive buffers; however, standard mbuf clusters are only
1040 * 2048 bytes in size. Consequently, we need to allocate and manage
1041 * our own jumbo buffer pool. Fortunately, this does not require an
1042 * excessive amount of additional code.
1043 */
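/*
 * The pool is one contiguous SK_JMEM-byte allocation carved into
 * SK_JSLOTS buffers of SK_JLEN bytes each.  Free and in-use buffers are
 * tracked with sk_jpool_entry records on two SLISTs protected by
 * sk_jlist_mtx; sk_jalloc() and sk_jfree() move entries between them.
 */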
1044static int
1045sk_alloc_jumbo_mem(sc_if)
1046	struct sk_if_softc	*sc_if;
1047{
1048	caddr_t			ptr;
1049	register int		i;
1050	struct sk_jpool_entry   *entry;
1051
1052	/* Grab a big chunk o' storage. */
1053	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1054	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1055
1056	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1057		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1058		return(ENOBUFS);
1059	}
1060
1061	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
1062
1063	SLIST_INIT(&sc_if->sk_jfree_listhead);
1064	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1065
1066	/*
1067	 * Now divide it up into 9K pieces and save the addresses
1068	 * in an array.
1069	 */
1070	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1071	for (i = 0; i < SK_JSLOTS; i++) {
1072		sc_if->sk_cdata.sk_jslots[i] = ptr;
1073		ptr += SK_JLEN;
1074		entry = malloc(sizeof(struct sk_jpool_entry),
1075		    M_DEVBUF, M_NOWAIT);
1076		if (entry == NULL) {
1077			sk_free_jumbo_mem(sc_if);
1078			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1079			printf("sk%d: no memory for jumbo "
1080			    "buffer queue!\n", sc_if->sk_unit);
1081			return(ENOBUFS);
1082		}
1083		entry->slot = i;
1084		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1085		    entry, jpool_entries);
1086	}
1087
1088	return(0);
1089}
1090
1091static void
1092sk_free_jumbo_mem(sc_if)
1093	struct sk_if_softc	*sc_if;
1094{
1095	struct sk_jpool_entry	*entry;
1096
1097	SK_JLIST_LOCK(sc_if);
1098
1099	/* We cannot release external mbuf storage while in use. */
1100	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
1101		printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
1102		SK_JLIST_UNLOCK(sc_if);
1103		return;
1104	}
1105
1106	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
1107		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1108		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1109		free(entry, M_DEVBUF);
1110	}
1111
1112	SK_JLIST_UNLOCK(sc_if);
1113
1114	mtx_destroy(&sc_if->sk_jlist_mtx);
1115
1116	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1117
1118	return;
1119}
1120
1121/*
1122 * Allocate a jumbo buffer.
1123 */
1124static void *
1125sk_jalloc(sc_if)
1126	struct sk_if_softc	*sc_if;
1127{
1128	struct sk_jpool_entry   *entry;
1129
1130	SK_JLIST_LOCK(sc_if);
1131
1132	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1133
1134	if (entry == NULL) {
1135#ifdef SK_VERBOSE
1136		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1137#endif
1138		SK_JLIST_UNLOCK(sc_if);
1139		return(NULL);
1140	}
1141
1142	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1143	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1144
1145	SK_JLIST_UNLOCK(sc_if);
1146
1147	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1148}
1149
1150/*
1151 * Release a jumbo buffer.
1152 */
1153static void
1154sk_jfree(buf, args)
1155	void			*buf;
1156	void			*args;
1157{
1158	struct sk_if_softc	*sc_if;
1159	int		        i;
1160	struct sk_jpool_entry   *entry;
1161
1162	/* Extract the softc struct pointer. */
1163	sc_if = (struct sk_if_softc *)args;
1164	if (sc_if == NULL)
1165		panic("sk_jfree: didn't get softc pointer!");
1166
1167	SK_JLIST_LOCK(sc_if);
1168
1169	/* calculate the slot this buffer belongs to */
1170	i = ((vm_offset_t)buf
1171	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1172
1173	if ((i < 0) || (i >= SK_JSLOTS))
1174		panic("sk_jfree: asked to free buffer that we don't manage!");
1175
1176	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1177	if (entry == NULL)
1178		panic("sk_jfree: buffer not in use!");
1179	entry->slot = i;
1180	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
1181	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
1182	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
1183		wakeup(sc_if);
1184
1185	SK_JLIST_UNLOCK(sc_if);
1186	return;
1187}
1188
1189/*
1190 * Set media options.
1191 */
1192static int
1193sk_ifmedia_upd(ifp)
1194	struct ifnet		*ifp;
1195{
1196	struct sk_if_softc	*sc_if = ifp->if_softc;
1197	struct mii_data		*mii;
1198
1199	mii = device_get_softc(sc_if->sk_miibus);
1200	sk_init(sc_if);
1201	mii_mediachg(mii);
1202
1203	return(0);
1204}
1205
1206/*
1207 * Report current media status.
1208 */
1209static void
1210sk_ifmedia_sts(ifp, ifmr)
1211	struct ifnet		*ifp;
1212	struct ifmediareq	*ifmr;
1213{
1214	struct sk_if_softc	*sc_if;
1215	struct mii_data		*mii;
1216
1217	sc_if = ifp->if_softc;
1218	mii = device_get_softc(sc_if->sk_miibus);
1219
1220	mii_pollstat(mii);
1221	ifmr->ifm_active = mii->mii_media_active;
1222	ifmr->ifm_status = mii->mii_media_status;
1223
1224	return;
1225}
1226
1227static int
1228sk_ioctl(ifp, command, data)
1229	struct ifnet		*ifp;
1230	u_long			command;
1231	caddr_t			data;
1232{
1233	struct sk_if_softc	*sc_if = ifp->if_softc;
1234	struct ifreq		*ifr = (struct ifreq *) data;
1235	int			error = 0;
1236	struct mii_data		*mii;
1237
1238	switch(command) {
1239	case SIOCSIFMTU:
1240		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1241			error = EINVAL;
1242		else {
1243			ifp->if_mtu = ifr->ifr_mtu;
1244			ifp->if_flags &= ~IFF_RUNNING;
1245			sk_init(sc_if);
1246		}
1247		break;
1248	case SIOCSIFFLAGS:
1249		SK_IF_LOCK(sc_if);
1250		if (ifp->if_flags & IFF_UP) {
1251			if (ifp->if_flags & IFF_RUNNING) {
1252				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1253				    & IFF_PROMISC) {
1254					sk_setpromisc(sc_if);
1255					sk_setmulti(sc_if);
1256				}
1257			} else
1258				sk_init(sc_if);
1259		} else {
1260			if (ifp->if_flags & IFF_RUNNING)
1261				sk_stop(sc_if);
1262		}
1263		sc_if->sk_if_flags = ifp->if_flags;
1264		SK_IF_UNLOCK(sc_if);
1265		error = 0;
1266		break;
1267	case SIOCADDMULTI:
1268	case SIOCDELMULTI:
1269		if (ifp->if_flags & IFF_RUNNING) {
1270			SK_IF_LOCK(sc_if);
1271			sk_setmulti(sc_if);
1272			SK_IF_UNLOCK(sc_if);
1273			error = 0;
1274		}
1275		break;
1276	case SIOCGIFMEDIA:
1277	case SIOCSIFMEDIA:
1278		mii = device_get_softc(sc_if->sk_miibus);
1279		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1280		break;
1281	default:
1282		error = ether_ioctl(ifp, command, data);
1283		break;
1284	}
1285
1286	return(error);
1287}
1288
1289/*
1290 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1291 * IDs against our list and return a device name if we find a match.
1292 */
1293static int
1294skc_probe(dev)
1295	device_t		dev;
1296{
1297	struct sk_softc		*sc;
1298	struct sk_type		*t = sk_devs;
1299
1300	sc = device_get_softc(dev);
1301
1302	while(t->sk_name != NULL) {
1303		if ((pci_get_vendor(dev) == t->sk_vid) &&
1304		    (pci_get_device(dev) == t->sk_did)) {
1305			device_set_desc(dev, t->sk_name);
1306			return (BUS_PROBE_DEFAULT);
1307		}
1308		t++;
1309	}
1310
1311	return(ENXIO);
1312}
1313
1314/*
1315 * Force the GEnesis into reset, then bring it out of reset.
1316 */
1317static void
1318sk_reset(sc)
1319	struct sk_softc		*sc;
1320{
1321	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1322	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1323	if (SK_YUKON_FAMILY(sc->sk_type))
1324		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1325
1326	DELAY(1000);
1327	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1328	DELAY(2);
1329	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1330	if (SK_YUKON_FAMILY(sc->sk_type))
1331		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1332
1333	if (sc->sk_type == SK_GENESIS) {
1334		/* Configure packet arbiter */
1335		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1336		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1337		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1338		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1339		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1340	}
1341
1342	/* Enable RAM interface */
1343	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1344
1345	/*
1346         * Configure interrupt moderation. The moderation timer
1347	 * defers interrupts specified in the interrupt moderation
1348	 * timer mask based on the timeout specified in the interrupt
1349	 * moderation timer init register. Each bit in the timer
1350	 * register represents 18.825ns, so to specify a timeout in
1351	 * microseconds, we have to multiply by 54.
1352	 */
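	/*
	 * Example: with the x54 scaling described above, a setting of
	 * 100 us corresponds to 100 * 54 = 5400 timer ticks (assuming
	 * SK_IM_USECS() performs exactly that conversion).
	 */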
1353	printf("skc%d: interrupt moderation is %d us\n",
1354	    sc->sk_unit, sc->sk_int_mod);
1355	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
1356	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1357	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1358	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1359
1360	return;
1361}
1362
1363static int
1364sk_probe(dev)
1365	device_t		dev;
1366{
1367	struct sk_softc		*sc;
1368
1369	sc = device_get_softc(device_get_parent(dev));
1370
1371	/*
1372	 * Not much to do here. We always know there will be
1373	 * at least one XMAC present, and if there are two,
1374	 * skc_attach() will create a second device instance
1375	 * for us.
1376	 */
1377	switch (sc->sk_type) {
1378	case SK_GENESIS:
1379		device_set_desc(dev, "XaQti Corp. XMAC II");
1380		break;
1381	case SK_YUKON:
1382	case SK_YUKON_LITE:
1383	case SK_YUKON_LP:
1384		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1385		break;
1386	}
1387
1388	return (BUS_PROBE_DEFAULT);
1389}
1390
1391/*
1392 * Each XMAC chip is attached as a separate logical IP interface.
1393 * Single port cards will have only one logical interface of course.
1394 */
1395static int
1396sk_attach(dev)
1397	device_t		dev;
1398{
1399	struct sk_softc		*sc;
1400	struct sk_if_softc	*sc_if;
1401	struct ifnet		*ifp;
1402	int			i, port, error;
1403
1404	if (dev == NULL)
1405		return(EINVAL);
1406
1407	error = 0;
1408	sc_if = device_get_softc(dev);
1409	sc = device_get_softc(device_get_parent(dev));
1410	port = *(int *)device_get_ivars(dev);
1411
1412	sc_if->sk_dev = dev;
1413	sc_if->sk_unit = device_get_unit(dev);
1414	sc_if->sk_port = port;
1415	sc_if->sk_softc = sc;
1416	sc->sk_if[port] = sc_if;
1417	if (port == SK_PORT_A)
1418		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1419	if (port == SK_PORT_B)
1420		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1421
1422	/* Allocate the descriptor queues. */
1423	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1424	    M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);
1425
1426	if (sc_if->sk_rdata == NULL) {
1427		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1428		error = ENOMEM;
1429		goto fail;
1430	}
1431
1432	/* Try to allocate memory for jumbo buffers. */
1433	if (sk_alloc_jumbo_mem(sc_if)) {
1434		printf("sk%d: jumbo buffer allocation failed\n",
1435		    sc_if->sk_unit);
1436		error = ENOMEM;
1437		goto fail;
1438	}
1439
1440	ifp = &sc_if->arpcom.ac_if;
1441	ifp->if_softc = sc_if;
1442	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1443	ifp->if_mtu = ETHERMTU;
1444	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1445	ifp->if_ioctl = sk_ioctl;
1446	ifp->if_start = sk_start;
1447	ifp->if_watchdog = sk_watchdog;
1448	ifp->if_init = sk_init;
1449	ifp->if_baudrate = 1000000000;
1450	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1451	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1452	IFQ_SET_READY(&ifp->if_snd);
1453
1454	callout_handle_init(&sc_if->sk_tick_ch);
1455
1456	/*
1457	 * Get station address for this interface. Note that
1458	 * dual port cards actually come with three station
1459	 * addresses: one for each port, plus an extra. The
1460	 * extra one is used by the SysKonnect driver software
1461	 * as a 'virtual' station address for when both ports
1462	 * are operating in failover mode. Currently we don't
1463	 * use this extra address.
1464	 */
1465	SK_LOCK(sc);
1466	for (i = 0; i < ETHER_ADDR_LEN; i++)
1467		sc_if->arpcom.ac_enaddr[i] =
1468		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1469
1470	/*
1471	 * Set up RAM buffer addresses. The NIC will have a certain
1472	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1473	 * need to divide this up a) between the transmitter and
1474 	 * receiver and b) between the two XMACs, if this is a
1475	 * dual port NIC. Our algorithm is to divide up the memory
1476	 * evenly so that everyone gets a fair share.
1477	 */
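	/*
	 * RAM buffer addresses are in 8-byte (u_int64_t) units.  A
	 * single-MAC board splits the SRAM in half between receive and
	 * transmit; a dual-MAC board gives each port one quarter for
	 * receive and one quarter for transmit.
	 */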
1478	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1479		u_int32_t		chunk, val;
1480
1481		chunk = sc->sk_ramsize / 2;
1482		val = sc->sk_rboff / sizeof(u_int64_t);
1483		sc_if->sk_rx_ramstart = val;
1484		val += (chunk / sizeof(u_int64_t));
1485		sc_if->sk_rx_ramend = val - 1;
1486		sc_if->sk_tx_ramstart = val;
1487		val += (chunk / sizeof(u_int64_t));
1488		sc_if->sk_tx_ramend = val - 1;
1489	} else {
1490		u_int32_t		chunk, val;
1491
1492		chunk = sc->sk_ramsize / 4;
1493		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1494		    sizeof(u_int64_t);
1495		sc_if->sk_rx_ramstart = val;
1496		val += (chunk / sizeof(u_int64_t));
1497		sc_if->sk_rx_ramend = val - 1;
1498		sc_if->sk_tx_ramstart = val;
1499		val += (chunk / sizeof(u_int64_t));
1500		sc_if->sk_tx_ramend = val - 1;
1501	}
1502
1503	/* Read and save PHY type and set PHY address */
1504	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1505	switch(sc_if->sk_phytype) {
1506	case SK_PHYTYPE_XMAC:
1507		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1508		break;
1509	case SK_PHYTYPE_BCOM:
1510		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1511		break;
1512	case SK_PHYTYPE_MARV_COPPER:
1513		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1514		break;
1515	default:
1516		printf("skc%d: unsupported PHY type: %d\n",
1517		    sc->sk_unit, sc_if->sk_phytype);
1518		error = ENODEV;
1519		SK_UNLOCK(sc);
1520		goto fail;
1521	}
1522
1523
1524	/*
1525	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1526	 */
1527	SK_UNLOCK(sc);
1528	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1529	SK_LOCK(sc);
1530
1531	/*
1532	 * Do miibus setup.
1533	 */
1534	switch (sc->sk_type) {
1535	case SK_GENESIS:
1536		sk_init_xmac(sc_if);
1537		break;
1538	case SK_YUKON:
1539	case SK_YUKON_LITE:
1540	case SK_YUKON_LP:
1541		sk_init_yukon(sc_if);
1542		break;
1543	}
1544
1545	SK_UNLOCK(sc);
1546	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1547	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1548		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1549		ether_ifdetach(ifp);
1550		error = ENXIO;
1551		goto fail;
1552	}
1553
1554fail:
1555	if (error) {
1556		/* Access should be ok even though lock has been dropped */
1557		sc->sk_if[port] = NULL;
1558		sk_detach(dev);
1559	}
1560
1561	return(error);
1562}
1563
1564/*
1565 * Attach the interface. Allocate softc structures, do ifmedia
1566 * setup and ethernet/BPF attach.
1567 */
1568static int
1569skc_attach(dev)
1570	device_t		dev;
1571{
1572	struct sk_softc		*sc;
1573	int			unit, error = 0, rid, *port;
1574	uint8_t			skrs;
1575	char			*pname, *revstr;
1576
1577	sc = device_get_softc(dev);
1578	unit = device_get_unit(dev);
1579
1580	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1581	    MTX_DEF | MTX_RECURSE);
1582	/*
1583	 * Map control/status registers.
1584	 */
1585	pci_enable_busmaster(dev);
1586
1587	rid = SK_RID;
1588	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1589
1590	if (sc->sk_res == NULL) {
1591		printf("sk%d: couldn't map ports/memory\n", unit);
1592		error = ENXIO;
1593		goto fail;
1594	}
1595
1596	sc->sk_btag = rman_get_bustag(sc->sk_res);
1597	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1598
1599	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1600	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1601
1602	/* Bail out if chip is not recognized. */
1603	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1604		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
1605			unit, sc->sk_type, sc->sk_rev);
1606		error = ENXIO;
1607		goto fail;
1608	}
1609
1610	/* Allocate interrupt */
1611	rid = 0;
1612	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1613	    RF_SHAREABLE | RF_ACTIVE);
1614
1615	if (sc->sk_irq == NULL) {
1616		printf("skc%d: couldn't map interrupt\n", unit);
1617		error = ENXIO;
1618		goto fail;
1619	}
1620
1621	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1622		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1623		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1624		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1625		"SK interrupt moderation");
1626
1627	/* Pull in device tunables. */
1628	sc->sk_int_mod = SK_IM_DEFAULT;
1629	error = resource_int_value(device_get_name(dev), unit,
1630		"int_mod", &sc->sk_int_mod);
1631	if (error == 0) {
1632		if (sc->sk_int_mod < SK_IM_MIN ||
1633		    sc->sk_int_mod > SK_IM_MAX) {
1634			printf("skc%d: int_mod value out of range; "
1635			    "using default: %d\n", unit, SK_IM_DEFAULT);
1636			sc->sk_int_mod = SK_IM_DEFAULT;
1637		}
1638	}
1639
1640	/* Reset the adapter. */
1641	sk_reset(sc);
1642
1643	sc->sk_unit = unit;
1644
1645	/* Read and save vital product data from EEPROM. */
1646	sk_vpd_read(sc);
1647
1648	skrs = sk_win_read_1(sc, SK_EPROM0);
1649	if (sc->sk_type == SK_GENESIS) {
1650		/* Read and save RAM size and RAMbuffer offset */
1651		switch(skrs) {
1652		case SK_RAMSIZE_512K_64:
1653			sc->sk_ramsize = 0x80000;
1654			sc->sk_rboff = SK_RBOFF_0;
1655			break;
1656		case SK_RAMSIZE_1024K_64:
1657			sc->sk_ramsize = 0x100000;
1658			sc->sk_rboff = SK_RBOFF_80000;
1659			break;
1660		case SK_RAMSIZE_1024K_128:
1661			sc->sk_ramsize = 0x100000;
1662			sc->sk_rboff = SK_RBOFF_0;
1663			break;
1664		case SK_RAMSIZE_2048K_128:
1665			sc->sk_ramsize = 0x200000;
1666			sc->sk_rboff = SK_RBOFF_0;
1667			break;
1668		default:
1669			printf("skc%d: unknown ram size: %d\n",
1670			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1671			error = ENXIO;
1672			goto fail;
1673		}
1674	} else { /* SK_YUKON_FAMILY */
1675		if (skrs == 0x00)
1676			sc->sk_ramsize = 0x20000;
1677		else
1678			sc->sk_ramsize = skrs * (1<<12);
1679		sc->sk_rboff = SK_RBOFF_0;
1680	}
1681
1682	/* Read and save physical media type */
1683	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1684	case SK_PMD_1000BASESX:
1685		sc->sk_pmd = IFM_1000_SX;
1686		break;
1687	case SK_PMD_1000BASELX:
1688		sc->sk_pmd = IFM_1000_LX;
1689		break;
1690	case SK_PMD_1000BASECX:
1691		sc->sk_pmd = IFM_1000_CX;
1692		break;
1693	case SK_PMD_1000BASETX:
1694		sc->sk_pmd = IFM_1000_T;
1695		break;
1696	default:
1697		printf("skc%d: unknown media type: 0x%x\n",
1698		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1699		error = ENXIO;
1700		goto fail;
1701	}
1702
1703	/* Determine whether to name it with VPD PN or just make it up.
1704	 * Marvell Yukon VPD PN seems to frequently be bogus. */
1705	switch (pci_get_device(dev)) {
1706	case DEVICEID_SK_V1:
1707	case DEVICEID_BELKIN_5005:
1708	case DEVICEID_3COM_3C940:
1709	case DEVICEID_LINKSYS_EG1032:
1710	case DEVICEID_DLINK_DGE530T:
1711		/* Stay with VPD PN. */
1712		pname = sc->sk_vpd_prodname;
1713		break;
1714	case DEVICEID_SK_V2:
1715		/* YUKON VPD PN might bear no resemblance to reality. */
1716		switch (sc->sk_type) {
1717		case SK_GENESIS:
1718			/* Stay with VPD PN. */
1719			pname = sc->sk_vpd_prodname;
1720			break;
1721		case SK_YUKON:
1722			pname = "Marvell Yukon Gigabit Ethernet";
1723			break;
1724		case SK_YUKON_LITE:
1725			pname = "Marvell Yukon Lite Gigabit Ethernet";
1726			break;
1727		case SK_YUKON_LP:
1728			pname = "Marvell Yukon LP Gigabit Ethernet";
1729			break;
1730		default:
1731			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1732			break;
1733		}
1734
1735		/* Yukon Lite Rev. A0 needs special test. */
1736		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1737			u_int32_t far;
1738			u_int8_t testbyte;
1739
1740			/* Save flash address register before testing. */
1741			far = sk_win_read_4(sc, SK_EP_ADDR);
1742
1743			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1744			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1745
1746			if (testbyte != 0x00) {
1747				/* Yukon Lite Rev. A0 detected. */
1748				sc->sk_type = SK_YUKON_LITE;
1749				sc->sk_rev = SK_YUKON_LITE_REV_A0;
1750				/* Restore flash address register. */
1751				sk_win_write_4(sc, SK_EP_ADDR, far);
1752			}
1753		}
1754		break;
1755	default:
1756		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1757			"chipver=%02x, rev=%x\n",
1758			pci_get_vendor(dev), pci_get_device(dev),
1759			sc->sk_type, sc->sk_rev);
1760		error = ENXIO;
1761		goto fail;
1762	}
1763
1764	if (sc->sk_type == SK_YUKON_LITE) {
1765		switch (sc->sk_rev) {
1766		case SK_YUKON_LITE_REV_A0:
1767			revstr = "A0";
1768			break;
1769		case SK_YUKON_LITE_REV_A1:
1770			revstr = "A1";
1771			break;
1772		case SK_YUKON_LITE_REV_A3:
1773			revstr = "A3";
1774			break;
1775		default:
1776			revstr = "";
1777			break;
1778		}
1779	} else {
1780		revstr = "";
1781	}
1782
1783	/* Announce the product name and more VPD data if present. */
1784	device_printf(dev, "%s rev. %s(0x%x)\n",
1785		pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);
1786
1787	if (bootverbose) {
1788		if (sc->sk_vpd_readonly != NULL &&
1789		    sc->sk_vpd_readonly_len != 0) {
1790			char buf[256];
1791			char *dp = sc->sk_vpd_readonly;
1792			uint16_t l, len = sc->sk_vpd_readonly_len;
1793
1794			while (len >= 3) {
1795				if ((*dp == 'P' && *(dp+1) == 'N') ||
1796				    (*dp == 'E' && *(dp+1) == 'C') ||
1797				    (*dp == 'M' && *(dp+1) == 'N') ||
1798				    (*dp == 'S' && *(dp+1) == 'N')) {
1799					l = 0;
1800					while (l < *(dp+2)) {
1801						buf[l] = *(dp+3+l);
1802						++l;
1803					}
1804					buf[l] = '\0';
1805					device_printf(dev, "%c%c: %s\n",
1806					    *dp, *(dp+1), buf);
1807					len -= (3 + l);
1808					dp += (3 + l);
1809				} else {
1810					len -= (3 + *(dp+2));
1811					dp += (3 + *(dp+2));
1812				}
1813			}
1814		}
1815		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
1816		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
1817		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1818		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1819	}
1820
1821	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1822	if (sc->sk_devs[SK_PORT_A] == NULL) {
1823		device_printf(dev, "failed to add child for PORT_A\n");
1824		error = ENXIO;
1825		goto fail;
1826	}
1827	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1828	if (port == NULL) {
1829		device_printf(dev, "failed to allocate memory for "
1830		    "ivars of PORT_A\n");
1831		error = ENXIO;
1832		goto fail;
1833	}
1834	*port = SK_PORT_A;
1835	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1836
1837	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1838		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1839		if (sc->sk_devs[SK_PORT_B] == NULL) {
1840			device_printf(dev, "failed to add child for PORT_B\n");
1841			error = ENXIO;
1842			goto fail;
1843		}
1844		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1845		if (port == NULL) {
1846			device_printf(dev, "failed to allocate memory for "
1847			    "ivars of PORT_B\n");
1848			error = ENXIO;
1849			goto fail;
1850		}
1851		*port = SK_PORT_B;
1852		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1853	}
1854
1855	/* Turn on the 'driver is loaded' LED. */
1856	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1857
1858	bus_generic_attach(dev);
1859
1860	/* Hook interrupt last to avoid having to lock softc */
1861	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1862	    sk_intr, sc, &sc->sk_intrhand);
1863
1864	if (error) {
1865		printf("skc%d: couldn't set up irq\n", unit);
1866		goto fail;
1867	}
1868
1869fail:
1870	if (error)
1871		skc_detach(dev);
1872
1873	return(error);
1874}
1875
1876/*
1877 * Shut down hardware and free up resources. This can be called any
1878 * time after the mutex has been initialized. It is called in both
1879 * the error case in attach and the normal detach case so it needs
1880 * to be careful about only freeing resources that have actually been
1881 * allocated.
1882 */
1883static int
1884sk_detach(dev)
1885	device_t		dev;
1886{
1887	struct sk_if_softc	*sc_if;
1888	struct ifnet		*ifp;
1889
1890	sc_if = device_get_softc(dev);
1891	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1892	    ("sk mutex not initialized in sk_detach"));
1893	SK_IF_LOCK(sc_if);
1894
1895	ifp = &sc_if->arpcom.ac_if;
1896	/* These should only be active if sk_attach() succeeded */
1897	if (device_is_attached(dev)) {
1898		sk_stop(sc_if);
1899		/* Can't hold locks while calling detach */
1900		SK_IF_UNLOCK(sc_if);
1901		ether_ifdetach(ifp);
1902		SK_IF_LOCK(sc_if);
1903	}
1904	/*
1905	 * We're generally called from skc_detach() which is using
1906	 * device_delete_child() to get to here. It's already trashed
1907	 * miibus for us, so don't do it here or we'll panic.
1908	 */
1909	/*
1910	if (sc_if->sk_miibus != NULL)
1911		device_delete_child(dev, sc_if->sk_miibus);
1912	*/
1913	bus_generic_detach(dev);
1914	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
1915		sk_free_jumbo_mem(sc_if);
1916	if (sc_if->sk_rdata != NULL) {
1917		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1918		    M_DEVBUF);
1919	}
1920	SK_IF_UNLOCK(sc_if);
1921
1922	return(0);
1923}
1924
1925static int
1926skc_detach(dev)
1927	device_t		dev;
1928{
1929	struct sk_softc		*sc;
1930
1931	sc = device_get_softc(dev);
1932	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1933
1934	if (device_is_alive(dev)) {
1935		if (sc->sk_devs[SK_PORT_A] != NULL) {
1936			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1937			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1938		}
1939		if (sc->sk_devs[SK_PORT_B] != NULL) {
1940			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1941			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1942		}
1943		bus_generic_detach(dev);
1944	}
1945
1946	if (sc->sk_vpd_prodname != NULL)
1947		free(sc->sk_vpd_prodname, M_DEVBUF);
1948	if (sc->sk_vpd_readonly != NULL)
1949		free(sc->sk_vpd_readonly, M_DEVBUF);
1950
1951	if (sc->sk_intrhand)
1952		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1953	if (sc->sk_irq)
1954		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1955	if (sc->sk_res)
1956		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1957
1958	mtx_destroy(&sc->sk_mtx);
1959
1960	return(0);
1961}
1962
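/*
 * Map an mbuf chain onto TX descriptors.  The first fragment is tagged
 * SK_TXCTL_FIRSTFRAG and the last SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
 * SK_TXCTL_OWN is set on the first descriptor only after the rest of
 * the chain has been filled in, so the NIC never sees a partially
 * built packet.
 */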
1963static int
1964sk_encap(sc_if, m_head, txidx)
1965        struct sk_if_softc	*sc_if;
1966        struct mbuf		*m_head;
1967        u_int32_t		*txidx;
1968{
1969	struct sk_tx_desc	*f = NULL;
1970	struct mbuf		*m;
1971	u_int32_t		frag, cur, cnt = 0;
1972
1973	SK_IF_LOCK_ASSERT(sc_if);
1974
1975	m = m_head;
1976	cur = frag = *txidx;
1977
1978	/*
1979	 * Start packing the mbufs in this chain into
1980	 * the fragment pointers. Stop when we run out
1981	 * of fragments or hit the end of the mbuf chain.
1982	 */
1983	for (m = m_head; m != NULL; m = m->m_next) {
1984		if (m->m_len != 0) {
1985			if ((SK_TX_RING_CNT -
1986			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1987				return(ENOBUFS);
1988			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1989			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1990			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1991			if (cnt == 0)
1992				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1993			else
1994				f->sk_ctl |= SK_TXCTL_OWN;
1995			cur = frag;
1996			SK_INC(frag, SK_TX_RING_CNT);
1997			cnt++;
1998		}
1999	}
2000
2001	if (m != NULL)
2002		return(ENOBUFS);
2003
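	/*
	 * Close off the chain: mark the last descriptor as the final
	 * fragment and ask for an end-of-frame interrupt, remember the
	 * mbuf so sk_txeof() can free it, and only then hand the first
	 * descriptor to the NIC by setting its OWN bit, so the chip
	 * never sees a partially built chain.
	 */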
2004	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
2005		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
2006	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
2007	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
2008	sc_if->sk_cdata.sk_tx_cnt += cnt;
2009
2010	*txidx = frag;
2011
2012	return(0);
2013}
2014
2015static void
2016sk_start(ifp)
2017	struct ifnet		*ifp;
2018{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
2023
2024	sc_if = ifp->if_softc;
2025	sc = sc_if->sk_softc;
2026
2027	SK_IF_LOCK(sc_if);
2028
2029	idx = sc_if->sk_cdata.sk_tx_prod;
2030
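	/*
	 * Queue frames against a local copy of the producer index; the
	 * hardware producer pointer is updated and the transmit BMU is
	 * kicked only once, after the loop, rather than per packet.
	 */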
2031	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
2032		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2033		if (m_head == NULL)
2034			break;
2035
2036		/*
2037		 * Pack the data into the transmit ring. If we
2038		 * don't have room, set the OACTIVE flag and wait
2039		 * for the NIC to drain the ring.
2040		 */
2041		if (sk_encap(sc_if, m_head, &idx)) {
2042			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2043			ifp->if_flags |= IFF_OACTIVE;
2044			break;
2045		}
2046
2047		/*
2048		 * If there's a BPF listener, bounce a copy of this frame
2049		 * to him.
2050		 */
2051		BPF_MTAP(ifp, m_head);
2052	}
2053
2054	/* Transmit */
2055	if (idx != sc_if->sk_cdata.sk_tx_prod) {
2056		sc_if->sk_cdata.sk_tx_prod = idx;
2057		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2058
2059		/* Set a timeout in case the chip goes out to lunch. */
2060		ifp->if_timer = 5;
2061	}
2062	SK_IF_UNLOCK(sc_if);
2063
2064	return;
2065}
2066
2067
2068static void
2069sk_watchdog(ifp)
2070	struct ifnet		*ifp;
2071{
2072	struct sk_if_softc	*sc_if;
2073
2074	sc_if = ifp->if_softc;
2075
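	/*
	 * The transmit timer armed in sk_start() expired before the
	 * chip completed the queued frames; reinitialize the interface
	 * to recover.
	 */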
2076	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
2077	ifp->if_flags &= ~IFF_RUNNING;
2078	sk_init(sc_if);
2079
2080	return;
2081}
2082
2083static void
2084skc_shutdown(dev)
2085	device_t		dev;
2086{
2087	struct sk_softc		*sc;
2088
2089	sc = device_get_softc(dev);
2090	SK_LOCK(sc);
2091
2092	/* Turn off the 'driver is loaded' LED. */
2093	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2094
2095	/*
2096	 * Reset the GEnesis controller. Doing this should also
2097	 * assert the resets on the attached XMAC(s).
2098	 */
2099	sk_reset(sc);
2100	SK_UNLOCK(sc);
2101
2102	return;
2103}
2104
2105static void
2106sk_rxeof(sc_if)
2107	struct sk_if_softc	*sc_if;
2108{
2109	struct sk_softc		*sc;
2110	struct mbuf		*m;
2111	struct ifnet		*ifp;
2112	struct sk_chain		*cur_rx;
2113	int			total_len = 0;
2114	int			i;
2115	u_int32_t		rxstat;
2116
2117	sc = sc_if->sk_softc;
2118	ifp = &sc_if->arpcom.ac_if;
2119	i = sc_if->sk_cdata.sk_rx_prod;
2120	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2121
2122	SK_LOCK_ASSERT(sc);
2123
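	/*
	 * Walk the RX ring, processing descriptors until we reach one
	 * that is still owned by the NIC.
	 */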
2124	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
2125
2126		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2127		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
2128		m = cur_rx->sk_mbuf;
2129		cur_rx->sk_mbuf = NULL;
2130		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
2131		SK_INC(i, SK_RX_RING_CNT);
2132
2133		if (rxstat & XM_RXSTAT_ERRFRAME) {
2134			ifp->if_ierrors++;
2135			sk_newbuf(sc_if, cur_rx, m);
2136			continue;
2137		}
2138
2139		/*
2140		 * Try to allocate a new jumbo buffer. If that
2141		 * fails, copy the packet to mbufs and put the
2142		 * jumbo buffer back in the ring so it can be
2143		 * re-used. If allocating mbufs fails, then we
2144		 * have to drop the packet.
2145		 */
2146		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
2147			struct mbuf		*m0;
2148			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
2149			    ifp, NULL);
2150			sk_newbuf(sc_if, cur_rx, m);
2151			if (m0 == NULL) {
2152				printf("sk%d: no receive buffers "
2153				    "available -- packet dropped!\n",
2154				    sc_if->sk_unit);
2155				ifp->if_ierrors++;
2156				continue;
2157			}
2158			m = m0;
2159		} else {
2160			m->m_pkthdr.rcvif = ifp;
2161			m->m_pkthdr.len = m->m_len = total_len;
2162		}
2163
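		/*
		 * Pass the frame up the stack without holding the
		 * driver lock, since if_input() may re-enter the
		 * driver (e.g. to transmit a reply).
		 */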
2164		ifp->if_ipackets++;
2165		SK_UNLOCK(sc);
2166		(*ifp->if_input)(ifp, m);
2167		SK_LOCK(sc);
2168	}
2169
2170	sc_if->sk_cdata.sk_rx_prod = i;
2171
2172	return;
2173}
2174
2175static void
2176sk_txeof(sc_if)
2177	struct sk_if_softc	*sc_if;
2178{
2179	struct sk_softc		*sc;
2180	struct sk_tx_desc	*cur_tx;
2181	struct ifnet		*ifp;
2182	u_int32_t		idx;
2183
2184	sc = sc_if->sk_softc;
2185	ifp = &sc_if->arpcom.ac_if;
2186
2187	/*
2188	 * Go through our tx ring and free mbufs for those
2189	 * frames that have been sent.
2190	 */
2191	idx = sc_if->sk_cdata.sk_tx_cons;
2192	while(idx != sc_if->sk_cdata.sk_tx_prod) {
2193		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
2194		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
2195			break;
2196		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
2197			ifp->if_opackets++;
2198		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
2199			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
2200			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
2201		}
2202		sc_if->sk_cdata.sk_tx_cnt--;
2203		SK_INC(idx, SK_TX_RING_CNT);
2204	}
2205
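	/*
	 * Clear the watchdog once the ring is empty; otherwise prod the
	 * transmitter in case it stalled. OACTIVE is cleared as soon as
	 * the two-descriptor reserve needed by sk_encap() is available
	 * again.
	 */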
2206	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
2207		ifp->if_timer = 0;
2208	} else /* nudge chip to keep tx ring moving */
2209		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2210
2211	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
2212		ifp->if_flags &= ~IFF_OACTIVE;
2213
2214	sc_if->sk_cdata.sk_tx_cons = idx;
2215}
2216
2217static void
2218sk_tick(xsc_if)
2219	void			*xsc_if;
2220{
2221	struct sk_if_softc	*sc_if;
2222	struct mii_data		*mii;
2223	struct ifnet		*ifp;
2224	int			i;
2225
2226	sc_if = xsc_if;
2227	SK_IF_LOCK(sc_if);
2228	ifp = &sc_if->arpcom.ac_if;
2229	mii = device_get_softc(sc_if->sk_miibus);
2230
2231	if (!(ifp->if_flags & IFF_UP)) {
2232		SK_IF_UNLOCK(sc_if);
2233		return;
2234	}
2235
2236	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2237		sk_intr_bcom(sc_if);
2238		SK_IF_UNLOCK(sc_if);
2239		return;
2240	}
2241
2242	/*
2243	 * According to SysKonnect, the correct way to verify that
2244	 * the link has come back up is to poll bit 0 of the GPIO
2245	 * register three times. This pin has the signal from the
2246	 * link_sync pin connected to it; if we read the same link
2247	 * state 3 times in a row, we know the link is up.
2248	 */
2249	for (i = 0; i < 3; i++) {
2250		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2251			break;
2252	}
2253
2254	if (i != 3) {
2255		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2256		SK_IF_UNLOCK(sc_if);
2257		return;
2258	}
2259
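	/*
	 * Three consecutive reads agreed, so the link is considered up
	 * again; resume interrupt-driven link monitoring and cancel
	 * this polling callout.
	 */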
2260	/* Turn the GP0 interrupt back on. */
2261	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2262	SK_XM_READ_2(sc_if, XM_ISR);
2263	mii_tick(mii);
2264	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2265
2266	SK_IF_UNLOCK(sc_if);
2267	return;
2268}
2269
2270static void
2271sk_intr_bcom(sc_if)
2272	struct sk_if_softc	*sc_if;
2273{
2274	struct mii_data		*mii;
2275	struct ifnet		*ifp;
2276	int			status;
2277	mii = device_get_softc(sc_if->sk_miibus);
2278	ifp = &sc_if->arpcom.ac_if;
2279
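	/*
	 * Pause the XMAC's transmitter and receiver while the Broadcom
	 * PHY is being serviced; both are re-enabled at the end of this
	 * function.
	 */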
2280	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2281
2282	/*
2283	 * Read the PHY interrupt register to make sure
2284	 * we clear any pending interrupts.
2285	 */
2286	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2287
2288	if (!(ifp->if_flags & IFF_RUNNING)) {
2289		sk_init_xmac(sc_if);
2290		return;
2291	}
2292
2293	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2294		int			lstat;
2295		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2296		    BRGPHY_MII_AUXSTS);
2297
2298		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2299			mii_mediachg(mii);
2300			/* Turn off the link LED. */
2301			SK_IF_WRITE_1(sc_if, 0,
2302			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2303			sc_if->sk_link = 0;
2304		} else if (status & BRGPHY_ISR_LNK_CHG) {
2305			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFF00);
2307			mii_tick(mii);
2308			sc_if->sk_link = 1;
2309			/* Turn on the link LED. */
2310			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2311			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2312			    SK_LINKLED_BLINK_OFF);
2313		} else {
2314			mii_tick(mii);
2315			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2316		}
2317	}
2318
2319	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2320
2321	return;
2322}
2323
2324static void
2325sk_intr_xmac(sc_if)
2326	struct sk_if_softc	*sc_if;
2327{
2328	struct sk_softc		*sc;
2329	u_int16_t		status;
2330
2331	sc = sc_if->sk_softc;
2332	status = SK_XM_READ_2(sc_if, XM_ISR);
2333
2334	/*
2335	 * Link has gone down. Start MII tick timeout to
2336	 * watch for link resync.
2337	 */
2338	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2339		if (status & XM_ISR_GP0_SET) {
2340			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2341			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2342		}
2343
2344		if (status & XM_ISR_AUTONEG_DONE) {
2345			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2346		}
2347	}
2348
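	/* On a transmit underrun or receive overrun, flush the affected FIFO. */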
2349	if (status & XM_IMR_TX_UNDERRUN)
2350		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2351
2352	if (status & XM_IMR_RX_OVERRUN)
2353		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2354
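	/*
	 * A final read of the ISR should clear any status bits that were
	 * latched while the MAC was being serviced.
	 */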
2355	status = SK_XM_READ_2(sc_if, XM_ISR);
2356
2357	return;
2358}
2359
2360static void
2361sk_intr_yukon(sc_if)
2362	struct sk_if_softc	*sc_if;
2363{
2364	int status;
2365
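	/* Read (and currently ignore) the GMAC interrupt source register. */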
2366	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2367
2368	return;
2369}
2370
2371static void
2372sk_intr(xsc)
2373	void			*xsc;
2374{
2375	struct sk_softc		*sc = xsc;
2376	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2377	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2378	u_int32_t		status;
2379
2380	SK_LOCK(sc);
2381
2382	sc_if0 = sc->sk_if[SK_PORT_A];
2383	sc_if1 = sc->sk_if[SK_PORT_B];
2384
2385	if (sc_if0 != NULL)
2386		ifp0 = &sc_if0->arpcom.ac_if;
2387	if (sc_if1 != NULL)
2388		ifp1 = &sc_if1->arpcom.ac_if;
2389
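	/*
	 * Keep servicing the card until none of the interrupt sources
	 * we have enabled in sk_intrmask remain asserted.
	 */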
2390	for (;;) {
2391		status = CSR_READ_4(sc, SK_ISSR);
2392		if (!(status & sc->sk_intrmask))
2393			break;
2394
2395		/* Handle receive interrupts first. */
2396		if (status & SK_ISR_RX1_EOF) {
2397			sk_rxeof(sc_if0);
2398			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2399			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2400		}
2401		if (status & SK_ISR_RX2_EOF) {
2402			sk_rxeof(sc_if1);
2403			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2404			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2405		}
2406
2407		/* Then transmit interrupts. */
2408		if (status & SK_ISR_TX1_S_EOF) {
2409			sk_txeof(sc_if0);
2410			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2411			    SK_TXBMU_CLR_IRQ_EOF);
2412		}
2413		if (status & SK_ISR_TX2_S_EOF) {
2414			sk_txeof(sc_if1);
2415			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2416			    SK_TXBMU_CLR_IRQ_EOF);
2417		}
2418
2419		/* Then MAC interrupts. */
2420		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2421			if (sc->sk_type == SK_GENESIS)
2422				sk_intr_xmac(sc_if0);
2423			else
2424				sk_intr_yukon(sc_if0);
2425		}
2426
2427		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2428			if (sc->sk_type == SK_GENESIS)
2429				sk_intr_xmac(sc_if1);
2430			else
2431				sk_intr_yukon(sc_if1);
2432		}
2433
2434		if (status & SK_ISR_EXTERNAL_REG) {
2435			if (ifp0 != NULL &&
2436			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2437				sk_intr_bcom(sc_if0);
2438			if (ifp1 != NULL &&
2439			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2440				sk_intr_bcom(sc_if1);
2441		}
2442	}
2443
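	/* Re-arm the interrupt mask and restart any queued transmissions. */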
2444	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2445
2446	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2447		sk_start(ifp0);
2448	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2449		sk_start(ifp1);
2450
2451	SK_UNLOCK(sc);
2452
2453	return;
2454}
2455
2456static void
2457sk_init_xmac(sc_if)
2458	struct sk_if_softc	*sc_if;
2459{
2460	struct sk_softc		*sc;
2461	struct ifnet		*ifp;
2462	struct sk_bcom_hack	bhack[] = {
2463	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2464	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2465	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2466	{ 0, 0 } };
2467
2468	sc = sc_if->sk_softc;
2469	ifp = &sc_if->arpcom.ac_if;
2470
2471	/* Unreset the XMAC. */
2472	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2473	DELAY(1000);
2474
2475	/* Reset the XMAC's internal state. */
2476	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2477
2478	/* Save the XMAC II revision */
2479	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2480
2481	/*
2482	 * Perform additional initialization for external PHYs,
2483	 * namely for the 1000baseTX cards that use the XMAC's
2484	 * GMII mode.
2485	 */
2486	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2487		int			i = 0;
2488		u_int32_t		val;
2489
2490		/* Take PHY out of reset. */
2491		val = sk_win_read_4(sc, SK_GPIO);
2492		if (sc_if->sk_port == SK_PORT_A)
2493			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2494		else
2495			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2496		sk_win_write_4(sc, SK_GPIO, val);
2497
2498		/* Enable GMII mode on the XMAC. */
2499		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2500
2501		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2502		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2503		DELAY(10000);
2504		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2505		    BRGPHY_MII_IMR, 0xFFF0);
2506
2507		/*
2508		 * Early versions of the BCM5400 apparently have
2509		 * a bug that requires them to have their reserved
2510		 * registers initialized to some magic values. I don't
2511		 * know what the numbers do, I'm just the messenger.
2512		 */
2513		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2514		    == 0x6041) {
2515			while(bhack[i].reg) {
2516				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2517				    bhack[i].reg, bhack[i].val);
2518				i++;
2519			}
2520		}
2521	}
2522
2523	/* Set station address */
2524	SK_XM_WRITE_2(sc_if, XM_PAR0,
2525	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2526	SK_XM_WRITE_2(sc_if, XM_PAR1,
2527	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2528	SK_XM_WRITE_2(sc_if, XM_PAR2,
2529	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2530	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2531
2532	if (ifp->if_flags & IFF_BROADCAST) {
2533		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2534	} else {
2535		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2536	}
2537
2538	/* We don't need the FCS appended to the packet. */
2539	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2540
2541	/* We want short frames padded to 60 bytes. */
2542	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2543
2544	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, but jumbo
	 * frames can be up to 9000 bytes in length. When bad
2549	 * frame filtering is enabled, the XMAC's RX FIFO operates
2550	 * in 'store and forward' mode. For this to work, the
2551	 * entire frame has to fit into the FIFO, but that means
2552	 * that jumbo frames larger than 8192 bytes will be
2553	 * truncated. Disabling all bad frame filtering causes
2554	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
2556	 * RX FIFO as soon as the FIFO threshold is reached.
2557	 */
2558	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2559	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2560	    XM_MODE_RX_INRANGELEN);
2561
2562	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2563		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2564	else
2565		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2566
2567	/*
2568	 * Bump up the transmit threshold. This helps hold off transmit
2569	 * underruns when we're blasting traffic from both ports at once.
2570	 */
2571	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2572
2573	/* Set promiscuous mode */
2574	sk_setpromisc(sc_if);
2575
2576	/* Set multicast filter */
2577	sk_setmulti(sc_if);
2578
2579	/* Clear and enable interrupts */
2580	SK_XM_READ_2(sc_if, XM_ISR);
2581	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2582		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2583	else
2584		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2585
2586	/* Configure MAC arbiter */
2587	switch(sc_if->sk_xmac_rev) {
2588	case XM_XMAC_REV_B2:
2589		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2590		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2591		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2592		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2593		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2594		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2595		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2596		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2597		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2598		break;
2599	case XM_XMAC_REV_C1:
2600		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2601		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2602		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2603		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2604		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2605		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2606		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2607		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2608		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2609		break;
2610	default:
2611		break;
2612	}
2613	sk_win_write_2(sc, SK_MACARB_CTL,
2614	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2615
2616	sc_if->sk_link = 1;
2617
2618	return;
2619}
2620
2621static void
2622sk_init_yukon(sc_if)
2623	struct sk_if_softc	*sc_if;
2624{
2625	u_int32_t		phy;
2626	u_int16_t		reg;
2627	struct sk_softc		*sc;
2628	struct ifnet		*ifp;
2629	int			i;
2630
2631	sc = sc_if->sk_softc;
2632	ifp = &sc_if->arpcom.ac_if;
2633
2634	if (sc->sk_type == SK_YUKON_LITE &&
2635	    sc->sk_rev == SK_YUKON_LITE_REV_A3) {
2636		/* Take PHY out of reset. */
2637		sk_win_write_4(sc, SK_GPIO,
2638			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
2639	}
2640
2641	/* GMAC and GPHY Reset */
2642	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2643	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2644	DELAY(1000);
2645	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2646	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2647	DELAY(1000);
2648
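	/*
	 * Assemble the GPHY control word, selecting fiber or copper
	 * signalling to match the board's PMD type, then cycle the PHY
	 * through reset with those settings.
	 */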
2649	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2650		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2651
2652	switch(sc_if->sk_softc->sk_pmd) {
2653	case IFM_1000_SX:
2654	case IFM_1000_LX:
2655		phy |= SK_GPHY_FIBER;
2656		break;
2657
2658	case IFM_1000_CX:
2659	case IFM_1000_T:
2660		phy |= SK_GPHY_COPPER;
2661		break;
2662	}
2663
2664	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2665	DELAY(1000);
2666	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2667	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2668		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2669
2670	/* unused read of the interrupt source register */
2671	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2672
2673	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2674
2675	/* MIB Counter Clear Mode set */
2676	reg |= YU_PAR_MIB_CLR;
2677	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2678
2679	/* MIB Counter Clear Mode clear */
2680	reg &= ~YU_PAR_MIB_CLR;
2681	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2682
2683	/* receive control reg */
2684	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2685
2686	/* transmit parameter register */
2687	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2688		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2689
2690	/* serial mode register */
2691	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
2692	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2693		reg |= YU_SMR_MFL_JUMBO;
2694	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
2695
2696	/* Setup Yukon's address */
2697	for (i = 0; i < 3; i++) {
2698		/* Write Source Address 1 (unicast filter) */
2699		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2700			      sc_if->arpcom.ac_enaddr[i * 2] |
2701			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2702	}
2703
2704	for (i = 0; i < 3; i++) {
2705		reg = sk_win_read_2(sc_if->sk_softc,
2706				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2707		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2708	}
2709
2710	/* Set promiscuous mode */
2711	sk_setpromisc(sc_if);
2712
2713	/* Set multicast filter */
2714	sk_setmulti(sc_if);
2715
2716	/* enable interrupt mask for counter overflows */
2717	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2718	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2719	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2720
2721	/* Configure RX MAC FIFO */
2722	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2723	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2724
2725	/* Configure TX MAC FIFO */
2726	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2727	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2728}
2729
2730/*
2731 * Note that to properly initialize any part of the GEnesis chip,
2732 * you first have to take it out of reset mode.
2733 */
2734static void
2735sk_init(xsc)
2736	void			*xsc;
2737{
2738	struct sk_if_softc	*sc_if = xsc;
2739	struct sk_softc		*sc;
2740	struct ifnet		*ifp;
2741	struct mii_data		*mii;
2742	u_int16_t		reg;
2743	u_int32_t		imr;
2744
2745	SK_IF_LOCK(sc_if);
2746
2747	ifp = &sc_if->arpcom.ac_if;
2748	sc = sc_if->sk_softc;
2749	mii = device_get_softc(sc_if->sk_miibus);
2750
2751	if (ifp->if_flags & IFF_RUNNING) {
2752		SK_IF_UNLOCK(sc_if);
2753		return;
2754	}
2755
2756	/* Cancel pending I/O and free all RX/TX buffers. */
2757	sk_stop(sc_if);
2758
2759	if (sc->sk_type == SK_GENESIS) {
2760		/* Configure LINK_SYNC LED */
2761		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2762		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2763			SK_LINKLED_LINKSYNC_ON);
2764
2765		/* Configure RX LED */
2766		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2767			SK_RXLEDCTL_COUNTER_START);
2768
2769		/* Configure TX LED */
2770		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2771			SK_TXLEDCTL_COUNTER_START);
2772	}
2773
2774	/* Configure I2C registers */
2775
2776	/* Configure XMAC(s) */
2777	switch (sc->sk_type) {
2778	case SK_GENESIS:
2779		sk_init_xmac(sc_if);
2780		break;
2781	case SK_YUKON:
2782	case SK_YUKON_LITE:
2783	case SK_YUKON_LP:
2784		sk_init_yukon(sc_if);
2785		break;
2786	}
2787	mii_mediachg(mii);
2788
2789	if (sc->sk_type == SK_GENESIS) {
2790		/* Configure MAC FIFOs */
2791		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2792		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2793		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2794
2795		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2796		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2797		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2798	}
2799
2800	/* Configure transmit arbiter(s) */
2801	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2802	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2803
2804	/* Configure RAMbuffers */
2805	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2806	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2807	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2808	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2809	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2810	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2811
2812	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2813	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2814	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2815	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2816	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2817	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2818	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2819
2820	/* Configure BMUs */
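	/*
	 * Bring each BMU online and point it at the physical address of
	 * the first descriptor in its ring; the upper 32 address bits
	 * are written as zero.
	 */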
2821	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2822	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2823	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2824	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2825
2826	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2827	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2828	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2829	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2830
2831	/* Init descriptors */
2832	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2833		printf("sk%d: initialization failed: no "
2834		    "memory for rx buffers\n", sc_if->sk_unit);
2835		sk_stop(sc_if);
2836		SK_IF_UNLOCK(sc_if);
2837		return;
2838	}
2839	sk_init_tx_ring(sc_if);
2840
2841	/* Set interrupt moderation if changed via sysctl. */
2842	/* SK_LOCK(sc); */
2843	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
2844	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
2845		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
2846		printf("skc%d: interrupt moderation is %d us\n",
2847		    sc->sk_unit, sc->sk_int_mod);
2848	}
2849	/* SK_UNLOCK(sc); */
2850
2851	/* Configure interrupt handling */
2852	CSR_READ_4(sc, SK_ISSR);
2853	if (sc_if->sk_port == SK_PORT_A)
2854		sc->sk_intrmask |= SK_INTRS1;
2855	else
2856		sc->sk_intrmask |= SK_INTRS2;
2857
2858	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2859
2860	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2861
2862	/* Start BMUs. */
2863	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2864
2865	switch(sc->sk_type) {
2866	case SK_GENESIS:
2867		/* Enable XMACs TX and RX state machines */
2868		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2869		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2870		break;
2871	case SK_YUKON:
2872	case SK_YUKON_LITE:
2873	case SK_YUKON_LP:
2874		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2875		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2876		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2877		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2878	}
2879
2880	ifp->if_flags |= IFF_RUNNING;
2881	ifp->if_flags &= ~IFF_OACTIVE;
2882
2883	SK_IF_UNLOCK(sc_if);
2884
2885	return;
2886}
2887
2888static void
2889sk_stop(sc_if)
2890	struct sk_if_softc	*sc_if;
2891{
2892	int			i;
2893	struct sk_softc		*sc;
2894	struct ifnet		*ifp;
2895
2896	SK_IF_LOCK(sc_if);
2897	sc = sc_if->sk_softc;
2898	ifp = &sc_if->arpcom.ac_if;
2899
2900	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2901
2902	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2903		u_int32_t		val;
2904
2905		/* Put PHY back into reset. */
2906		val = sk_win_read_4(sc, SK_GPIO);
2907		if (sc_if->sk_port == SK_PORT_A) {
2908			val |= SK_GPIO_DIR0;
2909			val &= ~SK_GPIO_DAT0;
2910		} else {
2911			val |= SK_GPIO_DIR2;
2912			val &= ~SK_GPIO_DAT2;
2913		}
2914		sk_win_write_4(sc, SK_GPIO, val);
2915	}
2916
2917	/* Turn off various components of this interface. */
2918	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2919	switch (sc->sk_type) {
2920	case SK_GENESIS:
2921		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2922		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2923		break;
2924	case SK_YUKON:
2925	case SK_YUKON_LITE:
2926	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2929		break;
2930	}
2931	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2932	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2933	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2934	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2935	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2936	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2937	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2938	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2939	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2940
2941	/* Disable interrupts */
2942	if (sc_if->sk_port == SK_PORT_A)
2943		sc->sk_intrmask &= ~SK_INTRS1;
2944	else
2945		sc->sk_intrmask &= ~SK_INTRS2;
2946	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2947
2948	SK_XM_READ_2(sc_if, XM_ISR);
2949	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2950
2951	/* Free RX and TX mbufs still in the queues. */
2952	for (i = 0; i < SK_RX_RING_CNT; i++) {
2953		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2954			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2955			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2956		}
2957	}
2958
2959	for (i = 0; i < SK_TX_RING_CNT; i++) {
2960		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2961			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2962			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2963		}
2964	}
2965
2966	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2967	SK_IF_UNLOCK(sc_if);
2968	return;
2969}
2970
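/*
 * Generic integer sysctl handler: report the current value and accept
 * a new one only if it falls within [low, high].
 */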
2971static int
2972sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2973{
2974	int error, value;
2975
2976	if (!arg1)
2977		return (EINVAL);
2978	value = *(int *)arg1;
2979	error = sysctl_handle_int(oidp, &value, 0, req);
2980	if (error || !req->newptr)
2981		return (error);
2982	if (value < low || value > high)
2983		return (EINVAL);
2984	*(int *)arg1 = value;
2985	return (0);
2986}
2987
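/*
 * Sysctl handler for the interrupt moderation timer; values outside
 * SK_IM_MIN..SK_IM_MAX are rejected.
 */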
2988static int
2989sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
2990{
2991	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
2992}
2993