1/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2
3/*-
4 * Copyright (c) 1997, 1998, 1999, 2000
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
34/*-
35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36 *
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
40 *
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/dev/sk/if_sk.c 151242 2005-10-11 22:55:16Z yar $");
52
53/*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * 	The XaQti XMAC II datasheet,
58 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71/*
72 * The SysKonnect gigabit ethernet adapters consist of two main
73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75 * components and a PHY while the GEnesis controller provides a PCI
76 * interface with DMA support. Each card may have between 512K and
77 * 2MB of SRAM on board depending on the configuration.
78 *
79 * The SysKonnect GEnesis controller can have either one or two XMAC
80 * chips connected to it, allowing single or dual port NIC configurations.
81 * SysKonnect has the distinction of being the only vendor on the market
82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84 * XMAC registers. This driver takes advantage of these features to allow
85 * both XMACs to operate as independent interfaces.
86 */
87
88#include <sys/param.h>
89#include <sys/systm.h>
90#include <sys/sockio.h>
91#include <sys/mbuf.h>
92#include <sys/malloc.h>
93#include <sys/kernel.h>
94#include <sys/module.h>
95#include <sys/socket.h>
96#include <sys/queue.h>
97#include <sys/sysctl.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
104#include <net/if_types.h>
105
106#include <net/bpf.h>
107
108#include <vm/vm.h>              /* for vtophys */
109#include <vm/pmap.h>            /* for vtophys */
110#include <machine/bus.h>
111#include <machine/resource.h>
112#include <sys/bus.h>
113#include <sys/rman.h>
114
115#include <dev/mii/mii.h>
116#include <dev/mii/miivar.h>
117#include <dev/mii/brgphyreg.h>
118
119#include <dev/pci/pcireg.h>
120#include <dev/pci/pcivar.h>
121
122#if 0
123#define SK_USEIOSPACE
124#endif
125
126#include <pci/if_skreg.h>
127#include <pci/xmaciireg.h>
128#include <pci/yukonreg.h>
129
130MODULE_DEPEND(sk, pci, 1, 1, 1);
131MODULE_DEPEND(sk, ether, 1, 1, 1);
132MODULE_DEPEND(sk, miibus, 1, 1, 1);
133
134/* "controller miibus0" required.  See GENERIC if you get errors here. */
135#include "miibus_if.h"
136
137#ifndef lint
138static const char rcsid[] =
139  "$FreeBSD: head/sys/dev/sk/if_sk.c 151242 2005-10-11 22:55:16Z yar $";
140#endif
141
142static struct sk_type sk_devs[] = {
143	{
144		VENDORID_SK,
145		DEVICEID_SK_V1,
146		"SysKonnect Gigabit Ethernet (V1.0)"
147	},
148	{
149		VENDORID_SK,
150		DEVICEID_SK_V2,
151		"SysKonnect Gigabit Ethernet (V2.0)"
152	},
153	{
154		VENDORID_MARVELL,
155		DEVICEID_SK_V2,
156		"Marvell Gigabit Ethernet"
157	},
158	{
159		VENDORID_MARVELL,
160		DEVICEID_BELKIN_5005,
161		"Belkin F5D5005 Gigabit Ethernet"
162	},
163	{
164		VENDORID_3COM,
165		DEVICEID_3COM_3C940,
166		"3Com 3C940 Gigabit Ethernet"
167	},
168	{
169		VENDORID_LINKSYS,
170		DEVICEID_LINKSYS_EG1032,
171		"Linksys EG1032 Gigabit Ethernet"
172	},
173	{
174		VENDORID_DLINK,
175		DEVICEID_DLINK_DGE530T,
176		"D-Link DGE-530T Gigabit Ethernet"
177	},
178	{ 0, 0, NULL }
179};
180
181static int skc_probe(device_t);
182static int skc_attach(device_t);
183static int skc_detach(device_t);
184static void skc_shutdown(device_t);
185static int sk_detach(device_t);
186static int sk_probe(device_t);
187static int sk_attach(device_t);
188static void sk_tick(void *);
189static void sk_intr(void *);
190static void sk_intr_xmac(struct sk_if_softc *);
191static void sk_intr_bcom(struct sk_if_softc *);
192static void sk_intr_yukon(struct sk_if_softc *);
193static void sk_rxeof(struct sk_if_softc *);
194static void sk_txeof(struct sk_if_softc *);
195static int sk_encap(struct sk_if_softc *, struct mbuf *,
196					u_int32_t *);
197static void sk_start(struct ifnet *);
198static void sk_start_locked(struct ifnet *);
199static int sk_ioctl(struct ifnet *, u_long, caddr_t);
200static void sk_init(void *);
201static void sk_init_locked(struct sk_if_softc *);
202static void sk_init_xmac(struct sk_if_softc *);
203static void sk_init_yukon(struct sk_if_softc *);
204static void sk_stop(struct sk_if_softc *);
205static void sk_watchdog(struct ifnet *);
206static int sk_ifmedia_upd(struct ifnet *);
207static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
208static void sk_reset(struct sk_softc *);
209static int sk_newbuf(struct sk_if_softc *,
210					struct sk_chain *, struct mbuf *);
211static int sk_alloc_jumbo_mem(struct sk_if_softc *);
212static void sk_free_jumbo_mem(struct sk_if_softc *);
213static void *sk_jalloc(struct sk_if_softc *);
214static void sk_jfree(void *, void *);
215static int sk_init_rx_ring(struct sk_if_softc *);
216static void sk_init_tx_ring(struct sk_if_softc *);
217static u_int32_t sk_win_read_4(struct sk_softc *, int);
218static u_int16_t sk_win_read_2(struct sk_softc *, int);
219static u_int8_t sk_win_read_1(struct sk_softc *, int);
220static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
221static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
222static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
223static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
224static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
225static void sk_vpd_read(struct sk_softc *);
226
227static int sk_miibus_readreg(device_t, int, int);
228static int sk_miibus_writereg(device_t, int, int, int);
229static void sk_miibus_statchg(device_t);
230
231static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
232static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
233						int);
234static void sk_xmac_miibus_statchg(struct sk_if_softc *);
235
236static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
237static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
238						int);
239static void sk_marv_miibus_statchg(struct sk_if_softc *);
240
241static uint32_t sk_xmchash(const uint8_t *);
242static uint32_t sk_gmchash(const uint8_t *);
243static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
244static void sk_setmulti(struct sk_if_softc *);
245static void sk_setpromisc(struct sk_if_softc *);
246
247static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
248static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
249
250#ifdef SK_USEIOSPACE
251#define SK_RES		SYS_RES_IOPORT
252#define SK_RID		SK_PCI_LOIO
253#else
254#define SK_RES		SYS_RES_MEMORY
255#define SK_RID		SK_PCI_LOMEM
256#endif
257
258/*
259 * Note that we have newbus methods for both the GEnesis controller
260 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
261 * the miibus code is a child of the XMACs. We need to do it this way
262 * so that the miibus drivers can access the PHY registers on the
263 * right PHY. It's not quite what I had in mind, but it's the only
264 * design that achieves the desired effect.
265 */
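/*
 * Sketch of the resulting device hierarchy on a dual port card
 * (unit numbers are illustrative only):
 *
 *	skc0 (GEnesis/Yukon controller, PCI)
 *	    sk0 (MAC, port A) -> miibus0 -> PHY
 *	    sk1 (MAC, port B) -> miibus1 -> PHY
 */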
266static device_method_t skc_methods[] = {
267	/* Device interface */
268	DEVMETHOD(device_probe,		skc_probe),
269	DEVMETHOD(device_attach,	skc_attach),
270	DEVMETHOD(device_detach,	skc_detach),
271	DEVMETHOD(device_shutdown,	skc_shutdown),
272
273	/* bus interface */
274	DEVMETHOD(bus_print_child,	bus_generic_print_child),
275	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
276
277	{ 0, 0 }
278};
279
280static driver_t skc_driver = {
281	"skc",
282	skc_methods,
283	sizeof(struct sk_softc)
284};
285
286static devclass_t skc_devclass;
287
288static device_method_t sk_methods[] = {
289	/* Device interface */
290	DEVMETHOD(device_probe,		sk_probe),
291	DEVMETHOD(device_attach,	sk_attach),
292	DEVMETHOD(device_detach,	sk_detach),
293	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
294
295	/* bus interface */
296	DEVMETHOD(bus_print_child,	bus_generic_print_child),
297	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
298
299	/* MII interface */
300	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
301	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
302	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
303
304	{ 0, 0 }
305};
306
307static driver_t sk_driver = {
308	"sk",
309	sk_methods,
310	sizeof(struct sk_if_softc)
311};
312
313static devclass_t sk_devclass;
314
315DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
316DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
317DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
318
319#define SK_SETBIT(sc, reg, x)		\
320	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
321
322#define SK_CLRBIT(sc, reg, x)		\
323	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
324
325#define SK_WIN_SETBIT_4(sc, reg, x)	\
326	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
327
328#define SK_WIN_CLRBIT_4(sc, reg, x)	\
329	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
330
331#define SK_WIN_SETBIT_2(sc, reg, x)	\
332	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
333
334#define SK_WIN_CLRBIT_2(sc, reg, x)	\
335	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
336
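/*
 * Register window accessors. With memory space (the default here, since
 * SK_USEIOSPACE is left undefined above) the whole register file is mapped
 * linearly and these collapse to plain CSR accesses. With I/O space the chip
 * only exposes a small window: the desired window is first selected through
 * the RAP register and the register is then accessed at its offset within
 * SK_WIN_BASE. SK_WIN() and SK_REG() are assumed to split a register offset
 * into its window number and in-window offset (they live in if_skreg.h).
 */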
337static u_int32_t
338sk_win_read_4(sc, reg)
339	struct sk_softc		*sc;
340	int			reg;
341{
342#ifdef SK_USEIOSPACE
343	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
344	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
345#else
346	return(CSR_READ_4(sc, reg));
347#endif
348}
349
350static u_int16_t
351sk_win_read_2(sc, reg)
352	struct sk_softc		*sc;
353	int			reg;
354{
355#ifdef SK_USEIOSPACE
356	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
357	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
358#else
359	return(CSR_READ_2(sc, reg));
360#endif
361}
362
363static u_int8_t
364sk_win_read_1(sc, reg)
365	struct sk_softc		*sc;
366	int			reg;
367{
368#ifdef SK_USEIOSPACE
369	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
370	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
371#else
372	return(CSR_READ_1(sc, reg));
373#endif
374}
375
376static void
377sk_win_write_4(sc, reg, val)
378	struct sk_softc		*sc;
379	int			reg;
380	u_int32_t		val;
381{
382#ifdef SK_USEIOSPACE
383	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
384	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
385#else
386	CSR_WRITE_4(sc, reg, val);
387#endif
388	return;
389}
390
391static void
392sk_win_write_2(sc, reg, val)
393	struct sk_softc		*sc;
394	int			reg;
395	u_int32_t		val;
396{
397#ifdef SK_USEIOSPACE
398	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
399	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
400#else
401	CSR_WRITE_2(sc, reg, val);
402#endif
403	return;
404}
405
406static void
407sk_win_write_1(sc, reg, val)
408	struct sk_softc		*sc;
409	int			reg;
410	u_int32_t		val;
411{
412#ifdef SK_USEIOSPACE
413	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
414	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
415#else
416	CSR_WRITE_1(sc, reg, val);
417#endif
418	return;
419}
420
421/*
422 * The VPD EEPROM contains Vital Product Data, as suggested in
423 * the PCI 2.1 specification. The VPD data is separated into areas
424 * denoted by resource IDs. The SysKonnect VPD contains an ID string
425 * resource (the name of the adapter), a read-only area resource
426 * containing various key/data fields and a read/write area which
427 * can be used to store asset management information or log messages.
428 * We read the ID string and read-only area into buffers attached to
429 * the controller softc structure for later use. At the moment,
430 * we only use the ID string during skc_attach().
431 */
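/*
 * Layout sketch of the portion of the VPD parsed below (offsets relative
 * to the start of the VPD area):
 *
 *	struct vpd_res header (ID string resource, vr_id == VPD_RES_ID)
 *	vr_len bytes of product name
 *	struct vpd_res header (read-only resource, vr_id == VPD_RES_READ)
 *	vr_len bytes of keyword data (parsed further in skc_attach())
 */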
432static u_int8_t
433sk_vpd_readbyte(sc, addr)
434	struct sk_softc		*sc;
435	int			addr;
436{
437	int			i;
438
439	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
440	for (i = 0; i < SK_TIMEOUT; i++) {
441		DELAY(1);
442		if (sk_win_read_2(sc,
443		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
444			break;
445	}
446
447	if (i == SK_TIMEOUT)
448		return(0);
449
450	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
451}
452
453static void
454sk_vpd_read_res(sc, res, addr)
455	struct sk_softc		*sc;
456	struct vpd_res		*res;
457	int			addr;
458{
459	int			i;
460	u_int8_t		*ptr;
461
462	ptr = (u_int8_t *)res;
463	for (i = 0; i < sizeof(struct vpd_res); i++)
464		ptr[i] = sk_vpd_readbyte(sc, i + addr);
465
466	return;
467}
468
469static void
470sk_vpd_read(sc)
471	struct sk_softc		*sc;
472{
473	int			pos = 0, i;
474	struct vpd_res		res;
475
476	if (sc->sk_vpd_prodname != NULL)
477		free(sc->sk_vpd_prodname, M_DEVBUF);
478	if (sc->sk_vpd_readonly != NULL)
479		free(sc->sk_vpd_readonly, M_DEVBUF);
480	sc->sk_vpd_prodname = NULL;
481	sc->sk_vpd_readonly = NULL;
482	sc->sk_vpd_readonly_len = 0;
483
484	sk_vpd_read_res(sc, &res, pos);
485
486	/*
487	 * Bail out quietly if the eeprom appears to be missing or empty.
488	 */
489	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
490		return;
491
492	if (res.vr_id != VPD_RES_ID) {
493		printf("skc%d: bad VPD resource id: expected %x got %x\n",
494		    sc->sk_unit, VPD_RES_ID, res.vr_id);
495		return;
496	}
497
498	pos += sizeof(res);
499	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
500	if (sc->sk_vpd_prodname != NULL) {
501		for (i = 0; i < res.vr_len; i++)
502			sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
503		sc->sk_vpd_prodname[i] = '\0';
504	}
505	pos += res.vr_len;
506
507	sk_vpd_read_res(sc, &res, pos);
508
509	if (res.vr_id != VPD_RES_READ) {
510		printf("skc%d: bad VPD resource id: expected %x got %x\n",
511		    sc->sk_unit, VPD_RES_READ, res.vr_id);
512		return;
513	}
514
515	pos += sizeof(res);
516	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		return;
517	for (i = 0; i < res.vr_len; i++)
518		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
519	sc->sk_vpd_readonly_len = res.vr_len;
520
521	return;
522}
523
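/*
 * MII access is dispatched on controller type: the GEnesis reaches its PHY
 * through the XMAC's PHY address/data registers, while the Yukon family
 * goes through the GMAC SMI command/data registers. The three wrappers
 * below are what the miibus layer actually calls.
 */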
524static int
525sk_miibus_readreg(dev, phy, reg)
526	device_t		dev;
527	int			phy, reg;
528{
529	struct sk_if_softc	*sc_if;
530
531	sc_if = device_get_softc(dev);
532
533	switch(sc_if->sk_softc->sk_type) {
534	case SK_GENESIS:
535		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
536	case SK_YUKON:
537	case SK_YUKON_LITE:
538	case SK_YUKON_LP:
539		return(sk_marv_miibus_readreg(sc_if, phy, reg));
540	}
541
542	return(0);
543}
544
545static int
546sk_miibus_writereg(dev, phy, reg, val)
547	device_t		dev;
548	int			phy, reg, val;
549{
550	struct sk_if_softc	*sc_if;
551
552	sc_if = device_get_softc(dev);
553
554	switch(sc_if->sk_softc->sk_type) {
555	case SK_GENESIS:
556		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
557	case SK_YUKON:
558	case SK_YUKON_LITE:
559	case SK_YUKON_LP:
560		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
561	}
562
563	return(0);
564}
565
566static void
567sk_miibus_statchg(dev)
568	device_t		dev;
569{
570	struct sk_if_softc	*sc_if;
571
572	sc_if = device_get_softc(dev);
573
574	switch(sc_if->sk_softc->sk_type) {
575	case SK_GENESIS:
576		sk_xmac_miibus_statchg(sc_if);
577		break;
578	case SK_YUKON:
579	case SK_YUKON_LITE:
580	case SK_YUKON_LP:
581		sk_marv_miibus_statchg(sc_if);
582		break;
583	}
584
585	return;
586}
587
588static int
589sk_xmac_miibus_readreg(sc_if, phy, reg)
590	struct sk_if_softc	*sc_if;
591	int			phy, reg;
592{
593	int			i;
594
595	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
596		return(0);
597
598	SK_IF_LOCK(sc_if);
599	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
600	SK_XM_READ_2(sc_if, XM_PHY_DATA);
601	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
602		for (i = 0; i < SK_TIMEOUT; i++) {
603			DELAY(1);
604			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
605			    XM_MMUCMD_PHYDATARDY)
606				break;
607		}
608
609		if (i == SK_TIMEOUT) {
610			printf("sk%d: phy failed to come ready\n",
611			    sc_if->sk_unit);
612			SK_IF_UNLOCK(sc_if);
613			return(0);
614		}
615	}
616	DELAY(1);
617	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
618	SK_IF_UNLOCK(sc_if);
619	return(i);
620}
621
622static int
623sk_xmac_miibus_writereg(sc_if, phy, reg, val)
624	struct sk_if_softc	*sc_if;
625	int			phy, reg, val;
626{
627	int			i;
628
629	SK_IF_LOCK(sc_if);
630	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
631	for (i = 0; i < SK_TIMEOUT; i++) {
632		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
633			break;
634	}
635
636	if (i == SK_TIMEOUT) {
637		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
638		SK_IF_UNLOCK(sc_if);
639		return(ETIMEDOUT);
640	}
641
642	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
643	for (i = 0; i < SK_TIMEOUT; i++) {
644		DELAY(1);
645		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
646			break;
647	}
648	SK_IF_UNLOCK(sc_if);
649	if (i == SK_TIMEOUT)
650		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
651
652	return(0);
653}
654
655static void
656sk_xmac_miibus_statchg(sc_if)
657	struct sk_if_softc	*sc_if;
658{
659	struct mii_data		*mii;
660
661	mii = device_get_softc(sc_if->sk_miibus);
662
663	SK_IF_LOCK(sc_if);
664	/*
665	 * If this is a GMII PHY, manually set the XMAC's
666	 * duplex mode accordingly.
667	 */
668	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
669		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
670			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
671		} else {
672			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
673		}
674	}
675	SK_IF_UNLOCK(sc_if);
676
677	return;
678}
679
680static int
681sk_marv_miibus_readreg(sc_if, phy, reg)
682	struct sk_if_softc	*sc_if;
683	int			phy, reg;
684{
685	u_int16_t		val;
686	int			i;
687
688	if (phy != 0 ||
689	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
690	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
691		return(0);
692	}
693
694	SK_IF_LOCK(sc_if);
695        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
696		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
697
698	for (i = 0; i < SK_TIMEOUT; i++) {
699		DELAY(1);
700		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
701		if (val & YU_SMICR_READ_VALID)
702			break;
703	}
704
705	if (i == SK_TIMEOUT) {
706		printf("sk%d: phy failed to come ready\n",
707		    sc_if->sk_unit);
708		SK_IF_UNLOCK(sc_if);
709		return(0);
710	}
711
712	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
713	SK_IF_UNLOCK(sc_if);
714
715	return(val);
716}
717
718static int
719sk_marv_miibus_writereg(sc_if, phy, reg, val)
720	struct sk_if_softc	*sc_if;
721	int			phy, reg, val;
722{
723	int			i;
724
725	SK_IF_LOCK(sc_if);
726	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
727	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
728		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
729
	/* Wait for the write to complete, i.e. for the BUSY bit to clear. */
730	for (i = 0; i < SK_TIMEOUT; i++) {
731		DELAY(1);
732		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
733			break;
734	}
735	SK_IF_UNLOCK(sc_if);
736
737	return(0);
738}
739
740static void
741sk_marv_miibus_statchg(sc_if)
742	struct sk_if_softc	*sc_if;
743{
744	return;
745}
746
747#define HASH_BITS		6
748
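/*
 * Both multicast hashes take the low HASH_BITS (6) bits of an Ethernet CRC
 * of the address, selecting one of 64 bins; each bin corresponds to one bit
 * in the 64 bits of hash registers programmed by sk_setmulti(). The XMAC
 * (GEnesis) variant uses the complement of the little-endian CRC, the GMAC
 * (Yukon) variant the big-endian CRC.
 */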
749static u_int32_t
750sk_xmchash(addr)
751	const uint8_t *addr;
752{
753	uint32_t crc;
754
755	/* Compute CRC for the address value. */
756	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
757
758	return (~crc & ((1 << HASH_BITS) - 1));
759}
760
761/* gmchash is just a big endian crc */
762static u_int32_t
763sk_gmchash(addr)
764	const uint8_t *addr;
765{
766	uint32_t crc;
767
768	/* Compute CRC for the address value. */
769	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
770
771	return (crc & ((1 << HASH_BITS) - 1));
772}
773
774static void
775sk_setfilt(sc_if, addr, slot)
776	struct sk_if_softc	*sc_if;
777	caddr_t			addr;
778	int			slot;
779{
780	int			base;
781
782	base = XM_RXFILT_ENTRY(slot);
783
784	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
785	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
786	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
787
788	return;
789}
790
791static void
792sk_setmulti(sc_if)
793	struct sk_if_softc	*sc_if;
794{
795	struct sk_softc		*sc = sc_if->sk_softc;
796	struct ifnet		*ifp = sc_if->sk_ifp;
797	u_int32_t		hashes[2] = { 0, 0 };
798	int			h = 0, i;
799	struct ifmultiaddr	*ifma;
800	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
801
802	SK_IF_LOCK_ASSERT(sc_if);
803
804	/* First, zot all the existing filters. */
805	switch(sc->sk_type) {
806	case SK_GENESIS:
807		for (i = 1; i < XM_RXFILT_MAX; i++)
808			sk_setfilt(sc_if, (caddr_t)&dummy, i);
809
810		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
811		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
812		break;
813	case SK_YUKON:
814	case SK_YUKON_LITE:
815	case SK_YUKON_LP:
816		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
817		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
818		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
819		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
820		break;
821	}
822
823	/* Now program new ones. */
824	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
825		hashes[0] = 0xFFFFFFFF;
826		hashes[1] = 0xFFFFFFFF;
827	} else {
828		i = 1;
829		IF_ADDR_LOCK(ifp);
830		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
831			if (ifma->ifma_addr->sa_family != AF_LINK)
832				continue;
833			/*
834			 * Program the first XM_RXFILT_MAX - 1 multicast groups
835			 * into the perfect filter (entry 0 holds the station
836			 * address). For all others, use the hash table.
837			 */
838			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
839				sk_setfilt(sc_if,
840			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
841				i++;
842				continue;
843			}
844
845			switch(sc->sk_type) {
846			case SK_GENESIS:
847				h = sk_xmchash(
848					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
849				break;
850			case SK_YUKON:
851			case SK_YUKON_LITE:
852			case SK_YUKON_LP:
853				h = sk_gmchash(
854					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
855				break;
856			}
857			if (h < 32)
858				hashes[0] |= (1 << h);
859			else
860				hashes[1] |= (1 << (h - 32));
861		}
862		IF_ADDR_UNLOCK(ifp);
863	}
864
865	switch(sc->sk_type) {
866	case SK_GENESIS:
867		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
868			       XM_MODE_RX_USE_PERFECT);
869		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
870		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
871		break;
872	case SK_YUKON:
873	case SK_YUKON_LITE:
874	case SK_YUKON_LP:
875		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
876		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
877		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
878		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
879		break;
880	}
881
882	return;
883}
884
885static void
886sk_setpromisc(sc_if)
887	struct sk_if_softc	*sc_if;
888{
889	struct sk_softc		*sc = sc_if->sk_softc;
890	struct ifnet		*ifp = sc_if->sk_ifp;
891
892	SK_IF_LOCK_ASSERT(sc_if);
893
894	switch(sc->sk_type) {
895	case SK_GENESIS:
896		if (ifp->if_flags & IFF_PROMISC) {
897			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
898		} else {
899			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
900		}
901		break;
902	case SK_YUKON:
903	case SK_YUKON_LITE:
904	case SK_YUKON_LP:
905		if (ifp->if_flags & IFF_PROMISC) {
906			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
907			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
908		} else {
909			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
910			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
911		}
912		break;
913	}
914
915	return;
916}
917
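/*
 * The RX and TX descriptors live in the contiguous sk_ring_data block and
 * are chained into rings: each descriptor's sk_next field holds the physical
 * address (vtophys()) of the next descriptor, with the last entry pointing
 * back at the first. The sk_chain entries mirror that ring in kernel
 * virtual space.
 */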
918static int
919sk_init_rx_ring(sc_if)
920	struct sk_if_softc	*sc_if;
921{
922	struct sk_chain_data	*cd = &sc_if->sk_cdata;
923	struct sk_ring_data	*rd = sc_if->sk_rdata;
924	int			i;
925
926	bzero((char *)rd->sk_rx_ring,
927	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
928
929	for (i = 0; i < SK_RX_RING_CNT; i++) {
930		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
931		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
932			return(ENOBUFS);
933		if (i == (SK_RX_RING_CNT - 1)) {
934			cd->sk_rx_chain[i].sk_next =
935			    &cd->sk_rx_chain[0];
936			rd->sk_rx_ring[i].sk_next =
937			    vtophys(&rd->sk_rx_ring[0]);
938		} else {
939			cd->sk_rx_chain[i].sk_next =
940			    &cd->sk_rx_chain[i + 1];
941			rd->sk_rx_ring[i].sk_next =
942			    vtophys(&rd->sk_rx_ring[i + 1]);
943		}
944	}
945
946	sc_if->sk_cdata.sk_rx_prod = 0;
947	sc_if->sk_cdata.sk_rx_cons = 0;
948
949	return(0);
950}
951
952static void
953sk_init_tx_ring(sc_if)
954	struct sk_if_softc	*sc_if;
955{
956	struct sk_chain_data	*cd = &sc_if->sk_cdata;
957	struct sk_ring_data	*rd = sc_if->sk_rdata;
958	int			i;
959
960	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
961	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
962
963	for (i = 0; i < SK_TX_RING_CNT; i++) {
964		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
965		if (i == (SK_TX_RING_CNT - 1)) {
966			cd->sk_tx_chain[i].sk_next =
967			    &cd->sk_tx_chain[0];
968			rd->sk_tx_ring[i].sk_next =
969			    vtophys(&rd->sk_tx_ring[0]);
970		} else {
971			cd->sk_tx_chain[i].sk_next =
972			    &cd->sk_tx_chain[i + 1];
973			rd->sk_tx_ring[i].sk_next =
974			    vtophys(&rd->sk_tx_ring[i + 1]);
975		}
976	}
977
978	sc_if->sk_cdata.sk_tx_prod = 0;
979	sc_if->sk_cdata.sk_tx_cons = 0;
980	sc_if->sk_cdata.sk_tx_cnt = 0;
981
982	return;
983}
984
985static int
986sk_newbuf(sc_if, c, m)
987	struct sk_if_softc	*sc_if;
988	struct sk_chain		*c;
989	struct mbuf		*m;
990{
991	struct mbuf		*m_new = NULL;
992	struct sk_rx_desc	*r;
993
994	if (m == NULL) {
995		caddr_t			*buf = NULL;
996
997		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
998		if (m_new == NULL)
999			return(ENOBUFS);
1000
1001		/* Allocate the jumbo buffer */
1002		buf = sk_jalloc(sc_if);
1003		if (buf == NULL) {
1004			m_freem(m_new);
1005#ifdef SK_VERBOSE
1006			printf("sk%d: jumbo allocation failed "
1007			    "-- packet dropped!\n", sc_if->sk_unit);
1008#endif
1009			return(ENOBUFS);
1010		}
1011
1012		/* Attach the buffer to the mbuf */
1013		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
1014		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
1015		m_new->m_data = (void *)buf;
1016		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
1017	} else {
1018		/*
1019	 	 * We're re-using a previously allocated mbuf;
1020		 * be sure to re-init pointers and lengths to
1021		 * default values.
1022		 */
1023		m_new = m;
1024		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
1025		m_new->m_data = m_new->m_ext.ext_buf;
1026	}
1027
1028	/*
1029	 * Adjust alignment so packet payload begins on a
1030	 * longword boundary. Mandatory for Alpha, useful on
1031	 * x86 too.
1032	 */
1033	m_adj(m_new, ETHER_ALIGN);
1034
1035	r = c->sk_desc;
1036	c->sk_mbuf = m_new;
1037	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1038	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1039
1040	return(0);
1041}
1042
1043/*
1044 * Allocate jumbo buffer storage. The SysKonnect adapters support
1045 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1046 * use them in their drivers. In order for us to use them, we need
1047 * large 9K receive buffers; however, standard mbuf clusters are only
1048 * 2048 bytes in size. Consequently, we need to allocate and manage
1049 * our own jumbo buffer pool. Fortunately, this does not require an
1050 * excessive amount of additional code.
1051 */
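/*
 * The pool is a single contigmalloc()'ed region carved into SK_JSLOTS
 * buffers of SK_JLEN bytes each (SK_JMEM is presumably SK_JSLOTS * SK_JLEN
 * in if_skreg.h). Free and in-use slots are tracked on the two SLISTs
 * below, protected by sk_jlist_mtx, and sk_jfree() is the MEXTADD()
 * destructor that returns a slot to the free list.
 */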
1052static int
1053sk_alloc_jumbo_mem(sc_if)
1054	struct sk_if_softc	*sc_if;
1055{
1056	caddr_t			ptr;
1057	register int		i;
1058	struct sk_jpool_entry   *entry;
1059
1060	/* Grab a big chunk o' storage. */
1061	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1062	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1063
1064	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1065		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1066		return(ENOBUFS);
1067	}
1068
1069	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
1070
1071	SLIST_INIT(&sc_if->sk_jfree_listhead);
1072	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1073
1074	/*
1075	 * Now divide it up into 9K pieces and save the addresses
1076	 * in an array.
1077	 */
1078	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1079	for (i = 0; i < SK_JSLOTS; i++) {
1080		sc_if->sk_cdata.sk_jslots[i] = ptr;
1081		ptr += SK_JLEN;
1082		entry = malloc(sizeof(struct sk_jpool_entry),
1083		    M_DEVBUF, M_NOWAIT);
1084		if (entry == NULL) {
1085			sk_free_jumbo_mem(sc_if);
1086			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1087			printf("sk%d: no memory for jumbo "
1088			    "buffer queue!\n", sc_if->sk_unit);
1089			return(ENOBUFS);
1090		}
1091		entry->slot = i;
1092		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1093		    entry, jpool_entries);
1094	}
1095
1096	return(0);
1097}
1098
1099static void
1100sk_free_jumbo_mem(sc_if)
1101	struct sk_if_softc	*sc_if;
1102{
1103	struct sk_jpool_entry	*entry;
1104
1105	SK_JLIST_LOCK(sc_if);
1106
1107	/* We cannot release external mbuf storage while in use. */
1108	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
1109		printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
1110		SK_JLIST_UNLOCK(sc_if);
1111		return;
1112	}
1113
1114	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
1115		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1116		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1117		free(entry, M_DEVBUF);
1118	}
1119
1120	SK_JLIST_UNLOCK(sc_if);
1121
1122	mtx_destroy(&sc_if->sk_jlist_mtx);
1123
1124	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1125
1126	return;
1127}
1128
1129/*
1130 * Allocate a jumbo buffer.
1131 */
1132static void *
1133sk_jalloc(sc_if)
1134	struct sk_if_softc	*sc_if;
1135{
1136	struct sk_jpool_entry   *entry;
1137
1138	SK_JLIST_LOCK(sc_if);
1139
1140	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1141
1142	if (entry == NULL) {
1143#ifdef SK_VERBOSE
1144		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1145#endif
1146		SK_JLIST_UNLOCK(sc_if);
1147		return(NULL);
1148	}
1149
1150	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1151	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1152
1153	SK_JLIST_UNLOCK(sc_if);
1154
1155	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1156}
1157
1158/*
1159 * Release a jumbo buffer.
1160 */
1161static void
1162sk_jfree(buf, args)
1163	void			*buf;
1164	void			*args;
1165{
1166	struct sk_if_softc	*sc_if;
1167	int		        i;
1168	struct sk_jpool_entry   *entry;
1169
1170	/* Extract the softc struct pointer. */
1171	sc_if = (struct sk_if_softc *)args;
1172	if (sc_if == NULL)
1173		panic("sk_jfree: didn't get softc pointer!");
1174
1175	SK_JLIST_LOCK(sc_if);
1176
1177	/* calculate the slot this buffer belongs to */
1178	i = ((vm_offset_t)buf
1179	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1180
1181	if ((i < 0) || (i >= SK_JSLOTS))
1182		panic("sk_jfree: asked to free buffer that we don't manage!");
1183
1184	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1185	if (entry == NULL)
1186		panic("sk_jfree: buffer not in use!");
1187	entry->slot = i;
1188	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
1189	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
1190	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
1191		wakeup(sc_if);
1192
1193	SK_JLIST_UNLOCK(sc_if);
1194	return;
1195}
1196
1197/*
1198 * Set media options.
1199 */
1200static int
1201sk_ifmedia_upd(ifp)
1202	struct ifnet		*ifp;
1203{
1204	struct sk_if_softc	*sc_if = ifp->if_softc;
1205	struct mii_data		*mii;
1206
1207	mii = device_get_softc(sc_if->sk_miibus);
1208	sk_init(sc_if);
1209	mii_mediachg(mii);
1210
1211	return(0);
1212}
1213
1214/*
1215 * Report current media status.
1216 */
1217static void
1218sk_ifmedia_sts(ifp, ifmr)
1219	struct ifnet		*ifp;
1220	struct ifmediareq	*ifmr;
1221{
1222	struct sk_if_softc	*sc_if;
1223	struct mii_data		*mii;
1224
1225	sc_if = ifp->if_softc;
1226	mii = device_get_softc(sc_if->sk_miibus);
1227
1228	mii_pollstat(mii);
1229	ifmr->ifm_active = mii->mii_media_active;
1230	ifmr->ifm_status = mii->mii_media_status;
1231
1232	return;
1233}
1234
1235static int
1236sk_ioctl(ifp, command, data)
1237	struct ifnet		*ifp;
1238	u_long			command;
1239	caddr_t			data;
1240{
1241	struct sk_if_softc	*sc_if = ifp->if_softc;
1242	struct ifreq		*ifr = (struct ifreq *) data;
1243	int			error = 0;
1244	struct mii_data		*mii;
1245
1246	switch(command) {
1247	case SIOCSIFMTU:
1248		SK_IF_LOCK(sc_if);
1249		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1250			error = EINVAL;
1251		else {
1252			ifp->if_mtu = ifr->ifr_mtu;
1253			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1254			sk_init_locked(sc_if);
1255		}
1256		SK_IF_UNLOCK(sc_if);
1257		break;
1258	case SIOCSIFFLAGS:
1259		SK_IF_LOCK(sc_if);
1260		if (ifp->if_flags & IFF_UP) {
1261			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1262				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1263				    & IFF_PROMISC) {
1264					sk_setpromisc(sc_if);
1265					sk_setmulti(sc_if);
1266				}
1267			} else
1268				sk_init_locked(sc_if);
1269		} else {
1270			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1271				sk_stop(sc_if);
1272		}
1273		sc_if->sk_if_flags = ifp->if_flags;
1274		SK_IF_UNLOCK(sc_if);
1275		error = 0;
1276		break;
1277	case SIOCADDMULTI:
1278	case SIOCDELMULTI:
1279		SK_IF_LOCK(sc_if);
1280		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1281			sk_setmulti(sc_if);
1282			error = 0;
1283		}
1284		SK_IF_UNLOCK(sc_if);
1285		break;
1286	case SIOCGIFMEDIA:
1287	case SIOCSIFMEDIA:
1288		mii = device_get_softc(sc_if->sk_miibus);
1289		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1290		break;
1291	default:
1292		error = ether_ioctl(ifp, command, data);
1293		break;
1294	}
1295
1296	return(error);
1297}
1298
1299/*
1300 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1301 * IDs against our list and return a device name if we find a match.
1302 */
1303static int
1304skc_probe(dev)
1305	device_t		dev;
1306{
1307	struct sk_softc		*sc;
1308	struct sk_type		*t = sk_devs;
1309
1310	sc = device_get_softc(dev);
1311
1312	while(t->sk_name != NULL) {
1313		if ((pci_get_vendor(dev) == t->sk_vid) &&
1314		    (pci_get_device(dev) == t->sk_did)) {
1315			device_set_desc(dev, t->sk_name);
1316			return (BUS_PROBE_DEFAULT);
1317		}
1318		t++;
1319	}
1320
1321	return(ENXIO);
1322}
1323
1324/*
1325 * Force the GEnesis into reset, then bring it out of reset.
1326 */
1327static void
1328sk_reset(sc)
1329	struct sk_softc		*sc;
1330{
1331	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1332	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1333	if (SK_YUKON_FAMILY(sc->sk_type))
1334		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1335
1336	DELAY(1000);
1337	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1338	DELAY(2);
1339	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1340	if (SK_YUKON_FAMILY(sc->sk_type))
1341		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1342
1343	if (sc->sk_type == SK_GENESIS) {
1344		/* Configure packet arbiter */
1345		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1346		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1347		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1348		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1349		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1350	}
1351
1352	/* Enable RAM interface */
1353	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1354
1355	/*
1356	 * Configure interrupt moderation. The moderation timer
1357	 * defers interrupts specified in the interrupt moderation
1358	 * timer mask based on the timeout specified in the interrupt
1359	 * moderation timer init register. Each bit in the timer
1360	 * register represents 18.825ns, so to specify a timeout in
1361	 * microseconds, we have to multiply by 54.
1362	 */
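	/*
	 * For example, a setting of 100 us would be loaded below as roughly
	 * 100 * 54 = 5400 timer ticks, assuming SK_IM_USECS() simply
	 * multiplies by the conversion factor described above.
	 */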
1363	if (bootverbose)
1364		printf("skc%d: interrupt moderation is %d us\n",
1365		    sc->sk_unit, sc->sk_int_mod);
1366	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
1367	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1368	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1369	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1370
1371	return;
1372}
1373
1374static int
1375sk_probe(dev)
1376	device_t		dev;
1377{
1378	struct sk_softc		*sc;
1379
1380	sc = device_get_softc(device_get_parent(dev));
1381
1382	/*
1383	 * Not much to do here. We always know there will be
1384	 * at least one XMAC present, and if there are two,
1385	 * skc_attach() will create a second device instance
1386	 * for us.
1387	 */
1388	switch (sc->sk_type) {
1389	case SK_GENESIS:
1390		device_set_desc(dev, "XaQti Corp. XMAC II");
1391		break;
1392	case SK_YUKON:
1393	case SK_YUKON_LITE:
1394	case SK_YUKON_LP:
1395		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1396		break;
1397	}
1398
1399	return (BUS_PROBE_DEFAULT);
1400}
1401
1402/*
1403 * Each XMAC chip is attached as a separate logical IP interface.
1404 * Single port cards will have only one logical interface of course.
1405 */
1406static int
1407sk_attach(dev)
1408	device_t		dev;
1409{
1410	struct sk_softc		*sc;
1411	struct sk_if_softc	*sc_if;
1412	struct ifnet		*ifp;
1413	int			i, port, error;
1414	u_char			eaddr[6];
1415
1416	if (dev == NULL)
1417		return(EINVAL);
1418
1419	error = 0;
1420	sc_if = device_get_softc(dev);
1421	sc = device_get_softc(device_get_parent(dev));
1422	port = *(int *)device_get_ivars(dev);
1423
1424	sc_if->sk_dev = dev;
1425	sc_if->sk_unit = device_get_unit(dev);
1426	sc_if->sk_port = port;
1427	sc_if->sk_softc = sc;
1428	sc->sk_if[port] = sc_if;
1429	if (port == SK_PORT_A)
1430		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1431	if (port == SK_PORT_B)
1432		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1433
1434	/* Allocate the descriptor queues. */
1435	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1436	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
1437
1438	if (sc_if->sk_rdata == NULL) {
1439		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1440		error = ENOMEM;
1441		goto fail;
1442	}
1443
1444	/* Try to allocate memory for jumbo buffers. */
1445	if (sk_alloc_jumbo_mem(sc_if)) {
1446		printf("sk%d: jumbo buffer allocation failed\n",
1447		    sc_if->sk_unit);
1448		error = ENOMEM;
1449		goto fail;
1450	}
1451
1452	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1453	if (ifp == NULL) {
1454		printf("sk%d: can not if_alloc()\n", sc_if->sk_unit);
1455		error = ENOSPC;
1456		goto fail;
1457	}
1458	ifp->if_softc = sc_if;
1459	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1460	ifp->if_mtu = ETHERMTU;
1461	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1462	/*
1463	 * The hardware should be ready for VLAN_MTU by default:
1464	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1465	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1466	 */
1467	ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU;
1468	ifp->if_ioctl = sk_ioctl;
1469	ifp->if_start = sk_start;
1470	ifp->if_watchdog = sk_watchdog;
1471	ifp->if_init = sk_init;
1472	ifp->if_baudrate = 1000000000;
1473	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1474	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1475	IFQ_SET_READY(&ifp->if_snd);
1476
1477	callout_handle_init(&sc_if->sk_tick_ch);
1478
1479	/*
1480	 * Get station address for this interface. Note that
1481	 * dual port cards actually come with three station
1482	 * addresses: one for each port, plus an extra. The
1483	 * extra one is used by the SysKonnect driver software
1484	 * as a 'virtual' station address for when both ports
1485	 * are operating in failover mode. Currently we don't
1486	 * use this extra address.
1487	 */
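	/*
	 * Port B's address block follows port A's at an 8-byte stride in
	 * the controller's register space (SK_MAC0_0 + port * 8), which is
	 * what the loop below relies on.
	 */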
1488	SK_LOCK(sc);
1489	for (i = 0; i < ETHER_ADDR_LEN; i++)
1490		eaddr[i] =
1491		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1492
1493	/*
1494	 * Set up RAM buffer addresses. The NIC will have a certain
1495	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1496	 * need to divide this up a) between the transmitter and
1497 	 * receiver and b) between the two XMACs, if this is a
1498	 * dual port NIC. Our algorithm is to divide up the memory
1499	 * evenly so that everyone gets a fair share.
1500	 */
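	/*
	 * Worked example: a single-MAC board with 1MB of SRAM ends up with a
	 * 512K receive buffer and a 512K transmit buffer; a dual port board
	 * gives each MAC half of that again. The start/end values are kept
	 * in 8-byte (u_int64_t) units, which is the granularity the RAM
	 * buffer registers use.
	 */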
1501	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1502		u_int32_t		chunk, val;
1503
1504		chunk = sc->sk_ramsize / 2;
1505		val = sc->sk_rboff / sizeof(u_int64_t);
1506		sc_if->sk_rx_ramstart = val;
1507		val += (chunk / sizeof(u_int64_t));
1508		sc_if->sk_rx_ramend = val - 1;
1509		sc_if->sk_tx_ramstart = val;
1510		val += (chunk / sizeof(u_int64_t));
1511		sc_if->sk_tx_ramend = val - 1;
1512	} else {
1513		u_int32_t		chunk, val;
1514
1515		chunk = sc->sk_ramsize / 4;
1516		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1517		    sizeof(u_int64_t);
1518		sc_if->sk_rx_ramstart = val;
1519		val += (chunk / sizeof(u_int64_t));
1520		sc_if->sk_rx_ramend = val - 1;
1521		sc_if->sk_tx_ramstart = val;
1522		val += (chunk / sizeof(u_int64_t));
1523		sc_if->sk_tx_ramend = val - 1;
1524	}
1525
1526	/* Read and save PHY type and set PHY address */
1527	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1528	switch(sc_if->sk_phytype) {
1529	case SK_PHYTYPE_XMAC:
1530		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1531		break;
1532	case SK_PHYTYPE_BCOM:
1533		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1534		break;
1535	case SK_PHYTYPE_MARV_COPPER:
1536		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1537		break;
1538	default:
1539		printf("skc%d: unsupported PHY type: %d\n",
1540		    sc->sk_unit, sc_if->sk_phytype);
1541		error = ENODEV;
1542		SK_UNLOCK(sc);
1543		goto fail;
1544	}
1545
1546
1547	/*
1548	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1549	 */
1550	SK_UNLOCK(sc);
1551	ether_ifattach(ifp, eaddr);
1552	SK_LOCK(sc);
1553
1554	/*
1555	 * Do miibus setup.
1556	 */
1557	switch (sc->sk_type) {
1558	case SK_GENESIS:
1559		sk_init_xmac(sc_if);
1560		break;
1561	case SK_YUKON:
1562	case SK_YUKON_LITE:
1563	case SK_YUKON_LP:
1564		sk_init_yukon(sc_if);
1565		break;
1566	}
1567
1568	SK_UNLOCK(sc);
1569	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1570	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1571		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1572		ether_ifdetach(ifp);
1573		error = ENXIO;
1574		goto fail;
1575	}
1576
1577fail:
1578	if (error) {
1579		/* Access should be ok even though lock has been dropped */
1580		sc->sk_if[port] = NULL;
1581		sk_detach(dev);
1582	}
1583
1584	return(error);
1585}
1586
1587/*
1588 * Attach the interface. Allocate softc structures, do ifmedia
1589 * setup and ethernet/BPF attach.
1590 */
1591static int
1592skc_attach(dev)
1593	device_t		dev;
1594{
1595	struct sk_softc		*sc;
1596	int			unit, error = 0, rid, *port;
1597	uint8_t			skrs;
1598	char			*pname, *revstr;
1599
1600	sc = device_get_softc(dev);
1601	unit = device_get_unit(dev);
1602
1603	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1604	    MTX_DEF | MTX_RECURSE);
1605	/*
1606	 * Map control/status registers.
1607	 */
1608	pci_enable_busmaster(dev);
1609
1610	rid = SK_RID;
1611	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1612
1613	if (sc->sk_res == NULL) {
1614		printf("sk%d: couldn't map ports/memory\n", unit);
1615		error = ENXIO;
1616		goto fail;
1617	}
1618
1619	sc->sk_btag = rman_get_bustag(sc->sk_res);
1620	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1621
1622	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1623	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1624
1625	/* Bail out if chip is not recognized. */
1626	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1627		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
1628			unit, sc->sk_type, sc->sk_rev);
1629		error = ENXIO;
1630		goto fail;
1631	}
1632
1633	/* Allocate interrupt */
1634	rid = 0;
1635	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1636	    RF_SHAREABLE | RF_ACTIVE);
1637
1638	if (sc->sk_irq == NULL) {
1639		printf("skc%d: couldn't map interrupt\n", unit);
1640		error = ENXIO;
1641		goto fail;
1642	}
1643
1644	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1645		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1646		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1647		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1648		"SK interrupt moderation");
1649
1650	/* Pull in device tunables. */
1651	sc->sk_int_mod = SK_IM_DEFAULT;
1652	error = resource_int_value(device_get_name(dev), unit,
1653		"int_mod", &sc->sk_int_mod);
1654	if (error == 0) {
1655		if (sc->sk_int_mod < SK_IM_MIN ||
1656		    sc->sk_int_mod > SK_IM_MAX) {
1657			printf("skc%d: int_mod value out of range; "
1658			    "using default: %d\n", unit, SK_IM_DEFAULT);
1659			sc->sk_int_mod = SK_IM_DEFAULT;
1660		}
1661	}
1662
1663	/* Reset the adapter. */
1664	sk_reset(sc);
1665
1666	sc->sk_unit = unit;
1667
1668	/* Read and save vital product data from EEPROM. */
1669	sk_vpd_read(sc);
1670
1671	skrs = sk_win_read_1(sc, SK_EPROM0);
1672	if (sc->sk_type == SK_GENESIS) {
1673		/* Read and save RAM size and RAMbuffer offset */
1674		switch(skrs) {
1675		case SK_RAMSIZE_512K_64:
1676			sc->sk_ramsize = 0x80000;
1677			sc->sk_rboff = SK_RBOFF_0;
1678			break;
1679		case SK_RAMSIZE_1024K_64:
1680			sc->sk_ramsize = 0x100000;
1681			sc->sk_rboff = SK_RBOFF_80000;
1682			break;
1683		case SK_RAMSIZE_1024K_128:
1684			sc->sk_ramsize = 0x100000;
1685			sc->sk_rboff = SK_RBOFF_0;
1686			break;
1687		case SK_RAMSIZE_2048K_128:
1688			sc->sk_ramsize = 0x200000;
1689			sc->sk_rboff = SK_RBOFF_0;
1690			break;
1691		default:
1692			printf("skc%d: unknown ram size: %d\n",
1693			    sc->sk_unit, skrs);
1694			error = ENXIO;
1695			goto fail;
1696		}
1697	} else { /* SK_YUKON_FAMILY */
1698		if (skrs == 0x00)
1699			sc->sk_ramsize = 0x20000;
1700		else
1701			sc->sk_ramsize = skrs * (1<<12);
1702		sc->sk_rboff = SK_RBOFF_0;
1703	}
1704
1705	/* Read and save physical media type */
1706	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1707	case SK_PMD_1000BASESX:
1708		sc->sk_pmd = IFM_1000_SX;
1709		break;
1710	case SK_PMD_1000BASELX:
1711		sc->sk_pmd = IFM_1000_LX;
1712		break;
1713	case SK_PMD_1000BASECX:
1714		sc->sk_pmd = IFM_1000_CX;
1715		break;
1716	case SK_PMD_1000BASETX:
1717		sc->sk_pmd = IFM_1000_T;
1718		break;
1719	default:
1720		printf("skc%d: unknown media type: 0x%x\n",
1721		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1722		error = ENXIO;
1723		goto fail;
1724	}
1725
1726	/* Determine whether to name it with VPD PN or just make it up.
1727	 * Marvell Yukon VPD PN frequently seems to be bogus. */
1728	switch (pci_get_device(dev)) {
1729	case DEVICEID_SK_V1:
1730	case DEVICEID_BELKIN_5005:
1731	case DEVICEID_3COM_3C940:
1732	case DEVICEID_LINKSYS_EG1032:
1733	case DEVICEID_DLINK_DGE530T:
1734		/* Stay with VPD PN. */
1735		pname = sc->sk_vpd_prodname;
1736		break;
1737	case DEVICEID_SK_V2:
1738		/* YUKON VPD PN might bear no resemblance to reality. */
1739		switch (sc->sk_type) {
1740		case SK_GENESIS:
1741			/* Stay with VPD PN. */
1742			pname = sc->sk_vpd_prodname;
1743			break;
1744		case SK_YUKON:
1745			pname = "Marvell Yukon Gigabit Ethernet";
1746			break;
1747		case SK_YUKON_LITE:
1748			pname = "Marvell Yukon Lite Gigabit Ethernet";
1749			break;
1750		case SK_YUKON_LP:
1751			pname = "Marvell Yukon LP Gigabit Ethernet";
1752			break;
1753		default:
1754			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1755			break;
1756		}
1757
1758		/* Yukon Lite Rev. A0 needs special test. */
1759		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1760			u_int32_t far;
1761			u_int8_t testbyte;
1762
1763			/* Save flash address register before testing. */
1764			far = sk_win_read_4(sc, SK_EP_ADDR);
1765
1766			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1767			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1768
1769			if (testbyte != 0x00) {
1770				/* Yukon Lite Rev. A0 detected. */
1771				sc->sk_type = SK_YUKON_LITE;
1772				sc->sk_rev = SK_YUKON_LITE_REV_A0;
1773				/* Restore flash address register. */
1774				sk_win_write_4(sc, SK_EP_ADDR, far);
1775			}
1776		}
1777		break;
1778	default:
1779		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1780			"chipver=%02x, rev=%x\n",
1781			pci_get_vendor(dev), pci_get_device(dev),
1782			sc->sk_type, sc->sk_rev);
1783		error = ENXIO;
1784		goto fail;
1785	}
1786
1787	if (sc->sk_type == SK_YUKON_LITE) {
1788		switch (sc->sk_rev) {
1789		case SK_YUKON_LITE_REV_A0:
1790			revstr = "A0";
1791			break;
1792		case SK_YUKON_LITE_REV_A1:
1793			revstr = "A1";
1794			break;
1795		case SK_YUKON_LITE_REV_A3:
1796			revstr = "A3";
1797			break;
1798		default:
1799			revstr = "";
1800			break;
1801		}
1802	} else {
1803		revstr = "";
1804	}
1805
1806	/* Announce the product name and more VPD data if present. */
1807	device_printf(dev, "%s rev. %s(0x%x)\n",
1808		pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);
1809
1810	if (bootverbose) {
1811		if (sc->sk_vpd_readonly != NULL &&
1812		    sc->sk_vpd_readonly_len != 0) {
1813			char buf[256];
1814			char *dp = sc->sk_vpd_readonly;
1815			uint16_t l, len = sc->sk_vpd_readonly_len;
1816
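			/*
			 * The read-only area is a sequence of entries of the
			 * form: two keyword characters, a one-byte length,
			 * then that many data bytes. Dump the well-known
			 * keywords (PN, EC, MN, SN) and skip everything else.
			 */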
1817			while (len >= 3) {
1818				if ((*dp == 'P' && *(dp+1) == 'N') ||
1819				    (*dp == 'E' && *(dp+1) == 'C') ||
1820				    (*dp == 'M' && *(dp+1) == 'N') ||
1821				    (*dp == 'S' && *(dp+1) == 'N')) {
1822					l = 0;
1823					while (l < *(dp+2)) {
1824						buf[l] = *(dp+3+l);
1825						++l;
1826					}
1827					buf[l] = '\0';
1828					device_printf(dev, "%c%c: %s\n",
1829					    *dp, *(dp+1), buf);
1830					len -= (3 + l);
1831					dp += (3 + l);
1832				} else {
1833					len -= (3 + *(dp+2));
1834					dp += (3 + *(dp+2));
1835				}
1836			}
1837		}
1838		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
1839		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
1840		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1841		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1842	}
1843
1844	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1845	if (sc->sk_devs[SK_PORT_A] == NULL) {
1846		device_printf(dev, "failed to add child for PORT_A\n");
1847		error = ENXIO;
1848		goto fail;
1849	}
1850	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1851	if (port == NULL) {
1852		device_printf(dev, "failed to allocate memory for "
1853		    "ivars of PORT_A\n");
1854		error = ENXIO;
1855		goto fail;
1856	}
1857	*port = SK_PORT_A;
1858	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1859
1860	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1861		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1862		if (sc->sk_devs[SK_PORT_B] == NULL) {
1863			device_printf(dev, "failed to add child for PORT_B\n");
1864			error = ENXIO;
1865			goto fail;
1866		}
1867		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1868		if (port == NULL) {
1869			device_printf(dev, "failed to allocate memory for "
1870			    "ivars of PORT_B\n");
1871			error = ENXIO;
1872			goto fail;
1873		}
1874		*port = SK_PORT_B;
1875		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1876	}
1877
1878	/* Turn on the 'driver is loaded' LED. */
1879	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1880
1881	error = bus_generic_attach(dev);
1882	if (error) {
1883		device_printf(dev, "failed to attach port(s)\n");
1884		goto fail;
1885	}
1886
1887	/* Hook interrupt last to avoid having to lock softc */
1888	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1889	    sk_intr, sc, &sc->sk_intrhand);
1890
1891	if (error) {
1892		printf("skc%d: couldn't set up irq\n", unit);
1893		goto fail;
1894	}
1895
1896fail:
1897	if (error)
1898		skc_detach(dev);
1899
1900	return(error);
1901}
1902
1903/*
1904 * Shutdown hardware and free up resources. This can be called any
1905 * time after the mutex has been initialized. It is called in both
1906 * the error case in attach and the normal detach case so it needs
1907 * to be careful about only freeing resources that have actually been
1908 * allocated.
1909 */
1910static int
1911sk_detach(dev)
1912	device_t		dev;
1913{
1914	struct sk_if_softc	*sc_if;
1915	struct ifnet		*ifp;
1916
1917	sc_if = device_get_softc(dev);
1918	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1919	    ("sk mutex not initialized in sk_detach"));
1920	SK_IF_LOCK(sc_if);
1921
1922	ifp = sc_if->sk_ifp;
1923	/* These should only be active if sk_attach() succeeded */
1924	if (device_is_attached(dev)) {
1925		sk_stop(sc_if);
1926		/* Can't hold locks while calling detach */
1927		SK_IF_UNLOCK(sc_if);
1928		ether_ifdetach(ifp);
1929		SK_IF_LOCK(sc_if);
1930	}
1931	if (ifp)
1932		if_free(ifp);
1933	/*
1934	 * We're generally called from skc_detach() which is using
1935	 * device_delete_child() to get to here. It's already trashed
1936	 * miibus for us, so don't do it here or we'll panic.
1937	 */
1938	/*
1939	if (sc_if->sk_miibus != NULL)
1940		device_delete_child(dev, sc_if->sk_miibus);
1941	*/
1942	bus_generic_detach(dev);
1943	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
1944		sk_free_jumbo_mem(sc_if);
1945	if (sc_if->sk_rdata != NULL) {
1946		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1947		    M_DEVBUF);
1948	}
1949	SK_IF_UNLOCK(sc_if);
1950
1951	return(0);
1952}
1953
1954static int
1955skc_detach(dev)
1956	device_t		dev;
1957{
1958	struct sk_softc		*sc;
1959
1960	sc = device_get_softc(dev);
1961	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1962
1963	if (device_is_alive(dev)) {
1964		if (sc->sk_devs[SK_PORT_A] != NULL) {
1965			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1966			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1967		}
1968		if (sc->sk_devs[SK_PORT_B] != NULL) {
1969			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1970			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1971		}
1972		bus_generic_detach(dev);
1973	}
1974
1975	if (sc->sk_vpd_prodname != NULL)
1976		free(sc->sk_vpd_prodname, M_DEVBUF);
1977	if (sc->sk_vpd_readonly != NULL)
1978		free(sc->sk_vpd_readonly, M_DEVBUF);
1979
1980	if (sc->sk_intrhand)
1981		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1982	if (sc->sk_irq)
1983		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1984	if (sc->sk_res)
1985		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1986
1987	mtx_destroy(&sc->sk_mtx);
1988
1989	return(0);
1990}
1991
1992static int
1993sk_encap(sc_if, m_head, txidx)
1994        struct sk_if_softc	*sc_if;
1995        struct mbuf		*m_head;
1996        u_int32_t		*txidx;
1997{
1998	struct sk_tx_desc	*f = NULL;
1999	struct mbuf		*m;
2000	u_int32_t		frag, cur, cnt = 0;
2001
2002	SK_IF_LOCK_ASSERT(sc_if);
2003
2004	m = m_head;
2005	cur = frag = *txidx;
2006
2007	/*
2008	 * Start packing the mbufs in this chain into
2009	 * the fragment pointers. Stop when we run out
2010	 * of fragments or hit the end of the mbuf chain.
2011	 */
2012	for (m = m_head; m != NULL; m = m->m_next) {
2013		if (m->m_len != 0) {
2014			if ((SK_TX_RING_CNT -
2015			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
2016				return(ENOBUFS);
2017			f = &sc_if->sk_rdata->sk_tx_ring[frag];
2018			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
2019			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
2020			if (cnt == 0)
2021				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
2022			else
2023				f->sk_ctl |= SK_TXCTL_OWN;
2024			cur = frag;
2025			SK_INC(frag, SK_TX_RING_CNT);
2026			cnt++;
2027		}
2028	}
2029
2030	if (m != NULL)
2031		return(ENOBUFS);
2032
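	/*
	 * Close out the chain: flag the last fragment and ask for an
	 * end-of-frame interrupt, then hand the chain to the NIC by
	 * setting the OWN bit on the first descriptor last, so the
	 * chip never sees a partially built chain.
	 */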
2033	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
2034		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
2035	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
2036	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
2037	sc_if->sk_cdata.sk_tx_cnt += cnt;
2038
2039	*txidx = frag;
2040
2041	return(0);
2042}
2043
2044static void
2045sk_start(ifp)
2046	struct ifnet		*ifp;
2047{
2048	struct sk_if_softc *sc_if;
2049
2050	sc_if = ifp->if_softc;
2051
2052	SK_IF_LOCK(sc_if);
2053	sk_start_locked(ifp);
2054	SK_IF_UNLOCK(sc_if);
2055
2056	return;
2057}
2058
2059static void
2060sk_start_locked(ifp)
2061	struct ifnet		*ifp;
2062{
2063        struct sk_softc		*sc;
2064        struct sk_if_softc	*sc_if;
2065        struct mbuf		*m_head = NULL;
2066        u_int32_t		idx;
2067
2068	sc_if = ifp->if_softc;
2069	sc = sc_if->sk_softc;
2070
2071	SK_IF_LOCK_ASSERT(sc_if);
2072
2073	idx = sc_if->sk_cdata.sk_tx_prod;
2074
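	/*
	 * Keep dequeueing packets as long as the descriptor at the
	 * producer index is free; an mbuf still parked there means
	 * the transmit ring is full.
	 */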
2075	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
2076		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2077		if (m_head == NULL)
2078			break;
2079
2080		/*
2081		 * Pack the data into the transmit ring. If we
2082		 * don't have room, set the OACTIVE flag and wait
2083		 * for the NIC to drain the ring.
2084		 */
2085		if (sk_encap(sc_if, m_head, &idx)) {
2086			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2087			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2088			break;
2089		}
2090
2091		/*
2092		 * If there's a BPF listener, bounce a copy of this frame
2093		 * to him.
2094		 */
2095		BPF_MTAP(ifp, m_head);
2096	}
2097
2098	/* Transmit */
2099	if (idx != sc_if->sk_cdata.sk_tx_prod) {
2100		sc_if->sk_cdata.sk_tx_prod = idx;
2101		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2102
2103		/* Set a timeout in case the chip goes out to lunch. */
2104		ifp->if_timer = 5;
2105	}
2106
2107	return;
2108}
2109
2110
2111static void
2112sk_watchdog(ifp)
2113	struct ifnet		*ifp;
2114{
2115	struct sk_if_softc	*sc_if;
2116
2117	sc_if = ifp->if_softc;
2118
2119	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
2120	SK_IF_LOCK(sc_if);
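	/* Clear IFF_DRV_RUNNING so sk_init_locked() does a full reinit. */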
2121	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2122	sk_init_locked(sc_if);
2123	SK_IF_UNLOCK(sc_if);
2124
2125	return;
2126}
2127
2128static void
2129skc_shutdown(dev)
2130	device_t		dev;
2131{
2132	struct sk_softc		*sc;
2133
2134	sc = device_get_softc(dev);
2135	SK_LOCK(sc);
2136
2137	/* Turn off the 'driver is loaded' LED. */
2138	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2139
2140	/*
2141	 * Reset the GEnesis controller. Doing this should also
2142	 * assert the resets on the attached XMAC(s).
2143	 */
2144	sk_reset(sc);
2145	SK_UNLOCK(sc);
2146
2147	return;
2148}
2149
2150static void
2151sk_rxeof(sc_if)
2152	struct sk_if_softc	*sc_if;
2153{
2154	struct sk_softc		*sc;
2155	struct mbuf		*m;
2156	struct ifnet		*ifp;
2157	struct sk_chain		*cur_rx;
2158	int			total_len = 0;
2159	int			i;
2160	u_int32_t		rxstat;
2161
2162	sc = sc_if->sk_softc;
2163	ifp = sc_if->sk_ifp;
2164	i = sc_if->sk_cdata.sk_rx_prod;
2165	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2166
2167	SK_LOCK_ASSERT(sc);
2168
2169	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
2170
2171		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2172		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
2173		m = cur_rx->sk_mbuf;
2174		cur_rx->sk_mbuf = NULL;
2175		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
2176		SK_INC(i, SK_RX_RING_CNT);
2177
2178		if (rxstat & XM_RXSTAT_ERRFRAME) {
2179			ifp->if_ierrors++;
2180			sk_newbuf(sc_if, cur_rx, m);
2181			continue;
2182		}
2183
2184		/*
2185		 * Try to allocate a new jumbo buffer. If that
2186		 * fails, copy the packet to mbufs and put the
2187		 * jumbo buffer back in the ring so it can be
2188		 * re-used. If allocating mbufs fails, then we
2189		 * have to drop the packet.
2190		 */
2191		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
2192			struct mbuf		*m0;
2193			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
2194			    ifp, NULL);
2195			sk_newbuf(sc_if, cur_rx, m);
2196			if (m0 == NULL) {
2197				printf("sk%d: no receive buffers "
2198				    "available -- packet dropped!\n",
2199				    sc_if->sk_unit);
2200				ifp->if_ierrors++;
2201				continue;
2202			}
2203			m = m0;
2204		} else {
2205			m->m_pkthdr.rcvif = ifp;
2206			m->m_pkthdr.len = m->m_len = total_len;
2207		}
2208
2209		ifp->if_ipackets++;
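		/*
		 * Hand the frame to the stack without holding the driver
		 * lock across if_input().
		 */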
2210		SK_UNLOCK(sc);
2211		(*ifp->if_input)(ifp, m);
2212		SK_LOCK(sc);
2213	}
2214
2215	sc_if->sk_cdata.sk_rx_prod = i;
2216
2217	return;
2218}
2219
2220static void
2221sk_txeof(sc_if)
2222	struct sk_if_softc	*sc_if;
2223{
2224	struct sk_softc		*sc;
2225	struct sk_tx_desc	*cur_tx;
2226	struct ifnet		*ifp;
2227	u_int32_t		idx;
2228
2229	sc = sc_if->sk_softc;
2230	ifp = sc_if->sk_ifp;
2231
2232	/*
2233	 * Go through our tx ring and free mbufs for those
2234	 * frames that have been sent.
2235	 */
2236	idx = sc_if->sk_cdata.sk_tx_cons;
2237	while(idx != sc_if->sk_cdata.sk_tx_prod) {
2238		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
2239		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
2240			break;
2241		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
2242			ifp->if_opackets++;
2243		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
2244			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
2245			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
2246		}
2247		sc_if->sk_cdata.sk_tx_cnt--;
2248		SK_INC(idx, SK_TX_RING_CNT);
2249	}
2250
2251	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
2252		ifp->if_timer = 0;
2253	} else /* nudge chip to keep tx ring moving */
2254		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2255
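	/*
	 * Re-open the send queue once sk_encap()'s two-descriptor
	 * headroom requirement can be satisfied again.
	 */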
2256	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
2257		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2258
2259	sc_if->sk_cdata.sk_tx_cons = idx;
2260}
2261
2262static void
2263sk_tick(xsc_if)
2264	void			*xsc_if;
2265{
2266	struct sk_if_softc	*sc_if;
2267	struct mii_data		*mii;
2268	struct ifnet		*ifp;
2269	int			i;
2270
2271	sc_if = xsc_if;
2272	SK_IF_LOCK(sc_if);
2273	ifp = sc_if->sk_ifp;
2274	mii = device_get_softc(sc_if->sk_miibus);
2275
2276	if (!(ifp->if_flags & IFF_UP)) {
2277		SK_IF_UNLOCK(sc_if);
2278		return;
2279	}
2280
2281	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2282		sk_intr_bcom(sc_if);
2283		SK_IF_UNLOCK(sc_if);
2284		return;
2285	}
2286
2287	/*
2288	 * According to SysKonnect, the correct way to verify that
2289	 * the link has come back up is to poll bit 0 of the GPIO
2290	 * register three times. This pin has the signal from the
2291	 * link_sync pin connected to it; if we read the same link
2292	 * state 3 times in a row, we know the link is up.
2293	 */
2294	for (i = 0; i < 3; i++) {
2295		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2296			break;
2297	}
2298
2299	if (i != 3) {
2300		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2301		SK_IF_UNLOCK(sc_if);
2302		return;
2303	}
2304
2305	/* Turn the GP0 interrupt back on. */
2306	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2307	SK_XM_READ_2(sc_if, XM_ISR);
2308	mii_tick(mii);
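	/* The link is back; no need to keep polling. */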
2309	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2310
2311	SK_IF_UNLOCK(sc_if);
2312	return;
2313}
2314
2315static void
2316sk_intr_bcom(sc_if)
2317	struct sk_if_softc	*sc_if;
2318{
2319	struct mii_data		*mii;
2320	struct ifnet		*ifp;
2321	int			status;
2322	mii = device_get_softc(sc_if->sk_miibus);
2323	ifp = sc_if->sk_ifp;
2324
2325	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2326
2327	/*
2328	 * Read the PHY interrupt register to make sure
2329	 * we clear any pending interrupts.
2330	 */
2331	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2332
2333	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2334		sk_init_xmac(sc_if);
2335		return;
2336	}
2337
2338	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2339		int			lstat;
2340		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2341		    BRGPHY_MII_AUXSTS);
2342
2343		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2344			mii_mediachg(mii);
2345			/* Turn off the link LED. */
2346			SK_IF_WRITE_1(sc_if, 0,
2347			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2348			sc_if->sk_link = 0;
2349		} else if (status & BRGPHY_ISR_LNK_CHG) {
2350			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2351	    		    BRGPHY_MII_IMR, 0xFF00);
2352			mii_tick(mii);
2353			sc_if->sk_link = 1;
2354			/* Turn on the link LED. */
2355			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2356			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2357			    SK_LINKLED_BLINK_OFF);
2358		} else {
2359			mii_tick(mii);
2360			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2361		}
2362	}
2363
2364	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2365
2366	return;
2367}
2368
2369static void
2370sk_intr_xmac(sc_if)
2371	struct sk_if_softc	*sc_if;
2372{
2373	struct sk_softc		*sc;
2374	u_int16_t		status;
2375
2376	sc = sc_if->sk_softc;
2377	status = SK_XM_READ_2(sc_if, XM_ISR);
2378
2379	/*
2380	 * If the link has gone down (GP0 asserted), mask further GP0
2381	 * interrupts and start the MII tick timeout to watch for resync.
2382	 */
2383	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2384		if (status & XM_ISR_GP0_SET) {
2385			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2386			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2387		}
2388
2389		if (status & XM_ISR_AUTONEG_DONE) {
2390			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2391		}
2392	}
2393
2394	if (status & XM_IMR_TX_UNDERRUN)
2395		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2396
2397	if (status & XM_IMR_RX_OVERRUN)
2398		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2399
2400	status = SK_XM_READ_2(sc_if, XM_ISR);
2401
2402	return;
2403}
2404
2405static void
2406sk_intr_yukon(sc_if)
2407	struct sk_if_softc	*sc_if;
2408{
2409	int status;
2410
2411	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
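	/* Nothing to service beyond reading the GMAC interrupt source. */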
2412
2413	return;
2414}
2415
2416static void
2417sk_intr(xsc)
2418	void			*xsc;
2419{
2420	struct sk_softc		*sc = xsc;
2421	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2422	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2423	u_int32_t		status;
2424
2425	SK_LOCK(sc);
2426
2427	sc_if0 = sc->sk_if[SK_PORT_A];
2428	sc_if1 = sc->sk_if[SK_PORT_B];
2429
2430	if (sc_if0 != NULL)
2431		ifp0 = sc_if0->sk_ifp;
2432	if (sc_if1 != NULL)
2433		ifp1 = sc_if1->sk_ifp;
2434
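	/*
	 * Service interrupt sources until the status register shows
	 * no more causes that are present in our interrupt mask.
	 */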
2435	for (;;) {
2436		status = CSR_READ_4(sc, SK_ISSR);
2437		if (!(status & sc->sk_intrmask))
2438			break;
2439
2440		/* Handle receive interrupts first. */
2441		if (status & SK_ISR_RX1_EOF) {
2442			sk_rxeof(sc_if0);
2443			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2444			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2445		}
2446		if (status & SK_ISR_RX2_EOF) {
2447			sk_rxeof(sc_if1);
2448			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2449			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2450		}
2451
2452		/* Then transmit interrupts. */
2453		if (status & SK_ISR_TX1_S_EOF) {
2454			sk_txeof(sc_if0);
2455			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2456			    SK_TXBMU_CLR_IRQ_EOF);
2457		}
2458		if (status & SK_ISR_TX2_S_EOF) {
2459			sk_txeof(sc_if1);
2460			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2461			    SK_TXBMU_CLR_IRQ_EOF);
2462		}
2463
2464		/* Then MAC interrupts. */
2465		if (status & SK_ISR_MAC1 && ifp0 != NULL &&
2466		    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
2467			if (sc->sk_type == SK_GENESIS)
2468				sk_intr_xmac(sc_if0);
2469			else
2470				sk_intr_yukon(sc_if0);
2471		}
2472
2473		if (status & SK_ISR_MAC2 && ifp1 != NULL &&
2474		    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
2475			if (sc->sk_type == SK_GENESIS)
2476				sk_intr_xmac(sc_if1);
2477			else
2478				sk_intr_yukon(sc_if1);
2479		}
2480
2481		if (status & SK_ISR_EXTERNAL_REG) {
2482			if (ifp0 != NULL &&
2483			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2484				sk_intr_bcom(sc_if0);
2485			if (ifp1 != NULL &&
2486			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2487				sk_intr_bcom(sc_if1);
2488		}
2489	}
2490
2491	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2492
2493	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2494		sk_start_locked(ifp0);
2495	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2496		sk_start_locked(ifp1);
2497
2498	SK_UNLOCK(sc);
2499
2500	return;
2501}
2502
2503static void
2504sk_init_xmac(sc_if)
2505	struct sk_if_softc	*sc_if;
2506{
2507	struct sk_softc		*sc;
2508	struct ifnet		*ifp;
2509	struct sk_bcom_hack	bhack[] = {
2510	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2511	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2512	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2513	{ 0, 0 } };
2514
2515	sc = sc_if->sk_softc;
2516	ifp = sc_if->sk_ifp;
2517
2518	/* Unreset the XMAC. */
2519	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2520	DELAY(1000);
2521
2522	/* Reset the XMAC's internal state. */
2523	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2524
2525	/* Save the XMAC II revision */
2526	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2527
2528	/*
2529	 * Perform additional initialization for external PHYs,
2530	 * namely for the 1000baseTX cards that use the XMAC's
2531	 * GMII mode.
2532	 */
2533	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2534		int			i = 0;
2535		u_int32_t		val;
2536
2537		/* Take PHY out of reset. */
2538		val = sk_win_read_4(sc, SK_GPIO);
2539		if (sc_if->sk_port == SK_PORT_A)
2540			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2541		else
2542			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2543		sk_win_write_4(sc, SK_GPIO, val);
2544
2545		/* Enable GMII mode on the XMAC. */
2546		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2547
2548		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2549		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2550		DELAY(10000);
2551		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2552		    BRGPHY_MII_IMR, 0xFFF0);
2553
2554		/*
2555		 * Early versions of the BCM5400 apparently have
2556		 * a bug that requires them to have their reserved
2557		 * registers initialized to some magic values. I don't
2558		 * know what the numbers do, I'm just the messenger.
2559		 */
2560		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2561		    == 0x6041) {
2562			while(bhack[i].reg) {
2563				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2564				    bhack[i].reg, bhack[i].val);
2565				i++;
2566			}
2567		}
2568	}
2569
2570	/* Set station address */
2571	SK_XM_WRITE_2(sc_if, XM_PAR0,
2572	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[0]));
2573	SK_XM_WRITE_2(sc_if, XM_PAR1,
2574	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[2]));
2575	SK_XM_WRITE_2(sc_if, XM_PAR2,
2576	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[4]));
2577	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2578
2579	if (ifp->if_flags & IFF_BROADCAST) {
2580		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2581	} else {
2582		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2583	}
2584
2585	/* We don't need the FCS appended to the packet. */
2586	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2587
2588	/* We want short frames padded to 60 bytes. */
2589	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2590
2591	/*
2592	 * Enable the reception of all error frames. This is
2593	 * a necessary evil due to the design of the XMAC. The
2594	 * XMAC's receive FIFO is only 8K in size, however jumbo
2595	 * frames can be up to 9000 bytes in length. When bad
2596	 * frame filtering is enabled, the XMAC's RX FIFO operates
2597	 * in 'store and forward' mode. For this to work, the
2598	 * entire frame has to fit into the FIFO, but that means
2599	 * that jumbo frames larger than 8192 bytes will be
2600	 * truncated. Disabling all bad frame filtering causes
2601	 * the RX FIFO to operate in streaming mode, in which
2602	 * case the XMAC will start transferring frames out of the
2603	 * RX FIFO as soon as the FIFO threshold is reached.
2604	 */
2605	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2606	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2607	    XM_MODE_RX_INRANGELEN);
2608
2609	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2610		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2611	else
2612		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2613
2614	/*
2615	 * Bump up the transmit threshold. This helps hold off transmit
2616	 * underruns when we're blasting traffic from both ports at once.
2617	 */
2618	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2619
2620	/* Set promiscuous mode */
2621	sk_setpromisc(sc_if);
2622
2623	/* Set multicast filter */
2624	sk_setmulti(sc_if);
2625
2626	/* Clear and enable interrupts */
2627	SK_XM_READ_2(sc_if, XM_ISR);
2628	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2629		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2630	else
2631		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2632
2633	/* Configure MAC arbiter */
2634	switch(sc_if->sk_xmac_rev) {
2635	case XM_XMAC_REV_B2:
2636		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2637		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2638		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2639		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2640		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2641		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2642		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2643		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2644		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2645		break;
2646	case XM_XMAC_REV_C1:
2647		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2648		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2649		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2650		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2651		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2652		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2653		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2654		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2655		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2656		break;
2657	default:
2658		break;
2659	}
2660	sk_win_write_2(sc, SK_MACARB_CTL,
2661	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2662
2663	sc_if->sk_link = 1;
2664
2665	return;
2666}
2667
2668static void
2669sk_init_yukon(sc_if)
2670	struct sk_if_softc	*sc_if;
2671{
2672	u_int32_t		phy;
2673	u_int16_t		reg;
2674	struct sk_softc		*sc;
2675	struct ifnet		*ifp;
2676	int			i;
2677
2678	sc = sc_if->sk_softc;
2679	ifp = sc_if->sk_ifp;
2680
2681	if (sc->sk_type == SK_YUKON_LITE &&
2682	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2683		/* Take PHY out of reset. */
2684		sk_win_write_4(sc, SK_GPIO,
2685			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
2686	}
2687
2688	/* GMAC and GPHY Reset */
2689	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2690	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2691	DELAY(1000);
2692	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2693	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2694	DELAY(1000);
2695
2696	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2697		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2698
2699	switch(sc_if->sk_softc->sk_pmd) {
2700	case IFM_1000_SX:
2701	case IFM_1000_LX:
2702		phy |= SK_GPHY_FIBER;
2703		break;
2704
2705	case IFM_1000_CX:
2706	case IFM_1000_T:
2707		phy |= SK_GPHY_COPPER;
2708		break;
2709	}
2710
2711	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2712	DELAY(1000);
2713	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2714	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2715		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2716
2717	/* Unused read of the interrupt source register. */
2718	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2719
2720	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2721
2722	/* MIB Counter Clear Mode set */
2723	reg |= YU_PAR_MIB_CLR;
2724	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2725
2726	/* MIB Counter Clear Mode clear */
2727	reg &= ~YU_PAR_MIB_CLR;
2728	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2729
2730	/* receive control reg */
2731	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2732
2733	/* transmit parameter register */
2734	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2735		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2736
2737	/* serial mode register */
2738	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
2739	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2740		reg |= YU_SMR_MFL_JUMBO;
2741	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
2742
2743	/* Setup Yukon's address */
2744	for (i = 0; i < 3; i++) {
2745		/* Write Source Address 1 (unicast filter) */
2746		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2747			      IFP2ENADDR(sc_if->sk_ifp)[i * 2] |
2748			      IFP2ENADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
2749	}
2750
2751	for (i = 0; i < 3; i++) {
2752		reg = sk_win_read_2(sc_if->sk_softc,
2753				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2754		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2755	}
2756
2757	/* Set promiscuous mode */
2758	sk_setpromisc(sc_if);
2759
2760	/* Set multicast filter */
2761	sk_setmulti(sc_if);
2762
2763	/* enable interrupt mask for counter overflows */
2764	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2765	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2766	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2767
2768	/* Configure RX MAC FIFO */
2769	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2770	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2771
2772	/* Configure TX MAC FIFO */
2773	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2774	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2775}
2776
2777/*
2778 * Note that to properly initialize any part of the GEnesis chip,
2779 * you first have to take it out of reset mode.
2780 */
2781static void
2782sk_init(xsc)
2783	void			*xsc;
2784{
2785	struct sk_if_softc	*sc_if = xsc;
2786
2787	SK_IF_LOCK(sc_if);
2788	sk_init_locked(sc_if);
2789	SK_IF_UNLOCK(sc_if);
2790
2791	return;
2792}
2793
2794static void
2795sk_init_locked(sc_if)
2796	struct sk_if_softc	*sc_if;
2797{
2798	struct sk_softc		*sc;
2799	struct ifnet		*ifp;
2800	struct mii_data		*mii;
2801	u_int16_t		reg;
2802	u_int32_t		imr;
2803
2804	SK_IF_LOCK_ASSERT(sc_if);
2805
2806	ifp = sc_if->sk_ifp;
2807	sc = sc_if->sk_softc;
2808	mii = device_get_softc(sc_if->sk_miibus);
2809
2810	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2811		return;
2812
2813	/* Cancel pending I/O and free all RX/TX buffers. */
2814	sk_stop(sc_if);
2815
2816	if (sc->sk_type == SK_GENESIS) {
2817		/* Configure LINK_SYNC LED */
2818		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2819		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2820			SK_LINKLED_LINKSYNC_ON);
2821
2822		/* Configure RX LED */
2823		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2824			SK_RXLEDCTL_COUNTER_START);
2825
2826		/* Configure TX LED */
2827		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2828			SK_TXLEDCTL_COUNTER_START);
2829	}
2830
2831	/* Configure I2C registers */
2832
2833	/* Configure XMAC(s) */
2834	switch (sc->sk_type) {
2835	case SK_GENESIS:
2836		sk_init_xmac(sc_if);
2837		break;
2838	case SK_YUKON:
2839	case SK_YUKON_LITE:
2840	case SK_YUKON_LP:
2841		sk_init_yukon(sc_if);
2842		break;
2843	}
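	/* Program the PHY for the currently selected media. */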
2844	mii_mediachg(mii);
2845
2846	if (sc->sk_type == SK_GENESIS) {
2847		/* Configure MAC FIFOs */
2848		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2849		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2850		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2851
2852		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2853		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2854		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2855	}
2856
2857	/* Configure transmit arbiter(s) */
2858	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2859	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2860
2861	/* Configure RAMbuffers */
2862	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2863	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2864	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2865	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2866	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2867	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2868
2869	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2870	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2871	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2872	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2873	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2874	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2875	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2876
2877	/* Configure BMUs */
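	/*
	 * The BMUs are given the descriptor rings by physical address
	 * (vtophys); the high address words are simply written as zero,
	 * so the rings are assumed to sit in the low 4GB.
	 */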
2878	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2879	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2880	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2881	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2882
2883	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2884	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2885	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2886	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2887
2888	/* Init descriptors */
2889	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2890		printf("sk%d: initialization failed: no "
2891		    "memory for rx buffers\n", sc_if->sk_unit);
2892		sk_stop(sc_if);
2893		return;
2894	}
2895	sk_init_tx_ring(sc_if);
2896
2897	/* Set interrupt moderation if changed via sysctl. */
2898	/* SK_LOCK(sc); */
2899	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
2900	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
2901		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
2902		if (bootverbose)
2903			printf("skc%d: interrupt moderation is %d us\n",
2904			    sc->sk_unit, sc->sk_int_mod);
2905	}
2906	/* SK_UNLOCK(sc); */
2907
2908	/* Configure interrupt handling */
2909	CSR_READ_4(sc, SK_ISSR);
2910	if (sc_if->sk_port == SK_PORT_A)
2911		sc->sk_intrmask |= SK_INTRS1;
2912	else
2913		sc->sk_intrmask |= SK_INTRS2;
2914
2915	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2916
2917	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2918
2919	/* Start BMUs. */
2920	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2921
2922	switch(sc->sk_type) {
2923	case SK_GENESIS:
2924		/* Enable XMACs TX and RX state machines */
2925		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2926		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2927		break;
2928	case SK_YUKON:
2929	case SK_YUKON_LITE:
2930	case SK_YUKON_LP:
2931		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2932		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2933		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2934		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2935	}
2936
2937	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2938	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2939
2940	return;
2941}
2942
2943static void
2944sk_stop(sc_if)
2945	struct sk_if_softc	*sc_if;
2946{
2947	int			i;
2948	struct sk_softc		*sc;
2949	struct ifnet		*ifp;
2950
2951	SK_IF_LOCK_ASSERT(sc_if);
2952	sc = sc_if->sk_softc;
2953	ifp = sc_if->sk_ifp;
2954
2955	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2956
2957	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2958		u_int32_t		val;
2959
2960		/* Put PHY back into reset. */
2961		val = sk_win_read_4(sc, SK_GPIO);
2962		if (sc_if->sk_port == SK_PORT_A) {
2963			val |= SK_GPIO_DIR0;
2964			val &= ~SK_GPIO_DAT0;
2965		} else {
2966			val |= SK_GPIO_DIR2;
2967			val &= ~SK_GPIO_DAT2;
2968		}
2969		sk_win_write_4(sc, SK_GPIO, val);
2970	}
2971
2972	/* Turn off various components of this interface. */
2973	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2974	switch (sc->sk_type) {
2975	case SK_GENESIS:
2976		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2977		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2978		break;
2979	case SK_YUKON:
2980	case SK_YUKON_LITE:
2981	case SK_YUKON_LP:
2982		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2983		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2984		break;
2985	}
2986	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2987	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2988	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2989	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2990	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2991	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2992	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
2993	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2994	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2995
2996	/* Disable interrupts */
2997	if (sc_if->sk_port == SK_PORT_A)
2998		sc->sk_intrmask &= ~SK_INTRS1;
2999	else
3000		sc->sk_intrmask &= ~SK_INTRS2;
3001	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3002
3003	SK_XM_READ_2(sc_if, XM_ISR);
3004	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3005
3006	/* Free RX and TX mbufs still in the queues. */
3007	for (i = 0; i < SK_RX_RING_CNT; i++) {
3008		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
3009			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
3010			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
3011		}
3012	}
3013
3014	for (i = 0; i < SK_TX_RING_CNT; i++) {
3015		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
3016			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
3017			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
3018		}
3019	}
3020
3021	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3022
3023	return;
3024}
3025
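/*
 * Helper for integer sysctls: accept a new value only if it lies
 * within [low, high], otherwise reject it with EINVAL.
 */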
3026static int
3027sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3028{
3029	int error, value;
3030
3031	if (!arg1)
3032		return (EINVAL);
3033	value = *(int *)arg1;
3034	error = sysctl_handle_int(oidp, &value, 0, req);
3035	if (error || !req->newptr)
3036		return (error);
3037	if (value < low || value > high)
3038		return (EINVAL);
3039	*(int *)arg1 = value;
3040	return (0);
3041}
3042
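/* Bounds check for the interrupt moderation sysctl. */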
3043static int
3044sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3045{
3046	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3047}
3048