1/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2
3/*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright (c) 1997, 1998, 1999, 2000
7 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Bill Paul.
20 * 4. Neither the name of the author nor the names of any co-contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34 * THE POSSIBILITY OF SUCH DAMAGE.
35 */
36/*-
37 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
38 *
39 * Permission to use, copy, modify, and distribute this software for any
40 * purpose with or without fee is hereby granted, provided that the above
41 * copyright notice and this permission notice appear in all copies.
42 *
43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 */
51
52#include <sys/cdefs.h>
53__FBSDID("$FreeBSD: releng/12.0/sys/dev/sk/if_sk.c 336757 2018-07-27 10:40:48Z eadler $");
54
55/*
56 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
57 * the SK-984x series adapters, both single port and dual port.
58 * References:
59 * 	The XaQti XMAC II datasheet,
60 *  https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
61 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
62 *
63 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
64 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
65 * convenience to others until Vitesse corrects this problem:
66 *
67 * https://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
68 *
69 * Written by Bill Paul <wpaul@ee.columbia.edu>
70 * Department of Electrical Engineering
71 * Columbia University, New York City
72 */
73/*
74 * The SysKonnect gigabit ethernet adapters consist of two main
75 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
76 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
77 * components and a PHY while the GEnesis controller provides a PCI
78 * interface with DMA support. Each card may have between 512K and
79 * 2MB of SRAM on board depending on the configuration.
80 *
81 * The SysKonnect GEnesis controller can have either one or two XMAC
82 * chips connected to it, allowing single or dual port NIC configurations.
83 * SysKonnect has the distinction of being the only vendor on the market
84 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
85 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
86 * XMAC registers. This driver takes advantage of these features to allow
87 * both XMACs to operate as independent interfaces.
88 */
89
90#include <sys/param.h>
91#include <sys/systm.h>
92#include <sys/bus.h>
93#include <sys/endian.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/module.h>
98#include <sys/socket.h>
99#include <sys/sockio.h>
100#include <sys/queue.h>
101#include <sys/sysctl.h>
102
103#include <net/bpf.h>
104#include <net/ethernet.h>
105#include <net/if.h>
106#include <net/if_var.h>
107#include <net/if_arp.h>
108#include <net/if_dl.h>
109#include <net/if_media.h>
110#include <net/if_types.h>
111#include <net/if_vlan_var.h>
112
113#include <netinet/in.h>
114#include <netinet/in_systm.h>
115#include <netinet/ip.h>
116
117#include <machine/bus.h>
118#include <machine/in_cksum.h>
119#include <machine/resource.h>
120#include <sys/rman.h>
121
122#include <dev/mii/mii.h>
123#include <dev/mii/miivar.h>
124#include <dev/mii/brgphyreg.h>
125
126#include <dev/pci/pcireg.h>
127#include <dev/pci/pcivar.h>
128
129#if 0
130#define SK_USEIOSPACE
131#endif
132
133#include <dev/sk/if_skreg.h>
134#include <dev/sk/xmaciireg.h>
135#include <dev/sk/yukonreg.h>
136
137MODULE_DEPEND(sk, pci, 1, 1, 1);
138MODULE_DEPEND(sk, ether, 1, 1, 1);
139MODULE_DEPEND(sk, miibus, 1, 1, 1);
140
141/* "device miibus" required.  See GENERIC if you get errors here. */
142#include "miibus_if.h"
143
144static const struct sk_type sk_devs[] = {
145	{
146		VENDORID_SK,
147		DEVICEID_SK_V1,
148		"SysKonnect Gigabit Ethernet (V1.0)"
149	},
150	{
151		VENDORID_SK,
152		DEVICEID_SK_V2,
153		"SysKonnect Gigabit Ethernet (V2.0)"
154	},
155	{
156		VENDORID_MARVELL,
157		DEVICEID_SK_V2,
158		"Marvell Gigabit Ethernet"
159	},
160	{
161		VENDORID_MARVELL,
162		DEVICEID_BELKIN_5005,
163		"Belkin F5D5005 Gigabit Ethernet"
164	},
165	{
166		VENDORID_3COM,
167		DEVICEID_3COM_3C940,
168		"3Com 3C940 Gigabit Ethernet"
169	},
170	{
171		VENDORID_LINKSYS,
172		DEVICEID_LINKSYS_EG1032,
173		"Linksys EG1032 Gigabit Ethernet"
174	},
175	{
176		VENDORID_DLINK,
177		DEVICEID_DLINK_DGE530T_A1,
178		"D-Link DGE-530T Gigabit Ethernet"
179	},
180	{
181		VENDORID_DLINK,
182		DEVICEID_DLINK_DGE530T_B1,
183		"D-Link DGE-530T Gigabit Ethernet"
184	},
185	{ 0, 0, NULL }
186};
187
188static int skc_probe(device_t);
189static int skc_attach(device_t);
190static int skc_detach(device_t);
191static int skc_shutdown(device_t);
192static int skc_suspend(device_t);
193static int skc_resume(device_t);
194static bus_dma_tag_t skc_get_dma_tag(device_t, device_t);
195static int sk_detach(device_t);
196static int sk_probe(device_t);
197static int sk_attach(device_t);
198static void sk_tick(void *);
199static void sk_yukon_tick(void *);
200static void sk_intr(void *);
201static void sk_intr_xmac(struct sk_if_softc *);
202static void sk_intr_bcom(struct sk_if_softc *);
203static void sk_intr_yukon(struct sk_if_softc *);
204static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
205static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
206static void sk_rxeof(struct sk_if_softc *);
207static void sk_jumbo_rxeof(struct sk_if_softc *);
208static void sk_txeof(struct sk_if_softc *);
209static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
210static int sk_encap(struct sk_if_softc *, struct mbuf **);
211static void sk_start(struct ifnet *);
212static void sk_start_locked(struct ifnet *);
213static int sk_ioctl(struct ifnet *, u_long, caddr_t);
214static void sk_init(void *);
215static void sk_init_locked(struct sk_if_softc *);
216static void sk_init_xmac(struct sk_if_softc *);
217static void sk_init_yukon(struct sk_if_softc *);
218static void sk_stop(struct sk_if_softc *);
219static void sk_watchdog(void *);
220static int sk_ifmedia_upd(struct ifnet *);
221static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
222static void sk_reset(struct sk_softc *);
223static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
224static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
225static int sk_newbuf(struct sk_if_softc *, int);
226static int sk_jumbo_newbuf(struct sk_if_softc *, int);
227static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
228static int sk_dma_alloc(struct sk_if_softc *);
229static int sk_dma_jumbo_alloc(struct sk_if_softc *);
230static void sk_dma_free(struct sk_if_softc *);
231static void sk_dma_jumbo_free(struct sk_if_softc *);
232static int sk_init_rx_ring(struct sk_if_softc *);
233static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
234static void sk_init_tx_ring(struct sk_if_softc *);
235static u_int32_t sk_win_read_4(struct sk_softc *, int);
236static u_int16_t sk_win_read_2(struct sk_softc *, int);
237static u_int8_t sk_win_read_1(struct sk_softc *, int);
238static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
239static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
240static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
241
242static int sk_miibus_readreg(device_t, int, int);
243static int sk_miibus_writereg(device_t, int, int, int);
244static void sk_miibus_statchg(device_t);
245
246static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
247static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
248						int);
249static void sk_xmac_miibus_statchg(struct sk_if_softc *);
250
251static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
252static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
253						int);
254static void sk_marv_miibus_statchg(struct sk_if_softc *);
255
256static uint32_t sk_xmchash(const uint8_t *);
257static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
258static void sk_rxfilter(struct sk_if_softc *);
259static void sk_rxfilter_genesis(struct sk_if_softc *);
260static void sk_rxfilter_yukon(struct sk_if_softc *);
261
262static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
263static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
264
265/* Tunables. */
266static int jumbo_disable = 0;
267TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
268
269#ifdef __HAIKU__
270static u_short in_addword(u_short a, u_short b);
271#endif
272
273/*
274 * It seems that SK-NET GENESIS supports very simple checksum offload
275 * capability for Tx and I believe it can generate 0 checksum value for
276 * UDP packets in Tx as the hardware can't differenciate UDP packets from
277 * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it
278 * means sender didn't perforam checksum computation. For the safety I
279 * disabled UDP checksum offload capability at the moment. Alternatively
280 * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
281 * offload routine.
282 */
283#define SK_CSUM_FEATURES	(CSUM_TCP)
284
285/*
286 * Note that we have newbus methods for both the GEnesis controller
287 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
288 * the miibus code is a child of the XMACs. We need to do it this way
289 * so that the miibus drivers can access the PHY registers on the
290 * right PHY. It's not quite what I had in mind, but it's the only
291 * design that achieves the desired effect.
292 */
293static device_method_t skc_methods[] = {
294	/* Device interface */
295	DEVMETHOD(device_probe,		skc_probe),
296	DEVMETHOD(device_attach,	skc_attach),
297	DEVMETHOD(device_detach,	skc_detach),
298	DEVMETHOD(device_suspend,	skc_suspend),
299	DEVMETHOD(device_resume,	skc_resume),
300	DEVMETHOD(device_shutdown,	skc_shutdown),
301
302	DEVMETHOD(bus_get_dma_tag,	skc_get_dma_tag),
303
304	DEVMETHOD_END
305};
306
307static driver_t skc_driver = {
308	"skc",
309	skc_methods,
310	sizeof(struct sk_softc)
311};
312
313static devclass_t skc_devclass;
314
315static device_method_t sk_methods[] = {
316	/* Device interface */
317	DEVMETHOD(device_probe,		sk_probe),
318	DEVMETHOD(device_attach,	sk_attach),
319	DEVMETHOD(device_detach,	sk_detach),
320	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
321
322	/* MII interface */
323	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
324	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
325	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
326
327	DEVMETHOD_END
328};
329
330static driver_t sk_driver = {
331	"sk",
332	sk_methods,
333	sizeof(struct sk_if_softc)
334};
335
336static devclass_t sk_devclass;
337
338DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL);
339DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL);
340DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL);
341
342static struct resource_spec sk_res_spec_io[] = {
343	{ SYS_RES_IOPORT,	PCIR_BAR(1),	RF_ACTIVE },
344	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
345	{ -1,			0,		0 }
346};
347
348static struct resource_spec sk_res_spec_mem[] = {
349	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
350	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
351	{ -1,			0,		0 }
352};
353
354#define SK_SETBIT(sc, reg, x)		\
355	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
356
357#define SK_CLRBIT(sc, reg, x)		\
358	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
359
360#define SK_WIN_SETBIT_4(sc, reg, x)	\
361	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
362
363#define SK_WIN_CLRBIT_4(sc, reg, x)	\
364	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
365
366#define SK_WIN_SETBIT_2(sc, reg, x)	\
367	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
368
369#define SK_WIN_CLRBIT_2(sc, reg, x)	\
370	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
371
372static u_int32_t
373sk_win_read_4(sc, reg)
374	struct sk_softc		*sc;
375	int			reg;
376{
377#ifdef SK_USEIOSPACE
378	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
379	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
380#else
381	return(CSR_READ_4(sc, reg));
382#endif
383}
384
385static u_int16_t
386sk_win_read_2(sc, reg)
387	struct sk_softc		*sc;
388	int			reg;
389{
390#ifdef SK_USEIOSPACE
391	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
392	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
393#else
394	return(CSR_READ_2(sc, reg));
395#endif
396}
397
398static u_int8_t
399sk_win_read_1(sc, reg)
400	struct sk_softc		*sc;
401	int			reg;
402{
403#ifdef SK_USEIOSPACE
404	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
405	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
406#else
407	return(CSR_READ_1(sc, reg));
408#endif
409}
410
411static void
412sk_win_write_4(sc, reg, val)
413	struct sk_softc		*sc;
414	int			reg;
415	u_int32_t		val;
416{
417#ifdef SK_USEIOSPACE
418	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
419	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
420#else
421	CSR_WRITE_4(sc, reg, val);
422#endif
423	return;
424}
425
426static void
427sk_win_write_2(sc, reg, val)
428	struct sk_softc		*sc;
429	int			reg;
430	u_int32_t		val;
431{
432#ifdef SK_USEIOSPACE
433	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
434	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
435#else
436	CSR_WRITE_2(sc, reg, val);
437#endif
438	return;
439}
440
441static void
442sk_win_write_1(sc, reg, val)
443	struct sk_softc		*sc;
444	int			reg;
445	u_int32_t		val;
446{
447#ifdef SK_USEIOSPACE
448	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
449	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
450#else
451	CSR_WRITE_1(sc, reg, val);
452#endif
453	return;
454}
455
456#ifdef __HAIKU__
457/* stole these from in_cksum.c */
458#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
459static u_short
460in_addword(u_short a, u_short b)
461{
462	u_int64_t sum = a + b;
463
464	ADDCARRY(sum);
465	return (sum);
466}
467#endif
468
469static int
470sk_miibus_readreg(dev, phy, reg)
471	device_t		dev;
472	int			phy, reg;
473{
474	struct sk_if_softc	*sc_if;
475	int			v;
476
477	sc_if = device_get_softc(dev);
478
479	SK_IF_MII_LOCK(sc_if);
480	switch(sc_if->sk_softc->sk_type) {
481	case SK_GENESIS:
482		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
483		break;
484	case SK_YUKON:
485	case SK_YUKON_LITE:
486	case SK_YUKON_LP:
487		v = sk_marv_miibus_readreg(sc_if, phy, reg);
488		break;
489	default:
490		v = 0;
491		break;
492	}
493	SK_IF_MII_UNLOCK(sc_if);
494
495	return (v);
496}
497
498static int
499sk_miibus_writereg(dev, phy, reg, val)
500	device_t		dev;
501	int			phy, reg, val;
502{
503	struct sk_if_softc	*sc_if;
504	int			v;
505
506	sc_if = device_get_softc(dev);
507
508	SK_IF_MII_LOCK(sc_if);
509	switch(sc_if->sk_softc->sk_type) {
510	case SK_GENESIS:
511		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
512		break;
513	case SK_YUKON:
514	case SK_YUKON_LITE:
515	case SK_YUKON_LP:
516		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
517		break;
518	default:
519		v = 0;
520		break;
521	}
522	SK_IF_MII_UNLOCK(sc_if);
523
524	return (v);
525}
526
527static void
528sk_miibus_statchg(dev)
529	device_t		dev;
530{
531	struct sk_if_softc	*sc_if;
532
533	sc_if = device_get_softc(dev);
534
535	SK_IF_MII_LOCK(sc_if);
536	switch(sc_if->sk_softc->sk_type) {
537	case SK_GENESIS:
538		sk_xmac_miibus_statchg(sc_if);
539		break;
540	case SK_YUKON:
541	case SK_YUKON_LITE:
542	case SK_YUKON_LP:
543		sk_marv_miibus_statchg(sc_if);
544		break;
545	}
546	SK_IF_MII_UNLOCK(sc_if);
547
548	return;
549}
550
551static int
552sk_xmac_miibus_readreg(sc_if, phy, reg)
553	struct sk_if_softc	*sc_if;
554	int			phy, reg;
555{
556	int			i;
557
558	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
559	SK_XM_READ_2(sc_if, XM_PHY_DATA);
560	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
561		for (i = 0; i < SK_TIMEOUT; i++) {
562			DELAY(1);
563			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
564			    XM_MMUCMD_PHYDATARDY)
565				break;
566		}
567
568		if (i == SK_TIMEOUT) {
569			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
570			return(0);
571		}
572	}
573	DELAY(1);
574	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
575
576	return(i);
577}
578
579static int
580sk_xmac_miibus_writereg(sc_if, phy, reg, val)
581	struct sk_if_softc	*sc_if;
582	int			phy, reg, val;
583{
584	int			i;
585
586	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
587	for (i = 0; i < SK_TIMEOUT; i++) {
588		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
589			break;
590	}
591
592	if (i == SK_TIMEOUT) {
593		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
594		return (ETIMEDOUT);
595	}
596
597	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
598	for (i = 0; i < SK_TIMEOUT; i++) {
599		DELAY(1);
600		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
601			break;
602	}
603	if (i == SK_TIMEOUT)
604		if_printf(sc_if->sk_ifp, "phy write timed out\n");
605
606	return(0);
607}
608
609static void
610sk_xmac_miibus_statchg(sc_if)
611	struct sk_if_softc	*sc_if;
612{
613	struct mii_data		*mii;
614
615	mii = device_get_softc(sc_if->sk_miibus);
616
617	/*
618	 * If this is a GMII PHY, manually set the XMAC's
619	 * duplex mode accordingly.
620	 */
621	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
622		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
623			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
624		} else {
625			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
626		}
627	}
628}
629
630static int
631sk_marv_miibus_readreg(sc_if, phy, reg)
632	struct sk_if_softc	*sc_if;
633	int			phy, reg;
634{
635	u_int16_t		val;
636	int			i;
637
638	if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
639	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) {
640		return(0);
641	}
642
643        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
644		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
645
646	for (i = 0; i < SK_TIMEOUT; i++) {
647		DELAY(1);
648		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
649		if (val & YU_SMICR_READ_VALID)
650			break;
651	}
652
653	if (i == SK_TIMEOUT) {
654		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
655		return(0);
656	}
657
658	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
659
660	return(val);
661}
662
663static int
664sk_marv_miibus_writereg(sc_if, phy, reg, val)
665	struct sk_if_softc	*sc_if;
666	int			phy, reg, val;
667{
668	int			i;
669
670	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
671	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
672		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
673
674	for (i = 0; i < SK_TIMEOUT; i++) {
675		DELAY(1);
676		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
677			break;
678	}
679	if (i == SK_TIMEOUT)
680		if_printf(sc_if->sk_ifp, "phy write timeout\n");
681
682	return(0);
683}
684
685static void
686sk_marv_miibus_statchg(sc_if)
687	struct sk_if_softc	*sc_if;
688{
689	return;
690}
691
692#define HASH_BITS		6
693
694static u_int32_t
695sk_xmchash(addr)
696	const uint8_t *addr;
697{
698	uint32_t crc;
699
700	/* Compute CRC for the address value. */
701	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
702
703	return (~crc & ((1 << HASH_BITS) - 1));
704}
705
706static void
707sk_setfilt(sc_if, addr, slot)
708	struct sk_if_softc	*sc_if;
709	u_int16_t		*addr;
710	int			slot;
711{
712	int			base;
713
714	base = XM_RXFILT_ENTRY(slot);
715
716	SK_XM_WRITE_2(sc_if, base, addr[0]);
717	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
718	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);
719
720	return;
721}
722
723static void
724sk_rxfilter(sc_if)
725	struct sk_if_softc	*sc_if;
726{
727	struct sk_softc		*sc;
728
729	SK_IF_LOCK_ASSERT(sc_if);
730
731	sc = sc_if->sk_softc;
732	if (sc->sk_type == SK_GENESIS)
733		sk_rxfilter_genesis(sc_if);
734	else
735		sk_rxfilter_yukon(sc_if);
736}
737
738static void
739sk_rxfilter_genesis(sc_if)
740	struct sk_if_softc	*sc_if;
741{
742	struct ifnet		*ifp = sc_if->sk_ifp;
743	u_int32_t		hashes[2] = { 0, 0 }, mode;
744	int			h = 0, i;
745	struct ifmultiaddr	*ifma;
746	u_int16_t		dummy[] = { 0, 0, 0 };
747	u_int16_t		maddr[(ETHER_ADDR_LEN+1)/2];
748
749	SK_IF_LOCK_ASSERT(sc_if);
750
751	mode = SK_XM_READ_4(sc_if, XM_MODE);
752	mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
753	    XM_MODE_RX_USE_PERFECT);
754	/* First, zot all the existing perfect filters. */
755	for (i = 1; i < XM_RXFILT_MAX; i++)
756		sk_setfilt(sc_if, dummy, i);
757
758	/* Now program new ones. */
759	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
760		if (ifp->if_flags & IFF_ALLMULTI)
761			mode |= XM_MODE_RX_USE_HASH;
762		if (ifp->if_flags & IFF_PROMISC)
763			mode |= XM_MODE_RX_PROMISC;
764		hashes[0] = 0xFFFFFFFF;
765		hashes[1] = 0xFFFFFFFF;
766	} else {
767		i = 1;
768		if_maddr_rlock(ifp);
769		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
770		    ifma_link) {
771			if (ifma->ifma_addr->sa_family != AF_LINK)
772				continue;
773			/*
774			 * Program the first XM_RXFILT_MAX multicast groups
775			 * into the perfect filter.
776			 */
777			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
778			    maddr, ETHER_ADDR_LEN);
779			if (i < XM_RXFILT_MAX) {
780				sk_setfilt(sc_if, maddr, i);
781				mode |= XM_MODE_RX_USE_PERFECT;
782				i++;
783				continue;
784			}
785			h = sk_xmchash((const uint8_t *)maddr);
786			if (h < 32)
787				hashes[0] |= (1 << h);
788			else
789				hashes[1] |= (1 << (h - 32));
790			mode |= XM_MODE_RX_USE_HASH;
791		}
792		if_maddr_runlock(ifp);
793	}
794
795	SK_XM_WRITE_4(sc_if, XM_MODE, mode);
796	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
797	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
798}
799
800static void
801sk_rxfilter_yukon(sc_if)
802	struct sk_if_softc	*sc_if;
803{
804	struct ifnet		*ifp;
805	u_int32_t		crc, hashes[2] = { 0, 0 }, mode;
806	struct ifmultiaddr	*ifma;
807
808	SK_IF_LOCK_ASSERT(sc_if);
809
810	ifp = sc_if->sk_ifp;
811	mode = SK_YU_READ_2(sc_if, YUKON_RCR);
812	if (ifp->if_flags & IFF_PROMISC)
813		mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
814	else if (ifp->if_flags & IFF_ALLMULTI) {
815		mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
816		hashes[0] = 0xFFFFFFFF;
817		hashes[1] = 0xFFFFFFFF;
818	} else {
819		mode |= YU_RCR_UFLEN;
820		if_maddr_rlock(ifp);
821		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
822			if (ifma->ifma_addr->sa_family != AF_LINK)
823				continue;
824			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
825			    ifma->ifma_addr), ETHER_ADDR_LEN);
826			/* Just want the 6 least significant bits. */
827			crc &= 0x3f;
828			/* Set the corresponding bit in the hash table. */
829			hashes[crc >> 5] |= 1 << (crc & 0x1f);
830		}
831		if_maddr_runlock(ifp);
832		if (hashes[0] != 0 || hashes[1] != 0)
833			mode |= YU_RCR_MUFLEN;
834	}
835
836	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
837	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
838	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
839	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
840	SK_YU_WRITE_2(sc_if, YUKON_RCR, mode);
841}
842
843static int
844sk_init_rx_ring(sc_if)
845	struct sk_if_softc	*sc_if;
846{
847	struct sk_ring_data	*rd;
848	bus_addr_t		addr;
849	u_int32_t		csum_start;
850	int			i;
851
852	sc_if->sk_cdata.sk_rx_cons = 0;
853
854	csum_start = (ETHER_HDR_LEN + sizeof(struct ip))  << 16 |
855	    ETHER_HDR_LEN;
856	rd = &sc_if->sk_rdata;
857	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
858	for (i = 0; i < SK_RX_RING_CNT; i++) {
859		if (sk_newbuf(sc_if, i) != 0)
860			return (ENOBUFS);
861		if (i == (SK_RX_RING_CNT - 1))
862			addr = SK_RX_RING_ADDR(sc_if, 0);
863		else
864			addr = SK_RX_RING_ADDR(sc_if, i + 1);
865		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
866		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
867	}
868
869	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
870	    sc_if->sk_cdata.sk_rx_ring_map,
871	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
872
873	return(0);
874}
875
876static int
877sk_init_jumbo_rx_ring(sc_if)
878	struct sk_if_softc	*sc_if;
879{
880	struct sk_ring_data	*rd;
881	bus_addr_t		addr;
882	u_int32_t		csum_start;
883	int			i;
884
885	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;
886
887	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
888	    ETHER_HDR_LEN;
889	rd = &sc_if->sk_rdata;
890	bzero(rd->sk_jumbo_rx_ring,
891	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
892	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
893		if (sk_jumbo_newbuf(sc_if, i) != 0)
894			return (ENOBUFS);
895		if (i == (SK_JUMBO_RX_RING_CNT - 1))
896			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
897		else
898			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
899		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
900		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
901	}
902
903	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
904	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
905	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
906
907	return (0);
908}
909
910static void
911sk_init_tx_ring(sc_if)
912	struct sk_if_softc	*sc_if;
913{
914	struct sk_ring_data	*rd;
915	struct sk_txdesc	*txd;
916	bus_addr_t		addr;
917	int			i;
918
919	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
920	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);
921
922	sc_if->sk_cdata.sk_tx_prod = 0;
923	sc_if->sk_cdata.sk_tx_cons = 0;
924	sc_if->sk_cdata.sk_tx_cnt = 0;
925
926	rd = &sc_if->sk_rdata;
927	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
928	for (i = 0; i < SK_TX_RING_CNT; i++) {
929		if (i == (SK_TX_RING_CNT - 1))
930			addr = SK_TX_RING_ADDR(sc_if, 0);
931		else
932			addr = SK_TX_RING_ADDR(sc_if, i + 1);
933		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
934		txd = &sc_if->sk_cdata.sk_txdesc[i];
935		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
936	}
937
938	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
939	    sc_if->sk_cdata.sk_tx_ring_map,
940	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
941}
942
943static __inline void
944sk_discard_rxbuf(sc_if, idx)
945	struct sk_if_softc	*sc_if;
946	int			idx;
947{
948	struct sk_rx_desc	*r;
949	struct sk_rxdesc	*rxd;
950	struct mbuf		*m;
951
952
953	r = &sc_if->sk_rdata.sk_rx_ring[idx];
954	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
955	m = rxd->rx_m;
956	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
957}
958
959static __inline void
960sk_discard_jumbo_rxbuf(sc_if, idx)
961	struct sk_if_softc	*sc_if;
962	int			idx;
963{
964	struct sk_rx_desc	*r;
965	struct sk_rxdesc	*rxd;
966	struct mbuf		*m;
967
968	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
969	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
970	m = rxd->rx_m;
971	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
972}
973
974static int
975sk_newbuf(sc_if, idx)
976	struct sk_if_softc	*sc_if;
977	int 			idx;
978{
979	struct sk_rx_desc	*r;
980	struct sk_rxdesc	*rxd;
981	struct mbuf		*m;
982	bus_dma_segment_t	segs[1];
983	bus_dmamap_t		map;
984	int			nsegs;
985
986	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
987	if (m == NULL)
988		return (ENOBUFS);
989	m->m_len = m->m_pkthdr.len = MCLBYTES;
990	m_adj(m, ETHER_ALIGN);
991
992	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
993	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
994		m_freem(m);
995		return (ENOBUFS);
996	}
997	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
998
999	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
1000	if (rxd->rx_m != NULL) {
1001		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1002		    BUS_DMASYNC_POSTREAD);
1003		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
1004	}
1005	map = rxd->rx_dmamap;
1006	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
1007	sc_if->sk_cdata.sk_rx_sparemap = map;
1008	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
1009	    BUS_DMASYNC_PREREAD);
1010	rxd->rx_m = m;
1011	r = &sc_if->sk_rdata.sk_rx_ring[idx];
1012	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1013	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1014	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1015
1016	return (0);
1017}
1018
1019static int
1020sk_jumbo_newbuf(sc_if, idx)
1021	struct sk_if_softc	*sc_if;
1022	int			idx;
1023{
1024	struct sk_rx_desc	*r;
1025	struct sk_rxdesc	*rxd;
1026	struct mbuf		*m;
1027	bus_dma_segment_t	segs[1];
1028	bus_dmamap_t		map;
1029	int			nsegs;
1030
1031	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1032	if (m == NULL)
1033		return (ENOBUFS);
1034	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1035	/*
1036	 * Adjust alignment so packet payload begins on a
1037	 * longword boundary. Mandatory for Alpha, useful on
1038	 * x86 too.
1039	 */
1040	m_adj(m, ETHER_ALIGN);
1041
1042	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
1043	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1044		m_freem(m);
1045		return (ENOBUFS);
1046	}
1047	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1048
1049	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
1050	if (rxd->rx_m != NULL) {
1051		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1052		    BUS_DMASYNC_POSTREAD);
1053		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
1054		    rxd->rx_dmamap);
1055	}
1056	map = rxd->rx_dmamap;
1057	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
1058	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
1059	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
1060	    BUS_DMASYNC_PREREAD);
1061	rxd->rx_m = m;
1062	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
1063	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
1064	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
1065	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);
1066
1067	return (0);
1068}
1069
1070/*
1071 * Set media options.
1072 */
1073static int
1074sk_ifmedia_upd(ifp)
1075	struct ifnet		*ifp;
1076{
1077	struct sk_if_softc	*sc_if = ifp->if_softc;
1078	struct mii_data		*mii;
1079
1080	mii = device_get_softc(sc_if->sk_miibus);
1081	sk_init(sc_if);
1082	mii_mediachg(mii);
1083
1084	return(0);
1085}
1086
1087/*
1088 * Report current media status.
1089 */
1090static void
1091sk_ifmedia_sts(ifp, ifmr)
1092	struct ifnet		*ifp;
1093	struct ifmediareq	*ifmr;
1094{
1095	struct sk_if_softc	*sc_if;
1096	struct mii_data		*mii;
1097
1098	sc_if = ifp->if_softc;
1099	mii = device_get_softc(sc_if->sk_miibus);
1100
1101	mii_pollstat(mii);
1102	ifmr->ifm_active = mii->mii_media_active;
1103	ifmr->ifm_status = mii->mii_media_status;
1104
1105	return;
1106}
1107
1108static int
1109sk_ioctl(ifp, command, data)
1110	struct ifnet		*ifp;
1111	u_long			command;
1112	caddr_t			data;
1113{
1114	struct sk_if_softc	*sc_if = ifp->if_softc;
1115	struct ifreq		*ifr = (struct ifreq *) data;
1116	int			error, mask;
1117	struct mii_data		*mii;
1118
1119	error = 0;
1120	switch(command) {
1121	case SIOCSIFMTU:
1122		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU)
1123			error = EINVAL;
1124		else if (ifp->if_mtu != ifr->ifr_mtu) {
1125			if (sc_if->sk_jumbo_disable != 0 &&
1126			    ifr->ifr_mtu > SK_MAX_FRAMELEN)
1127				error = EINVAL;
1128			else {
1129				SK_IF_LOCK(sc_if);
1130				ifp->if_mtu = ifr->ifr_mtu;
1131				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1132					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1133					sk_init_locked(sc_if);
1134				}
1135				SK_IF_UNLOCK(sc_if);
1136			}
1137		}
1138		break;
1139	case SIOCSIFFLAGS:
1140		SK_IF_LOCK(sc_if);
1141		if (ifp->if_flags & IFF_UP) {
1142			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1143				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1144				    & (IFF_PROMISC | IFF_ALLMULTI))
1145					sk_rxfilter(sc_if);
1146			} else
1147				sk_init_locked(sc_if);
1148		} else {
1149			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1150				sk_stop(sc_if);
1151		}
1152		sc_if->sk_if_flags = ifp->if_flags;
1153		SK_IF_UNLOCK(sc_if);
1154		break;
1155	case SIOCADDMULTI:
1156	case SIOCDELMULTI:
1157		SK_IF_LOCK(sc_if);
1158		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1159			sk_rxfilter(sc_if);
1160		SK_IF_UNLOCK(sc_if);
1161		break;
1162	case SIOCGIFMEDIA:
1163	case SIOCSIFMEDIA:
1164		mii = device_get_softc(sc_if->sk_miibus);
1165		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1166		break;
1167	case SIOCSIFCAP:
1168		SK_IF_LOCK(sc_if);
1169		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
1170			SK_IF_UNLOCK(sc_if);
1171			break;
1172		}
1173		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1174		if ((mask & IFCAP_TXCSUM) != 0 &&
1175		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1176			ifp->if_capenable ^= IFCAP_TXCSUM;
1177			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1178				ifp->if_hwassist |= SK_CSUM_FEATURES;
1179			else
1180				ifp->if_hwassist &= ~SK_CSUM_FEATURES;
1181		}
1182		if ((mask & IFCAP_RXCSUM) != 0 &&
1183		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
1184			ifp->if_capenable ^= IFCAP_RXCSUM;
1185		SK_IF_UNLOCK(sc_if);
1186		break;
1187	default:
1188		error = ether_ioctl(ifp, command, data);
1189		break;
1190	}
1191
1192	return (error);
1193}
1194
1195/*
1196 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1197 * IDs against our list and return a device name if we find a match.
1198 */
1199static int
1200skc_probe(dev)
1201	device_t		dev;
1202{
1203	const struct sk_type	*t = sk_devs;
1204
1205	while(t->sk_name != NULL) {
1206		if ((pci_get_vendor(dev) == t->sk_vid) &&
1207		    (pci_get_device(dev) == t->sk_did)) {
1208			/*
1209			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
1210			 * Rev. 3 is supported by re(4).
1211			 */
1212			if ((t->sk_vid == VENDORID_LINKSYS) &&
1213				(t->sk_did == DEVICEID_LINKSYS_EG1032) &&
1214				(pci_get_subdevice(dev) !=
1215				 SUBDEVICEID_LINKSYS_EG1032_REV2)) {
1216				t++;
1217				continue;
1218			}
1219			device_set_desc(dev, t->sk_name);
1220			return (BUS_PROBE_DEFAULT);
1221		}
1222		t++;
1223	}
1224
1225	return(ENXIO);
1226}
1227
1228/*
1229 * Force the GEnesis into reset, then bring it out of reset.
1230 */
1231static void
1232sk_reset(sc)
1233	struct sk_softc		*sc;
1234{
1235
1236	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1237	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1238	if (SK_YUKON_FAMILY(sc->sk_type))
1239		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1240
1241	DELAY(1000);
1242	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1243	DELAY(2);
1244	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1245	if (SK_YUKON_FAMILY(sc->sk_type))
1246		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1247
1248	if (sc->sk_type == SK_GENESIS) {
1249		/* Configure packet arbiter */
1250		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1251		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1252		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1253		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1254		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1255	}
1256
1257	/* Enable RAM interface */
1258	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1259
1260	/*
1261         * Configure interrupt moderation. The moderation timer
1262	 * defers interrupts specified in the interrupt moderation
1263	 * timer mask based on the timeout specified in the interrupt
1264	 * moderation timer init register. Each bit in the timer
1265	 * register represents one tick, so to specify a timeout in
1266	 * microseconds, we have to multiply by the correct number of
1267	 * ticks-per-microsecond.
1268	 */
1269	switch (sc->sk_type) {
1270	case SK_GENESIS:
1271		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
1272		break;
1273	default:
1274		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
1275		break;
1276	}
1277	if (bootverbose)
1278		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
1279		    sc->sk_int_mod);
1280	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
1281	    sc->sk_int_ticks));
1282	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1283	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1284	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1285
1286	return;
1287}
1288
1289static int
1290sk_probe(dev)
1291	device_t		dev;
1292{
1293	struct sk_softc		*sc;
1294
1295	sc = device_get_softc(device_get_parent(dev));
1296
1297	/*
1298	 * Not much to do here. We always know there will be
1299	 * at least one XMAC present, and if there are two,
1300	 * skc_attach() will create a second device instance
1301	 * for us.
1302	 */
1303	switch (sc->sk_type) {
1304	case SK_GENESIS:
1305		device_set_desc(dev, "XaQti Corp. XMAC II");
1306		break;
1307	case SK_YUKON:
1308	case SK_YUKON_LITE:
1309	case SK_YUKON_LP:
1310		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1311		break;
1312	}
1313
1314	return (BUS_PROBE_DEFAULT);
1315}
1316
1317/*
1318 * Each XMAC chip is attached as a separate logical IP interface.
1319 * Single port cards will have only one logical interface of course.
1320 */
1321static int
1322sk_attach(dev)
1323	device_t		dev;
1324{
1325	struct sk_softc		*sc;
1326	struct sk_if_softc	*sc_if;
1327	struct ifnet		*ifp;
1328	u_int32_t		r;
1329	int			error, i, phy, port;
1330	u_char			eaddr[6];
1331	u_char			inv_mac[] = {0, 0, 0, 0, 0, 0};
1332
1333	if (dev == NULL)
1334		return(EINVAL);
1335
1336	error = 0;
1337	sc_if = device_get_softc(dev);
1338	sc = device_get_softc(device_get_parent(dev));
1339	port = *(int *)device_get_ivars(dev);
1340
1341	sc_if->sk_if_dev = dev;
1342	sc_if->sk_port = port;
1343	sc_if->sk_softc = sc;
1344	sc->sk_if[port] = sc_if;
1345	if (port == SK_PORT_A)
1346		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1347	if (port == SK_PORT_B)
1348		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1349
1350	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1351	callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1352
1353	if (sk_dma_alloc(sc_if) != 0) {
1354		error = ENOMEM;
1355		goto fail;
1356	}
1357	sk_dma_jumbo_alloc(sc_if);
1358
1359	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1360	if (ifp == NULL) {
1361		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1362		error = ENOSPC;
1363		goto fail;
1364	}
1365	ifp->if_softc = sc_if;
1366	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1367	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1368	/*
1369	 * SK_GENESIS has a bug in checksum offload - From linux.
1370	 */
1371	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1372		ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
1373		ifp->if_hwassist = 0;
1374	} else {
1375		ifp->if_capabilities = 0;
1376		ifp->if_hwassist = 0;
1377	}
1378	ifp->if_capenable = ifp->if_capabilities;
1379	/*
1380	 * Some revision of Yukon controller generates corrupted
1381	 * frame when TX checksum offloading is enabled.  The
1382	 * frame has a valid checksum value so payload might be
1383	 * modified during TX checksum calculation. Disable TX
1384	 * checksum offloading but give users chance to enable it
1385	 * when they know their controller works without problems
1386	 * with TX checksum offloading.
1387	 */
1388	ifp->if_capenable &= ~IFCAP_TXCSUM;
1389	ifp->if_ioctl = sk_ioctl;
1390	ifp->if_start = sk_start;
1391	ifp->if_init = sk_init;
1392	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1393	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1394	IFQ_SET_READY(&ifp->if_snd);
1395
1396	/*
1397	 * Get station address for this interface. Note that
1398	 * dual port cards actually come with three station
1399	 * addresses: one for each port, plus an extra. The
1400	 * extra one is used by the SysKonnect driver software
1401	 * as a 'virtual' station address for when both ports
1402	 * are operating in failover mode. Currently we don't
1403	 * use this extra address.
1404	 */
1405	SK_IF_LOCK(sc_if);
1406	for (i = 0; i < ETHER_ADDR_LEN; i++)
1407		eaddr[i] =
1408		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1409
1410	/* Verify whether the station address is invalid or not. */
1411	if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
1412		device_printf(sc_if->sk_if_dev,
1413		    "Generating random ethernet address\n");
1414		r = arc4random();
1415		/*
1416		 * Set OUI to convenient locally assigned address.  'b'
1417		 * is 0x62, which has the locally assigned bit set, and
1418		 * the broadcast/multicast bit clear.
1419		 */
1420		eaddr[0] = 'b';
1421		eaddr[1] = 's';
1422		eaddr[2] = 'd';
1423		eaddr[3] = (r >> 16) & 0xff;
1424		eaddr[4] = (r >>  8) & 0xff;
1425		eaddr[5] = (r >>  0) & 0xff;
1426	}
1427	/*
1428	 * Set up RAM buffer addresses. The NIC will have a certain
1429	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1430	 * need to divide this up a) between the transmitter and
1431 	 * receiver and b) between the two XMACs, if this is a
1432	 * dual port NIC. Our algotithm is to divide up the memory
1433	 * evenly so that everyone gets a fair share.
1434	 *
1435	 * Just to be contrary, Yukon2 appears to have separate memory
1436	 * for each MAC.
1437	 */
1438	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1439		u_int32_t		chunk, val;
1440
1441		chunk = sc->sk_ramsize / 2;
1442		val = sc->sk_rboff / sizeof(u_int64_t);
1443		sc_if->sk_rx_ramstart = val;
1444		val += (chunk / sizeof(u_int64_t));
1445		sc_if->sk_rx_ramend = val - 1;
1446		sc_if->sk_tx_ramstart = val;
1447		val += (chunk / sizeof(u_int64_t));
1448		sc_if->sk_tx_ramend = val - 1;
1449	} else {
1450		u_int32_t		chunk, val;
1451
1452		chunk = sc->sk_ramsize / 4;
1453		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1454		    sizeof(u_int64_t);
1455		sc_if->sk_rx_ramstart = val;
1456		val += (chunk / sizeof(u_int64_t));
1457		sc_if->sk_rx_ramend = val - 1;
1458		sc_if->sk_tx_ramstart = val;
1459		val += (chunk / sizeof(u_int64_t));
1460		sc_if->sk_tx_ramend = val - 1;
1461	}
1462
1463	/* Read and save PHY type and set PHY address */
1464	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1465	if (!SK_YUKON_FAMILY(sc->sk_type)) {
1466		switch(sc_if->sk_phytype) {
1467		case SK_PHYTYPE_XMAC:
1468			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1469			break;
1470		case SK_PHYTYPE_BCOM:
1471			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1472			break;
1473		default:
1474			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1475			    sc_if->sk_phytype);
1476			error = ENODEV;
1477			SK_IF_UNLOCK(sc_if);
1478			goto fail;
1479		}
1480	} else {
1481		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1482		    sc->sk_pmd != 'S') {
1483			/* not initialized, punt */
1484			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1485			sc->sk_coppertype = 1;
1486		}
1487
1488		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1489
1490		if (!(sc->sk_coppertype))
1491			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1492	}
1493
1494	/*
1495	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1496	 */
1497	SK_IF_UNLOCK(sc_if);
1498	ether_ifattach(ifp, eaddr);
1499	SK_IF_LOCK(sc_if);
1500
1501	/*
1502	 * The hardware should be ready for VLAN_MTU by default:
1503	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1504	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1505	 *
1506	 */
1507        ifp->if_capabilities |= IFCAP_VLAN_MTU;
1508        ifp->if_capenable |= IFCAP_VLAN_MTU;
1509	/*
1510	 * Tell the upper layer(s) we support long frames.
1511	 * Must appear after the call to ether_ifattach() because
1512	 * ether_ifattach() sets ifi_hdrlen to the default value.
1513	 */
1514        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1515
1516	/*
1517	 * Do miibus setup.
1518	 */
1519	phy = MII_PHY_ANY;
1520	switch (sc->sk_type) {
1521	case SK_GENESIS:
1522		sk_init_xmac(sc_if);
1523		if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
1524			phy = 0;
1525		break;
1526	case SK_YUKON:
1527	case SK_YUKON_LITE:
1528	case SK_YUKON_LP:
1529		sk_init_yukon(sc_if);
1530		phy = 0;
1531		break;
1532	}
1533
1534	SK_IF_UNLOCK(sc_if);
1535	error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd,
1536	    sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
1537	if (error != 0) {
1538		device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n");
1539		ether_ifdetach(ifp);
1540		goto fail;
1541	}
1542
1543fail:
1544	if (error) {
1545		/* Access should be ok even though lock has been dropped */
1546		sc->sk_if[port] = NULL;
1547		sk_detach(dev);
1548	}
1549
1550	return(error);
1551}
1552
1553/*
1554 * Attach the interface. Allocate softc structures, do ifmedia
1555 * setup and ethernet/BPF attach.
1556 */
1557static int
1558skc_attach(dev)
1559	device_t		dev;
1560{
1561	struct sk_softc		*sc;
1562	int			error = 0, *port;
1563	uint8_t			skrs;
1564	const char		*pname = NULL;
1565	char			*revstr;
1566
1567	sc = device_get_softc(dev);
1568	sc->sk_dev = dev;
1569
1570	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1571	    MTX_DEF);
1572	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
1573	/*
1574	 * Map control/status registers.
1575	 */
1576	pci_enable_busmaster(dev);
1577
1578	/* Allocate resources */
1579#ifdef SK_USEIOSPACE
1580	sc->sk_res_spec = sk_res_spec_io;
1581#else
1582	sc->sk_res_spec = sk_res_spec_mem;
1583#endif
1584	error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1585	if (error) {
1586		if (sc->sk_res_spec == sk_res_spec_mem)
1587			sc->sk_res_spec = sk_res_spec_io;
1588		else
1589			sc->sk_res_spec = sk_res_spec_mem;
1590		error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res);
1591		if (error) {
1592			device_printf(dev, "couldn't allocate %s resources\n",
1593			    sc->sk_res_spec == sk_res_spec_mem ? "memory" :
1594			    "I/O");
1595			goto fail;
1596		}
1597	}
1598
1599	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1600	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1601
1602	/* Bail out if chip is not recognized. */
1603	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1604		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
1605		    sc->sk_type, sc->sk_rev);
1606		error = ENXIO;
1607		goto fail;
1608	}
1609
1610	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1611		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1612		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1613		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1614		"SK interrupt moderation");
1615
1616	/* Pull in device tunables. */
1617	sc->sk_int_mod = SK_IM_DEFAULT;
1618	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
1619		"int_mod", &sc->sk_int_mod);
1620	if (error == 0) {
1621		if (sc->sk_int_mod < SK_IM_MIN ||
1622		    sc->sk_int_mod > SK_IM_MAX) {
1623			device_printf(dev, "int_mod value out of range; "
1624			    "using default: %d\n", SK_IM_DEFAULT);
1625			sc->sk_int_mod = SK_IM_DEFAULT;
1626		}
1627	}
1628
1629	/* Reset the adapter. */
1630	sk_reset(sc);
1631
1632	skrs = sk_win_read_1(sc, SK_EPROM0);
1633	if (sc->sk_type == SK_GENESIS) {
1634		/* Read and save RAM size and RAMbuffer offset */
1635		switch(skrs) {
1636		case SK_RAMSIZE_512K_64:
1637			sc->sk_ramsize = 0x80000;
1638			sc->sk_rboff = SK_RBOFF_0;
1639			break;
1640		case SK_RAMSIZE_1024K_64:
1641			sc->sk_ramsize = 0x100000;
1642			sc->sk_rboff = SK_RBOFF_80000;
1643			break;
1644		case SK_RAMSIZE_1024K_128:
1645			sc->sk_ramsize = 0x100000;
1646			sc->sk_rboff = SK_RBOFF_0;
1647			break;
1648		case SK_RAMSIZE_2048K_128:
1649			sc->sk_ramsize = 0x200000;
1650			sc->sk_rboff = SK_RBOFF_0;
1651			break;
1652		default:
1653			device_printf(dev, "unknown ram size: %d\n", skrs);
1654			error = ENXIO;
1655			goto fail;
1656		}
1657	} else { /* SK_YUKON_FAMILY */
1658		if (skrs == 0x00)
1659			sc->sk_ramsize = 0x20000;
1660		else
1661			sc->sk_ramsize = skrs * (1<<12);
1662		sc->sk_rboff = SK_RBOFF_0;
1663	}
1664
1665	/* Read and save physical media type */
1666	 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1667
1668	 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1669		 sc->sk_coppertype = 1;
1670	 else
1671		 sc->sk_coppertype = 0;
1672
1673	/* Determine whether to name it with VPD PN or just make it up.
1674	 * Marvell Yukon VPD PN seems to freqently be bogus. */
1675	switch (pci_get_device(dev)) {
1676	case DEVICEID_SK_V1:
1677	case DEVICEID_BELKIN_5005:
1678	case DEVICEID_3COM_3C940:
1679	case DEVICEID_LINKSYS_EG1032:
1680	case DEVICEID_DLINK_DGE530T_A1:
1681	case DEVICEID_DLINK_DGE530T_B1:
1682		/* Stay with VPD PN. */
1683		(void) pci_get_vpd_ident(dev, &pname);
1684		break;
1685	case DEVICEID_SK_V2:
1686		/* YUKON VPD PN might bear no resemblance to reality. */
1687		switch (sc->sk_type) {
1688		case SK_GENESIS:
1689			/* Stay with VPD PN. */
1690			(void) pci_get_vpd_ident(dev, &pname);
1691			break;
1692		case SK_YUKON:
1693			pname = "Marvell Yukon Gigabit Ethernet";
1694			break;
1695		case SK_YUKON_LITE:
1696			pname = "Marvell Yukon Lite Gigabit Ethernet";
1697			break;
1698		case SK_YUKON_LP:
1699			pname = "Marvell Yukon LP Gigabit Ethernet";
1700			break;
1701		default:
1702			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1703			break;
1704		}
1705
1706		/* Yukon Lite Rev. A0 needs special test. */
1707		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1708			u_int32_t far;
1709			u_int8_t testbyte;
1710
1711			/* Save flash address register before testing. */
1712			far = sk_win_read_4(sc, SK_EP_ADDR);
1713
1714			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1715			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1716
1717			if (testbyte != 0x00) {
1718				/* Yukon Lite Rev. A0 detected. */
1719				sc->sk_type = SK_YUKON_LITE;
1720				sc->sk_rev = SK_YUKON_LITE_REV_A0;
1721				/* Restore flash address register. */
1722				sk_win_write_4(sc, SK_EP_ADDR, far);
1723			}
1724		}
1725		break;
1726	default:
1727		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1728			"chipver=%02x, rev=%x\n",
1729			pci_get_vendor(dev), pci_get_device(dev),
1730			sc->sk_type, sc->sk_rev);
1731		error = ENXIO;
1732		goto fail;
1733	}
1734
1735	if (sc->sk_type == SK_YUKON_LITE) {
1736		switch (sc->sk_rev) {
1737		case SK_YUKON_LITE_REV_A0:
1738			revstr = "A0";
1739			break;
1740		case SK_YUKON_LITE_REV_A1:
1741			revstr = "A1";
1742			break;
1743		case SK_YUKON_LITE_REV_A3:
1744			revstr = "A3";
1745			break;
1746		default:
1747			revstr = "";
1748			break;
1749		}
1750	} else {
1751		revstr = "";
1752	}
1753
1754	/* Announce the product name and more VPD data if there. */
1755	if (pname != NULL)
1756		device_printf(dev, "%s rev. %s(0x%x)\n",
1757			pname, revstr, sc->sk_rev);
1758
1759	if (bootverbose) {
1760		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
1761		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
1762		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1763		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1764	}
1765
1766	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1767	if (sc->sk_devs[SK_PORT_A] == NULL) {
1768		device_printf(dev, "failed to add child for PORT_A\n");
1769		error = ENXIO;
1770		goto fail;
1771	}
1772	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1773	if (port == NULL) {
1774		device_printf(dev, "failed to allocate memory for "
1775		    "ivars of PORT_A\n");
1776		error = ENXIO;
1777		goto fail;
1778	}
1779	*port = SK_PORT_A;
1780	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1781
1782	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1783		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1784		if (sc->sk_devs[SK_PORT_B] == NULL) {
1785			device_printf(dev, "failed to add child for PORT_B\n");
1786			error = ENXIO;
1787			goto fail;
1788		}
1789		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1790		if (port == NULL) {
1791			device_printf(dev, "failed to allocate memory for "
1792			    "ivars of PORT_B\n");
1793			error = ENXIO;
1794			goto fail;
1795		}
1796		*port = SK_PORT_B;
1797		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1798	}
1799
1800	/* Turn on the 'driver is loaded' LED. */
1801	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1802
1803	error = bus_generic_attach(dev);
1804	if (error) {
1805		device_printf(dev, "failed to attach port(s)\n");
1806		goto fail;
1807	}
1808
1809	/* Hook interrupt last to avoid having to lock softc */
1810	error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1811	    NULL, sk_intr, sc, &sc->sk_intrhand);
1812
1813	if (error) {
1814		device_printf(dev, "couldn't set up irq\n");
1815		goto fail;
1816	}
1817
1818fail:
1819	if (error)
1820		skc_detach(dev);
1821
1822	return(error);
1823}
1824
1825/*
1826 * Shutdown hardware and free up resources. This can be called any
1827 * time after the mutex has been initialized. It is called in both
1828 * the error case in attach and the normal detach case so it needs
1829 * to be careful about only freeing resources that have actually been
1830 * allocated.
1831 */
1832static int
1833sk_detach(dev)
1834	device_t		dev;
1835{
1836	struct sk_if_softc	*sc_if;
1837	struct ifnet		*ifp;
1838
1839	sc_if = device_get_softc(dev);
1840	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1841	    ("sk mutex not initialized in sk_detach"));
1842	SK_IF_LOCK(sc_if);
1843
1844	ifp = sc_if->sk_ifp;
1845	/* These should only be active if attach_xmac succeeded */
1846	if (device_is_attached(dev)) {
1847		sk_stop(sc_if);
1848		/* Can't hold locks while calling detach */
1849		SK_IF_UNLOCK(sc_if);
1850		callout_drain(&sc_if->sk_tick_ch);
1851		callout_drain(&sc_if->sk_watchdog_ch);
1852		ether_ifdetach(ifp);
1853		SK_IF_LOCK(sc_if);
1854	}
1855	/*
1856	 * We're generally called from skc_detach() which is using
1857	 * device_delete_child() to get to here. It's already trashed
1858	 * miibus for us, so don't do it here or we'll panic.
1859	 */
1860	/*
1861	if (sc_if->sk_miibus != NULL)
1862		device_delete_child(dev, sc_if->sk_miibus);
1863	*/
1864	bus_generic_detach(dev);
1865	sk_dma_jumbo_free(sc_if);
1866	sk_dma_free(sc_if);
1867	SK_IF_UNLOCK(sc_if);
1868	if (ifp)
1869		if_free(ifp);
1870
1871	return(0);
1872}
1873
1874static int
1875skc_detach(dev)
1876	device_t		dev;
1877{
1878	struct sk_softc		*sc;
1879
1880	sc = device_get_softc(dev);
1881	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1882
1883	if (device_is_alive(dev)) {
1884		if (sc->sk_devs[SK_PORT_A] != NULL) {
1885			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1886			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1887		}
1888		if (sc->sk_devs[SK_PORT_B] != NULL) {
1889			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1890			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1891		}
1892		bus_generic_detach(dev);
1893	}
1894
1895	if (sc->sk_intrhand)
1896		bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1897	bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1898
1899	mtx_destroy(&sc->sk_mii_mtx);
1900	mtx_destroy(&sc->sk_mtx);
1901
1902	return(0);
1903}
1904
1905static bus_dma_tag_t
1906skc_get_dma_tag(device_t bus, device_t child __unused)
1907{
1908
1909	return (bus_get_dma_tag(bus));
1910}
1911
1912struct sk_dmamap_arg {
1913	bus_addr_t	sk_busaddr;
1914};
1915
1916static void
1917sk_dmamap_cb(arg, segs, nseg, error)
1918	void			*arg;
1919	bus_dma_segment_t	*segs;
1920	int			nseg;
1921	int			error;
1922{
1923	struct sk_dmamap_arg	*ctx;
1924
1925	if (error != 0)
1926		return;
1927
1928	ctx = arg;
1929	ctx->sk_busaddr = segs[0].ds_addr;
1930}
1931
1932/*
1933 * Allocate jumbo buffer storage. The SysKonnect adapters support
1934 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1935 * use them in their drivers. In order for us to use them, we need
1936 * large 9K receive buffers, however standard mbuf clusters are only
1937 * 2048 bytes in size. Consequently, we need to allocate and manage
1938 * our own jumbo buffer pool. Fortunately, this does not require an
1939 * excessive amount of additional code.
1940 */
1941static int
1942sk_dma_alloc(sc_if)
1943	struct sk_if_softc	*sc_if;
1944{
1945	struct sk_dmamap_arg	ctx;
1946	struct sk_txdesc	*txd;
1947	struct sk_rxdesc	*rxd;
1948	int			error, i;
1949
1950	/* create parent tag */
1951	/*
1952	 * XXX
1953	 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument
1954	 * in bus_dma_tag_create(9) as the NIC would support DAC mode.
1955	 * However bz@ reported that it does not work on amd64 with > 4GB
1956	 * RAM. Until we have more clues of the breakage, disable DAC mode
1957	 * by limiting DMA address to be in 32bit address space.
1958	 */
1959	error = bus_dma_tag_create(
1960		    bus_get_dma_tag(sc_if->sk_if_dev),/* parent */
1961		    1, 0,			/* algnmnt, boundary */
1962		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1963		    BUS_SPACE_MAXADDR,		/* highaddr */
1964		    NULL, NULL,			/* filter, filterarg */
1965		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1966		    0,				/* nsegments */
1967		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1968		    0,				/* flags */
1969		    NULL, NULL,			/* lockfunc, lockarg */
1970		    &sc_if->sk_cdata.sk_parent_tag);
1971	if (error != 0) {
1972		device_printf(sc_if->sk_if_dev,
1973		    "failed to create parent DMA tag\n");
1974		goto fail;
1975	}
1976
1977	/* create tag for Tx ring */
1978	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1979		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1980		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1981		    BUS_SPACE_MAXADDR,		/* highaddr */
1982		    NULL, NULL,			/* filter, filterarg */
1983		    SK_TX_RING_SZ,		/* maxsize */
1984		    1,				/* nsegments */
1985		    SK_TX_RING_SZ,		/* maxsegsize */
1986		    0,				/* flags */
1987		    NULL, NULL,			/* lockfunc, lockarg */
1988		    &sc_if->sk_cdata.sk_tx_ring_tag);
1989	if (error != 0) {
1990		device_printf(sc_if->sk_if_dev,
1991		    "failed to allocate Tx ring DMA tag\n");
1992		goto fail;
1993	}
1994
1995	/* create tag for Rx ring */
1996	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
1997		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
1998		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1999		    BUS_SPACE_MAXADDR,		/* highaddr */
2000		    NULL, NULL,			/* filter, filterarg */
2001		    SK_RX_RING_SZ,		/* maxsize */
2002		    1,				/* nsegments */
2003		    SK_RX_RING_SZ,		/* maxsegsize */
2004		    0,				/* flags */
2005		    NULL, NULL,			/* lockfunc, lockarg */
2006		    &sc_if->sk_cdata.sk_rx_ring_tag);
2007	if (error != 0) {
2008		device_printf(sc_if->sk_if_dev,
2009		    "failed to allocate Rx ring DMA tag\n");
2010		goto fail;
2011	}
2012
2013	/* create tag for Tx buffers */
2014	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2015		    1, 0,			/* algnmnt, boundary */
2016		    BUS_SPACE_MAXADDR,		/* lowaddr */
2017		    BUS_SPACE_MAXADDR,		/* highaddr */
2018		    NULL, NULL,			/* filter, filterarg */
2019		    MCLBYTES * SK_MAXTXSEGS,	/* maxsize */
2020		    SK_MAXTXSEGS,		/* nsegments */
2021		    MCLBYTES,			/* maxsegsize */
2022		    0,				/* flags */
2023		    NULL, NULL,			/* lockfunc, lockarg */
2024		    &sc_if->sk_cdata.sk_tx_tag);
2025	if (error != 0) {
2026		device_printf(sc_if->sk_if_dev,
2027		    "failed to allocate Tx DMA tag\n");
2028		goto fail;
2029	}
2030
2031	/* create tag for Rx buffers */
2032	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2033		    1, 0,			/* algnmnt, boundary */
2034		    BUS_SPACE_MAXADDR,		/* lowaddr */
2035		    BUS_SPACE_MAXADDR,		/* highaddr */
2036		    NULL, NULL,			/* filter, filterarg */
2037		    MCLBYTES,			/* maxsize */
2038		    1,				/* nsegments */
2039		    MCLBYTES,			/* maxsegsize */
2040		    0,				/* flags */
2041		    NULL, NULL,			/* lockfunc, lockarg */
2042		    &sc_if->sk_cdata.sk_rx_tag);
2043	if (error != 0) {
2044		device_printf(sc_if->sk_if_dev,
2045		    "failed to allocate Rx DMA tag\n");
2046		goto fail;
2047	}
2048
2049	/* allocate DMA'able memory and load the DMA map for Tx ring */
2050	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag,
2051	    (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT |
2052	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map);
2053	if (error != 0) {
2054		device_printf(sc_if->sk_if_dev,
2055		    "failed to allocate DMA'able memory for Tx ring\n");
2056		goto fail;
2057	}
2058
2059	ctx.sk_busaddr = 0;
2060	error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag,
2061	    sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring,
2062	    SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2063	if (error != 0) {
2064		device_printf(sc_if->sk_if_dev,
2065		    "failed to load DMA'able memory for Tx ring\n");
2066		goto fail;
2067	}
2068	sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr;
2069
2070	/* allocate DMA'able memory and load the DMA map for Rx ring */
2071	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag,
2072	    (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT |
2073	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map);
2074	if (error != 0) {
2075		device_printf(sc_if->sk_if_dev,
2076		    "failed to allocate DMA'able memory for Rx ring\n");
2077		goto fail;
2078	}
2079
2080	ctx.sk_busaddr = 0;
2081	error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag,
2082	    sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring,
2083	    SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
2084	if (error != 0) {
2085		device_printf(sc_if->sk_if_dev,
2086		    "failed to load DMA'able memory for Rx ring\n");
2087		goto fail;
2088	}
2089	sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr;
2090
2091	/* create DMA maps for Tx buffers */
2092	for (i = 0; i < SK_TX_RING_CNT; i++) {
2093		txd = &sc_if->sk_cdata.sk_txdesc[i];
2094		txd->tx_m = NULL;
2095		txd->tx_dmamap = NULL;
2096		error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0,
2097		    &txd->tx_dmamap);
2098		if (error != 0) {
2099			device_printf(sc_if->sk_if_dev,
2100			    "failed to create Tx dmamap\n");
2101			goto fail;
2102		}
2103	}
2104
2105	/* create DMA maps for Rx buffers */
2106	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2107	    &sc_if->sk_cdata.sk_rx_sparemap)) != 0) {
2108		device_printf(sc_if->sk_if_dev,
2109		    "failed to create spare Rx dmamap\n");
2110		goto fail;
2111	}
2112	for (i = 0; i < SK_RX_RING_CNT; i++) {
2113		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2114		rxd->rx_m = NULL;
2115		rxd->rx_dmamap = NULL;
2116		error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0,
2117		    &rxd->rx_dmamap);
2118		if (error != 0) {
2119			device_printf(sc_if->sk_if_dev,
2120			    "failed to create Rx dmamap\n");
2121			goto fail;
2122		}
2123	}
2124
2125fail:
2126	return (error);
2127}
2128
2129static int
2130sk_dma_jumbo_alloc(sc_if)
2131	struct sk_if_softc	*sc_if;
2132{
2133	struct sk_dmamap_arg	ctx;
2134	struct sk_rxdesc	*jrxd;
2135	int			error, i;
2136
2137	if (jumbo_disable != 0) {
2138		device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n");
2139		sc_if->sk_jumbo_disable = 1;
2140		return (0);
2141	}
2142	/* create tag for jumbo Rx ring */
2143	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2144		    SK_RING_ALIGN, 0,		/* algnmnt, boundary */
2145		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2146		    BUS_SPACE_MAXADDR,		/* highaddr */
2147		    NULL, NULL,			/* filter, filterarg */
2148		    SK_JUMBO_RX_RING_SZ,	/* maxsize */
2149		    1,				/* nsegments */
2150		    SK_JUMBO_RX_RING_SZ,	/* maxsegsize */
2151		    0,				/* flags */
2152		    NULL, NULL,			/* lockfunc, lockarg */
2153		    &sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2154	if (error != 0) {
2155		device_printf(sc_if->sk_if_dev,
2156		    "failed to allocate jumbo Rx ring DMA tag\n");
2157		goto jumbo_fail;
2158	}
2159
2160	/* create tag for jumbo Rx buffers */
2161	error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */
2162		    1, 0,			/* algnmnt, boundary */
2163		    BUS_SPACE_MAXADDR,		/* lowaddr */
2164		    BUS_SPACE_MAXADDR,		/* highaddr */
2165		    NULL, NULL,			/* filter, filterarg */
2166		    MJUM9BYTES,			/* maxsize */
2167		    1,				/* nsegments */
2168		    MJUM9BYTES,			/* maxsegsize */
2169		    0,				/* flags */
2170		    NULL, NULL,			/* lockfunc, lockarg */
2171		    &sc_if->sk_cdata.sk_jumbo_rx_tag);
2172	if (error != 0) {
2173		device_printf(sc_if->sk_if_dev,
2174		    "failed to allocate jumbo Rx DMA tag\n");
2175		goto jumbo_fail;
2176	}
2177
2178	/* allocate DMA'able memory and load the DMA map for jumbo Rx ring */
2179	error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2180	    (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT |
2181	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
2182	    &sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2183	if (error != 0) {
2184		device_printf(sc_if->sk_if_dev,
2185		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
2186		goto jumbo_fail;
2187	}
2188
2189	ctx.sk_busaddr = 0;
2190	error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2191	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2192	    sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb,
2193	    &ctx, BUS_DMA_NOWAIT);
2194	if (error != 0) {
2195		device_printf(sc_if->sk_if_dev,
2196		    "failed to load DMA'able memory for jumbo Rx ring\n");
2197		goto jumbo_fail;
2198	}
2199	sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr;
2200
2201	/* create DMA maps for jumbo Rx buffers */
2202	if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2203	    &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) {
2204		device_printf(sc_if->sk_if_dev,
2205		    "failed to create spare jumbo Rx dmamap\n");
2206		goto jumbo_fail;
2207	}
2208	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2209		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2210		jrxd->rx_m = NULL;
2211		jrxd->rx_dmamap = NULL;
2212		error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0,
2213		    &jrxd->rx_dmamap);
2214		if (error != 0) {
2215			device_printf(sc_if->sk_if_dev,
2216			    "failed to create jumbo Rx dmamap\n");
2217			goto jumbo_fail;
2218		}
2219	}
2220
2221	return (0);
2222
2223jumbo_fail:
2224	sk_dma_jumbo_free(sc_if);
2225	device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to "
2226	    "resource shortage\n");
2227	sc_if->sk_jumbo_disable = 1;
2228	return (0);
2229}
2230
2231static void
2232sk_dma_free(sc_if)
2233	struct sk_if_softc	*sc_if;
2234{
2235	struct sk_txdesc	*txd;
2236	struct sk_rxdesc	*rxd;
2237	int			i;
2238
2239	/* Tx ring */
2240	if (sc_if->sk_cdata.sk_tx_ring_tag) {
2241		if (sc_if->sk_rdata.sk_tx_ring_paddr)
2242			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag,
2243			    sc_if->sk_cdata.sk_tx_ring_map);
2244		if (sc_if->sk_rdata.sk_tx_ring)
2245			bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag,
2246			    sc_if->sk_rdata.sk_tx_ring,
2247			    sc_if->sk_cdata.sk_tx_ring_map);
2248		sc_if->sk_rdata.sk_tx_ring = NULL;
2249		sc_if->sk_rdata.sk_tx_ring_paddr = 0;
2250		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag);
2251		sc_if->sk_cdata.sk_tx_ring_tag = NULL;
2252	}
2253	/* Rx ring */
2254	if (sc_if->sk_cdata.sk_rx_ring_tag) {
2255		if (sc_if->sk_rdata.sk_rx_ring_paddr)
2256			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag,
2257			    sc_if->sk_cdata.sk_rx_ring_map);
2258		if (sc_if->sk_rdata.sk_rx_ring)
2259			bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag,
2260			    sc_if->sk_rdata.sk_rx_ring,
2261			    sc_if->sk_cdata.sk_rx_ring_map);
2262		sc_if->sk_rdata.sk_rx_ring = NULL;
2263		sc_if->sk_rdata.sk_rx_ring_paddr = 0;
2264		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag);
2265		sc_if->sk_cdata.sk_rx_ring_tag = NULL;
2266	}
2267	/* Tx buffers */
2268	if (sc_if->sk_cdata.sk_tx_tag) {
2269		for (i = 0; i < SK_TX_RING_CNT; i++) {
2270			txd = &sc_if->sk_cdata.sk_txdesc[i];
2271			if (txd->tx_dmamap) {
2272				bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag,
2273				    txd->tx_dmamap);
2274				txd->tx_dmamap = NULL;
2275			}
2276		}
2277		bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag);
2278		sc_if->sk_cdata.sk_tx_tag = NULL;
2279	}
2280	/* Rx buffers */
2281	if (sc_if->sk_cdata.sk_rx_tag) {
2282		for (i = 0; i < SK_RX_RING_CNT; i++) {
2283			rxd = &sc_if->sk_cdata.sk_rxdesc[i];
2284			if (rxd->rx_dmamap) {
2285				bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2286				    rxd->rx_dmamap);
2287				rxd->rx_dmamap = NULL;
2288			}
2289		}
2290		if (sc_if->sk_cdata.sk_rx_sparemap) {
2291			bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag,
2292			    sc_if->sk_cdata.sk_rx_sparemap);
2293			sc_if->sk_cdata.sk_rx_sparemap = NULL;
2294		}
2295		bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag);
2296		sc_if->sk_cdata.sk_rx_tag = NULL;
2297	}
2298
2299	if (sc_if->sk_cdata.sk_parent_tag) {
2300		bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag);
2301		sc_if->sk_cdata.sk_parent_tag = NULL;
2302	}
2303}
2304
2305static void
2306sk_dma_jumbo_free(sc_if)
2307	struct sk_if_softc	*sc_if;
2308{
2309	struct sk_rxdesc	*jrxd;
2310	int			i;
2311
2312	/* jumbo Rx ring */
2313	if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) {
2314		if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr)
2315			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2316			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2317		if (sc_if->sk_rdata.sk_jumbo_rx_ring)
2318			bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2319			    sc_if->sk_rdata.sk_jumbo_rx_ring,
2320			    sc_if->sk_cdata.sk_jumbo_rx_ring_map);
2321		sc_if->sk_rdata.sk_jumbo_rx_ring = NULL;
2322		sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0;
2323		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag);
2324		sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL;
2325	}
2326
2327	/* jumbo Rx buffers */
2328	if (sc_if->sk_cdata.sk_jumbo_rx_tag) {
2329		for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
2330			jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
2331			if (jrxd->rx_dmamap) {
2332				bus_dmamap_destroy(
2333				    sc_if->sk_cdata.sk_jumbo_rx_tag,
2334				    jrxd->rx_dmamap);
2335				jrxd->rx_dmamap = NULL;
2336			}
2337		}
2338		if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) {
2339			bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag,
2340			    sc_if->sk_cdata.sk_jumbo_rx_sparemap);
2341			sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL;
2342		}
2343		bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag);
2344		sc_if->sk_cdata.sk_jumbo_rx_tag = NULL;
2345	}
2346}
2347
2348static void
2349sk_txcksum(ifp, m, f)
2350	struct ifnet		*ifp;
2351	struct mbuf		*m;
2352	struct sk_tx_desc	*f;
2353{
2354	struct ip		*ip;
2355	u_int16_t		offset;
2356	u_int8_t 		*p;
2357
2358	offset = sizeof(struct ip) + ETHER_HDR_LEN;
2359	for(; m && m->m_len == 0; m = m->m_next)
2360		;
2361	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
2362		if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__);
2363		/* checksum may be corrupted */
2364		goto sendit;
2365	}
2366	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
2367		if (m->m_len != ETHER_HDR_LEN) {
2368			if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n",
2369			    __func__);
2370			/* checksum may be corrupted */
2371			goto sendit;
2372		}
2373		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
2374			;
2375		if (m == NULL) {
2376			offset = sizeof(struct ip) + ETHER_HDR_LEN;
2377			/* checksum may be corrupted */
2378			goto sendit;
2379		}
2380		ip = mtod(m, struct ip *);
2381	} else {
2382		p = mtod(m, u_int8_t *);
2383		p += ETHER_HDR_LEN;
2384		ip = (struct ip *)p;
2385	}
2386	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
2387
2388sendit:
2389	f->sk_csum_startval = 0;
2390	f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) |
2391	    (offset << 16));
2392}
2393
2394static int
2395sk_encap(sc_if, m_head)
2396        struct sk_if_softc	*sc_if;
2397        struct mbuf		**m_head;
2398{
2399	struct sk_txdesc	*txd;
2400	struct sk_tx_desc	*f = NULL;
2401	struct mbuf		*m;
2402	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
2403	u_int32_t		cflags, frag, si, sk_ctl;
2404	int			error, i, nseg;
2405
2406	SK_IF_LOCK_ASSERT(sc_if);
2407
2408	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
2409		return (ENOBUFS);
2410
2411	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2412	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2413	if (error == EFBIG) {
2414		m = m_defrag(*m_head, M_NOWAIT);
2415		if (m == NULL) {
2416			m_freem(*m_head);
2417			*m_head = NULL;
2418			return (ENOMEM);
2419		}
2420		*m_head = m;
2421		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
2422		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
2423		if (error != 0) {
2424			m_freem(*m_head);
2425			*m_head = NULL;
2426			return (error);
2427		}
2428	} else if (error != 0)
2429		return (error);
2430	if (nseg == 0) {
2431		m_freem(*m_head);
2432		*m_head = NULL;
2433		return (EIO);
2434	}
2435	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
2436		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2437		return (ENOBUFS);
2438	}
2439
2440	m = *m_head;
2441	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
2442		cflags = SK_OPCODE_CSUM;
2443	else
2444		cflags = SK_OPCODE_DEFAULT;
2445	si = frag = sc_if->sk_cdata.sk_tx_prod;
2446	for (i = 0; i < nseg; i++) {
2447		f = &sc_if->sk_rdata.sk_tx_ring[frag];
2448		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
2449		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
2450		sk_ctl = txsegs[i].ds_len | cflags;
2451		if (i == 0) {
2452			if (cflags == SK_OPCODE_CSUM)
2453				sk_txcksum(sc_if->sk_ifp, m, f);
2454			sk_ctl |= SK_TXCTL_FIRSTFRAG;
2455		} else
2456			sk_ctl |= SK_TXCTL_OWN;
2457		f->sk_ctl = htole32(sk_ctl);
2458		sc_if->sk_cdata.sk_tx_cnt++;
2459		SK_INC(frag, SK_TX_RING_CNT);
2460	}
2461	sc_if->sk_cdata.sk_tx_prod = frag;
2462
2463	/* set EOF on the last desciptor */
2464	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
2465	f = &sc_if->sk_rdata.sk_tx_ring[frag];
2466	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);
2467
2468	/* turn the first descriptor ownership to NIC */
2469	f = &sc_if->sk_rdata.sk_tx_ring[si];
2470	f->sk_ctl |= htole32(SK_TXCTL_OWN);
2471
2472	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
2473	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
2474	txd->tx_m = m;
2475
2476	/* sync descriptors */
2477	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2478	    BUS_DMASYNC_PREWRITE);
2479	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2480	    sc_if->sk_cdata.sk_tx_ring_map,
2481	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2482
2483	return (0);
2484}
2485
2486static void
2487sk_start(ifp)
2488	struct ifnet		*ifp;
2489{
2490	struct sk_if_softc *sc_if;
2491
2492	sc_if = ifp->if_softc;
2493
2494	SK_IF_LOCK(sc_if);
2495	sk_start_locked(ifp);
2496	SK_IF_UNLOCK(sc_if);
2497
2498	return;
2499}
2500
2501static void
2502sk_start_locked(ifp)
2503	struct ifnet		*ifp;
2504{
2505        struct sk_softc		*sc;
2506        struct sk_if_softc	*sc_if;
2507        struct mbuf		*m_head;
2508	int			enq;
2509
2510	sc_if = ifp->if_softc;
2511	sc = sc_if->sk_softc;
2512
2513	SK_IF_LOCK_ASSERT(sc_if);
2514
2515	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2516	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
2517		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2518		if (m_head == NULL)
2519			break;
2520
2521		/*
2522		 * Pack the data into the transmit ring. If we
2523		 * don't have room, set the OACTIVE flag and wait
2524		 * for the NIC to drain the ring.
2525		 */
2526		if (sk_encap(sc_if, &m_head)) {
2527			if (m_head == NULL)
2528				break;
2529			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2530			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2531			break;
2532		}
2533
2534		enq++;
2535		/*
2536		 * If there's a BPF listener, bounce a copy of this frame
2537		 * to him.
2538		 */
2539		BPF_MTAP(ifp, m_head);
2540	}
2541
2542	if (enq > 0) {
2543		/* Transmit */
2544		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2545
2546		/* Set a timeout in case the chip goes out to lunch. */
2547		sc_if->sk_watchdog_timer = 5;
2548	}
2549}
2550
2551
2552static void
2553sk_watchdog(arg)
2554	void			*arg;
2555{
2556	struct sk_if_softc	*sc_if;
2557	struct ifnet		*ifp;
2558
2559	ifp = arg;
2560	sc_if = ifp->if_softc;
2561
2562	SK_IF_LOCK_ASSERT(sc_if);
2563
2564	if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
2565		goto done;
2566
2567	/*
2568	 * Reclaim first as there is a possibility of losing Tx completion
2569	 * interrupts.
2570	 */
2571	sk_txeof(sc_if);
2572	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
2573		if_printf(sc_if->sk_ifp, "watchdog timeout\n");
2574		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2575		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2576		sk_init_locked(sc_if);
2577	}
2578
2579done:
2580	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
2581
2582	return;
2583}
2584
2585static int
2586skc_shutdown(dev)
2587	device_t		dev;
2588{
2589	struct sk_softc		*sc;
2590
2591	sc = device_get_softc(dev);
2592	SK_LOCK(sc);
2593
2594	/* Turn off the 'driver is loaded' LED. */
2595	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2596
2597	/*
2598	 * Reset the GEnesis controller. Doing this should also
2599	 * assert the resets on the attached XMAC(s).
2600	 */
2601	sk_reset(sc);
2602	SK_UNLOCK(sc);
2603
2604	return (0);
2605}
2606
2607static int
2608skc_suspend(dev)
2609	device_t		dev;
2610{
2611	struct sk_softc		*sc;
2612	struct sk_if_softc	*sc_if0, *sc_if1;
2613	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2614
2615	sc = device_get_softc(dev);
2616
2617	SK_LOCK(sc);
2618
2619	sc_if0 = sc->sk_if[SK_PORT_A];
2620	sc_if1 = sc->sk_if[SK_PORT_B];
2621	if (sc_if0 != NULL)
2622		ifp0 = sc_if0->sk_ifp;
2623	if (sc_if1 != NULL)
2624		ifp1 = sc_if1->sk_ifp;
2625	if (ifp0 != NULL)
2626		sk_stop(sc_if0);
2627	if (ifp1 != NULL)
2628		sk_stop(sc_if1);
2629	sc->sk_suspended = 1;
2630
2631	SK_UNLOCK(sc);
2632
2633	return (0);
2634}
2635
2636static int
2637skc_resume(dev)
2638	device_t		dev;
2639{
2640	struct sk_softc		*sc;
2641	struct sk_if_softc	*sc_if0, *sc_if1;
2642	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2643
2644	sc = device_get_softc(dev);
2645
2646	SK_LOCK(sc);
2647
2648	sc_if0 = sc->sk_if[SK_PORT_A];
2649	sc_if1 = sc->sk_if[SK_PORT_B];
2650	if (sc_if0 != NULL)
2651		ifp0 = sc_if0->sk_ifp;
2652	if (sc_if1 != NULL)
2653		ifp1 = sc_if1->sk_ifp;
2654	if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
2655		sk_init_locked(sc_if0);
2656	if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
2657		sk_init_locked(sc_if1);
2658	sc->sk_suspended = 0;
2659
2660	SK_UNLOCK(sc);
2661
2662	return (0);
2663}
2664
2665/*
2666 * According to the data sheet from SK-NET GENESIS the hardware can compute
2667 * two Rx checksums at the same time(Each checksum start position is
2668 * programmed in Rx descriptors). However it seems that TCP/UDP checksum
2669 * does not work at least on my Yukon hardware. I tried every possible ways
2670 * to get correct checksum value but couldn't get correct one. So TCP/UDP
2671 * checksum offload was disabled at the moment and only IP checksum offload
2672 * was enabled.
2673 * As nomral IP header size is 20 bytes I can't expect it would give an
2674 * increase in throughput. However it seems it doesn't hurt performance in
2675 * my testing. If there is a more detailed information for checksum secret
2676 * of the hardware in question please contact yongari@FreeBSD.org to add
2677 * TCP/UDP checksum offload support.
2678 */
2679static __inline void
2680sk_rxcksum(ifp, m, csum)
2681	struct ifnet		*ifp;
2682	struct mbuf		*m;
2683	u_int32_t		csum;
2684{
2685	struct ether_header	*eh;
2686	struct ip		*ip;
2687	int32_t			hlen, len, pktlen;
2688	u_int16_t		csum1, csum2, ipcsum;
2689
2690	pktlen = m->m_pkthdr.len;
2691	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
2692		return;
2693	eh = mtod(m, struct ether_header *);
2694	if (eh->ether_type != htons(ETHERTYPE_IP))
2695		return;
2696	ip = (struct ip *)(eh + 1);
2697	if (ip->ip_v != IPVERSION)
2698		return;
2699	hlen = ip->ip_hl << 2;
2700	pktlen -= sizeof(struct ether_header);
2701	if (hlen < sizeof(struct ip))
2702		return;
2703	if (ntohs(ip->ip_len) < hlen)
2704		return;
2705	if (ntohs(ip->ip_len) != pktlen)
2706		return;
2707
2708	csum1 = htons(csum & 0xffff);
2709	csum2 = htons((csum >> 16) & 0xffff);
2710	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
2711	/* checksum fixup for IP options */
2712	len = hlen - sizeof(struct ip);
2713	if (len > 0) {
2714		/*
2715		 * If the second checksum value is correct we can compute IP
2716		 * checksum with simple math. Unfortunately the second checksum
2717		 * value is wrong so we can't verify the checksum from the
2718		 * value(It seems there is some magic here to get correct
2719		 * value). If the second checksum value is correct it also
2720		 * means we can get TCP/UDP checksum) here. However, it still
2721		 * needs pseudo header checksum calculation due to hardware
2722		 * limitations.
2723		 */
2724		return;
2725	}
2726	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2727	if (ipcsum == 0xffff)
2728		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2729}
2730
2731static __inline int
2732sk_rxvalid(sc, stat, len)
2733	struct sk_softc		*sc;
2734	u_int32_t		stat, len;
2735{
2736
2737	if (sc->sk_type == SK_GENESIS) {
2738		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
2739		    XM_RXSTAT_BYTES(stat) != len)
2740			return (0);
2741	} else {
2742		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
2743		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
2744		    YU_RXSTAT_JABBER)) != 0 ||
2745		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
2746		    YU_RXSTAT_BYTES(stat) != len)
2747			return (0);
2748	}
2749
2750	return (1);
2751}
2752
2753static void
2754sk_rxeof(sc_if)
2755	struct sk_if_softc	*sc_if;
2756{
2757	struct sk_softc		*sc;
2758	struct mbuf		*m;
2759	struct ifnet		*ifp;
2760	struct sk_rx_desc	*cur_rx;
2761	struct sk_rxdesc	*rxd;
2762	int			cons, prog;
2763	u_int32_t		csum, rxstat, sk_ctl;
2764
2765	sc = sc_if->sk_softc;
2766	ifp = sc_if->sk_ifp;
2767
2768	SK_IF_LOCK_ASSERT(sc_if);
2769
2770	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2771	    sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD);
2772
2773	prog = 0;
2774	for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT;
2775	    prog++, SK_INC(cons, SK_RX_RING_CNT)) {
2776		cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons];
2777		sk_ctl = le32toh(cur_rx->sk_ctl);
2778		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2779			break;
2780		rxd = &sc_if->sk_cdata.sk_rxdesc[cons];
2781		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2782
2783		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2784		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2785		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2786		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2787		    SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN ||
2788		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2789			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2790			sk_discard_rxbuf(sc_if, cons);
2791			continue;
2792		}
2793
2794		m = rxd->rx_m;
2795		csum = le32toh(cur_rx->sk_csum);
2796		if (sk_newbuf(sc_if, cons) != 0) {
2797			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2798			/* reuse old buffer */
2799			sk_discard_rxbuf(sc_if, cons);
2800			continue;
2801		}
2802		m->m_pkthdr.rcvif = ifp;
2803		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2804		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2805		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2806			sk_rxcksum(ifp, m, csum);
2807		SK_IF_UNLOCK(sc_if);
2808		(*ifp->if_input)(ifp, m);
2809		SK_IF_LOCK(sc_if);
2810	}
2811
2812	if (prog > 0) {
2813		sc_if->sk_cdata.sk_rx_cons = cons;
2814		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
2815		    sc_if->sk_cdata.sk_rx_ring_map,
2816		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2817	}
2818}
2819
2820static void
2821sk_jumbo_rxeof(sc_if)
2822	struct sk_if_softc	*sc_if;
2823{
2824	struct sk_softc		*sc;
2825	struct mbuf		*m;
2826	struct ifnet		*ifp;
2827	struct sk_rx_desc	*cur_rx;
2828	struct sk_rxdesc	*jrxd;
2829	int			cons, prog;
2830	u_int32_t		csum, rxstat, sk_ctl;
2831
2832	sc = sc_if->sk_softc;
2833	ifp = sc_if->sk_ifp;
2834
2835	SK_IF_LOCK_ASSERT(sc_if);
2836
2837	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2838	    sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD);
2839
2840	prog = 0;
2841	for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons;
2842	    prog < SK_JUMBO_RX_RING_CNT;
2843	    prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) {
2844		cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons];
2845		sk_ctl = le32toh(cur_rx->sk_ctl);
2846		if ((sk_ctl & SK_RXCTL_OWN) != 0)
2847			break;
2848		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons];
2849		rxstat = le32toh(cur_rx->sk_xmac_rxstat);
2850
2851		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
2852		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
2853		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
2854		    SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN ||
2855		    SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN ||
2856		    sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) {
2857			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2858			sk_discard_jumbo_rxbuf(sc_if, cons);
2859			continue;
2860		}
2861
2862		m = jrxd->rx_m;
2863		csum = le32toh(cur_rx->sk_csum);
2864		if (sk_jumbo_newbuf(sc_if, cons) != 0) {
2865			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2866			/* reuse old buffer */
2867			sk_discard_jumbo_rxbuf(sc_if, cons);
2868			continue;
2869		}
2870		m->m_pkthdr.rcvif = ifp;
2871		m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl);
2872		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2873		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2874			sk_rxcksum(ifp, m, csum);
2875		SK_IF_UNLOCK(sc_if);
2876		(*ifp->if_input)(ifp, m);
2877		SK_IF_LOCK(sc_if);
2878	}
2879
2880	if (prog > 0) {
2881		sc_if->sk_cdata.sk_jumbo_rx_cons = cons;
2882		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
2883		    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
2884		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2885	}
2886}
2887
2888static void
2889sk_txeof(sc_if)
2890	struct sk_if_softc	*sc_if;
2891{
2892	struct sk_txdesc	*txd;
2893	struct sk_tx_desc	*cur_tx;
2894	struct ifnet		*ifp;
2895	u_int32_t		idx, sk_ctl;
2896
2897	ifp = sc_if->sk_ifp;
2898
2899	txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2900	if (txd == NULL)
2901		return;
2902	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2903	    sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2904	/*
2905	 * Go through our tx ring and free mbufs for those
2906	 * frames that have been sent.
2907	 */
2908	for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) {
2909		if (sc_if->sk_cdata.sk_tx_cnt <= 0)
2910			break;
2911		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2912		sk_ctl = le32toh(cur_tx->sk_ctl);
2913		if (sk_ctl & SK_TXCTL_OWN)
2914			break;
2915		sc_if->sk_cdata.sk_tx_cnt--;
2916		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2917		if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0)
2918			continue;
2919		bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
2920		    BUS_DMASYNC_POSTWRITE);
2921		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
2922
2923		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2924		m_freem(txd->tx_m);
2925		txd->tx_m = NULL;
2926		STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q);
2927		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
2928		txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq);
2929	}
2930	sc_if->sk_cdata.sk_tx_cons = idx;
2931	sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
2932
2933	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
2934	    sc_if->sk_cdata.sk_tx_ring_map,
2935	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2936}
2937
2938static void
2939sk_tick(xsc_if)
2940	void			*xsc_if;
2941{
2942	struct sk_if_softc	*sc_if;
2943	struct mii_data		*mii;
2944	struct ifnet		*ifp;
2945	int			i;
2946
2947	sc_if = xsc_if;
2948	ifp = sc_if->sk_ifp;
2949	mii = device_get_softc(sc_if->sk_miibus);
2950
2951	if (!(ifp->if_flags & IFF_UP))
2952		return;
2953
2954	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2955		sk_intr_bcom(sc_if);
2956		return;
2957	}
2958
2959	/*
2960	 * According to SysKonnect, the correct way to verify that
2961	 * the link has come back up is to poll bit 0 of the GPIO
2962	 * register three times. This pin has the signal from the
2963	 * link_sync pin connected to it; if we read the same link
2964	 * state 3 times in a row, we know the link is up.
2965	 */
2966	for (i = 0; i < 3; i++) {
2967		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2968			break;
2969	}
2970
2971	if (i != 3) {
2972		callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
2973		return;
2974	}
2975
2976	/* Turn the GP0 interrupt back on. */
2977	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2978	SK_XM_READ_2(sc_if, XM_ISR);
2979	mii_tick(mii);
2980	callout_stop(&sc_if->sk_tick_ch);
2981}
2982
2983static void
2984sk_yukon_tick(xsc_if)
2985	void			*xsc_if;
2986{
2987	struct sk_if_softc	*sc_if;
2988	struct mii_data		*mii;
2989
2990	sc_if = xsc_if;
2991	mii = device_get_softc(sc_if->sk_miibus);
2992
2993	mii_tick(mii);
2994	callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
2995}
2996
2997static void
2998sk_intr_bcom(sc_if)
2999	struct sk_if_softc	*sc_if;
3000{
3001	struct mii_data		*mii;
3002	struct ifnet		*ifp;
3003	int			status;
3004	mii = device_get_softc(sc_if->sk_miibus);
3005	ifp = sc_if->sk_ifp;
3006
3007	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3008
3009	/*
3010	 * Read the PHY interrupt register to make sure
3011	 * we clear any pending interrupts.
3012	 */
3013	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
3014
3015	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3016		sk_init_xmac(sc_if);
3017		return;
3018	}
3019
3020	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
3021		int			lstat;
3022		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
3023		    BRGPHY_MII_AUXSTS);
3024
3025		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
3026			mii_mediachg(mii);
3027			/* Turn off the link LED. */
3028			SK_IF_WRITE_1(sc_if, 0,
3029			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
3030			sc_if->sk_link = 0;
3031		} else if (status & BRGPHY_ISR_LNK_CHG) {
3032			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3033	    		    BRGPHY_MII_IMR, 0xFF00);
3034			mii_tick(mii);
3035			sc_if->sk_link = 1;
3036			/* Turn on the link LED. */
3037			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3038			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
3039			    SK_LINKLED_BLINK_OFF);
3040		} else {
3041			mii_tick(mii);
3042			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3043		}
3044	}
3045
3046	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3047
3048	return;
3049}
3050
3051static void
3052sk_intr_xmac(sc_if)
3053	struct sk_if_softc	*sc_if;
3054{
3055	struct sk_softc		*sc;
3056	u_int16_t		status;
3057
3058	sc = sc_if->sk_softc;
3059	status = SK_XM_READ_2(sc_if, XM_ISR);
3060
3061	/*
3062	 * Link has gone down. Start MII tick timeout to
3063	 * watch for link resync.
3064	 */
3065	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
3066		if (status & XM_ISR_GP0_SET) {
3067			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
3068			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3069		}
3070
3071		if (status & XM_ISR_AUTONEG_DONE) {
3072			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
3073		}
3074	}
3075
3076	if (status & XM_IMR_TX_UNDERRUN)
3077		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
3078
3079	if (status & XM_IMR_RX_OVERRUN)
3080		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
3081
3082	status = SK_XM_READ_2(sc_if, XM_ISR);
3083
3084	return;
3085}
3086
3087static void
3088sk_intr_yukon(sc_if)
3089	struct sk_if_softc	*sc_if;
3090{
3091	u_int8_t status;
3092
3093	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
3094	/* RX overrun */
3095	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
3096		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3097		    SK_RFCTL_RX_FIFO_OVER);
3098	}
3099	/* TX underrun */
3100	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
3101		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
3102		    SK_TFCTL_TX_FIFO_UNDER);
3103	}
3104}
3105
3106static void
3107sk_intr(xsc)
3108	void			*xsc;
3109{
3110	struct sk_softc		*sc = xsc;
3111	struct sk_if_softc	*sc_if0, *sc_if1;
3112	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
3113	u_int32_t		status;
3114
3115	SK_LOCK(sc);
3116
3117#ifndef __HAIKU__
3118	status = CSR_READ_4(sc, SK_ISSR);
3119	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3120		goto done_locked;
3121#endif
3122
3123	sc_if0 = sc->sk_if[SK_PORT_A];
3124	sc_if1 = sc->sk_if[SK_PORT_B];
3125
3126	if (sc_if0 != NULL)
3127		ifp0 = sc_if0->sk_ifp;
3128	if (sc_if1 != NULL)
3129		ifp1 = sc_if1->sk_ifp;
3130
3131#ifndef __HAIKU__
3132	for (; (status &= sc->sk_intrmask) != 0;) {
3133#else
3134	status = atomic_get((int32 *)&sc->sk_intstatus);
3135	status &= sc->sk_intrmask;
3136	while (true) {
3137
3138		if (status == 0 || status == 0xffffffff || sc->sk_suspended)
3139			goto done_locked;
3140#endif
3141		/* Handle receive interrupts first. */
3142		if (status & SK_ISR_RX1_EOF) {
3143			if (ifp0->if_mtu > SK_MAX_FRAMELEN)
3144				sk_jumbo_rxeof(sc_if0);
3145			else
3146				sk_rxeof(sc_if0);
3147			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
3148			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3149		}
3150		if (status & SK_ISR_RX2_EOF) {
3151			if (ifp1->if_mtu > SK_MAX_FRAMELEN)
3152				sk_jumbo_rxeof(sc_if1);
3153			else
3154				sk_rxeof(sc_if1);
3155			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
3156			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
3157		}
3158
3159		/* Then transmit interrupts. */
3160		if (status & SK_ISR_TX1_S_EOF) {
3161			sk_txeof(sc_if0);
3162			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
3163		}
3164		if (status & SK_ISR_TX2_S_EOF) {
3165			sk_txeof(sc_if1);
3166			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
3167		}
3168
3169		/* Then MAC interrupts. */
3170		if (status & SK_ISR_MAC1 &&
3171		    ifp0->if_drv_flags & IFF_DRV_RUNNING) {
3172			if (sc->sk_type == SK_GENESIS)
3173				sk_intr_xmac(sc_if0);
3174			else
3175				sk_intr_yukon(sc_if0);
3176		}
3177
3178		if (status & SK_ISR_MAC2 &&
3179		    ifp1->if_drv_flags & IFF_DRV_RUNNING) {
3180			if (sc->sk_type == SK_GENESIS)
3181				sk_intr_xmac(sc_if1);
3182			else
3183				sk_intr_yukon(sc_if1);
3184		}
3185
3186		if (status & SK_ISR_EXTERNAL_REG) {
3187			if (ifp0 != NULL &&
3188			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
3189				sk_intr_bcom(sc_if0);
3190			if (ifp1 != NULL &&
3191			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
3192				sk_intr_bcom(sc_if1);
3193		}
3194		status = CSR_READ_4(sc, SK_ISSR);
3195#ifdef __HAIKU__
3196		if (((status & sc->sk_intrmask) == 0) || status == 0xffffffff ||
3197			sc->sk_suspended) {
3198			break;
3199		}
3200#endif
3201	}
3202
3203	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3204
3205	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
3206		sk_start_locked(ifp0);
3207	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
3208		sk_start_locked(ifp1);
3209
3210done_locked:
3211	SK_UNLOCK(sc);
3212}
3213
3214static void
3215sk_init_xmac(sc_if)
3216	struct sk_if_softc	*sc_if;
3217{
3218	struct sk_softc		*sc;
3219	struct ifnet		*ifp;
3220	u_int16_t		eaddr[(ETHER_ADDR_LEN+1)/2];
3221	static const struct sk_bcom_hack bhack[] = {
3222	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
3223	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
3224	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
3225	{ 0, 0 } };
3226
3227	SK_IF_LOCK_ASSERT(sc_if);
3228
3229	sc = sc_if->sk_softc;
3230	ifp = sc_if->sk_ifp;
3231
3232	/* Unreset the XMAC. */
3233	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
3234	DELAY(1000);
3235
3236	/* Reset the XMAC's internal state. */
3237	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3238
3239	/* Save the XMAC II revision */
3240	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
3241
3242	/*
3243	 * Perform additional initialization for external PHYs,
3244	 * namely for the 1000baseTX cards that use the XMAC's
3245	 * GMII mode.
3246	 */
3247	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3248		int			i = 0;
3249		u_int32_t		val;
3250
3251		/* Take PHY out of reset. */
3252		val = sk_win_read_4(sc, SK_GPIO);
3253		if (sc_if->sk_port == SK_PORT_A)
3254			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
3255		else
3256			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
3257		sk_win_write_4(sc, SK_GPIO, val);
3258
3259		/* Enable GMII mode on the XMAC. */
3260		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
3261
3262		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3263		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
3264		DELAY(10000);
3265		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3266		    BRGPHY_MII_IMR, 0xFFF0);
3267
3268		/*
3269		 * Early versions of the BCM5400 apparently have
3270		 * a bug that requires them to have their reserved
3271		 * registers initialized to some magic values. I don't
3272		 * know what the numbers do, I'm just the messenger.
3273		 */
3274		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
3275		    == 0x6041) {
3276			while(bhack[i].reg) {
3277				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
3278				    bhack[i].reg, bhack[i].val);
3279				i++;
3280			}
3281		}
3282	}
3283
3284	/* Set station address */
3285	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
3286	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
3287	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
3288	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
3289	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
3290
3291	if (ifp->if_flags & IFF_BROADCAST) {
3292		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3293	} else {
3294		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
3295	}
3296
3297	/* We don't need the FCS appended to the packet. */
3298	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
3299
3300	/* We want short frames padded to 60 bytes. */
3301	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
3302
3303	/*
3304	 * Enable the reception of all error frames. This is is
3305	 * a necessary evil due to the design of the XMAC. The
3306	 * XMAC's receive FIFO is only 8K in size, however jumbo
3307	 * frames can be up to 9000 bytes in length. When bad
3308	 * frame filtering is enabled, the XMAC's RX FIFO operates
3309	 * in 'store and forward' mode. For this to work, the
3310	 * entire frame has to fit into the FIFO, but that means
3311	 * that jumbo frames larger than 8192 bytes will be
3312	 * truncated. Disabling all bad frame filtering causes
3313	 * the RX FIFO to operate in streaming mode, in which
3314	 * case the XMAC will start transferring frames out of the
3315	 * RX FIFO as soon as the FIFO threshold is reached.
3316	 */
3317	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3318		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
3319		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
3320		    XM_MODE_RX_INRANGELEN);
3321		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3322	} else
3323		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
3324
3325	/*
3326	 * Bump up the transmit threshold. This helps hold off transmit
3327	 * underruns when we're blasting traffic from both ports at once.
3328	 */
3329	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
3330
3331	/* Set Rx filter */
3332	sk_rxfilter_genesis(sc_if);
3333
3334	/* Clear and enable interrupts */
3335	SK_XM_READ_2(sc_if, XM_ISR);
3336	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
3337		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
3338	else
3339		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3340
3341	/* Configure MAC arbiter */
3342	switch(sc_if->sk_xmac_rev) {
3343	case XM_XMAC_REV_B2:
3344		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
3345		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
3346		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
3347		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
3348		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
3349		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
3350		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
3351		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
3352		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3353		break;
3354	case XM_XMAC_REV_C1:
3355		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
3356		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
3357		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
3358		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
3359		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
3360		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
3361		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
3362		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
3363		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
3364		break;
3365	default:
3366		break;
3367	}
3368	sk_win_write_2(sc, SK_MACARB_CTL,
3369	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
3370
3371	sc_if->sk_link = 1;
3372
3373	return;
3374}
3375
3376static void
3377sk_init_yukon(sc_if)
3378	struct sk_if_softc	*sc_if;
3379{
3380	u_int32_t		phy, v;
3381	u_int16_t		reg;
3382	struct sk_softc		*sc;
3383	struct ifnet		*ifp;
3384	u_int8_t		*eaddr;
3385	int			i;
3386
3387	SK_IF_LOCK_ASSERT(sc_if);
3388
3389	sc = sc_if->sk_softc;
3390	ifp = sc_if->sk_ifp;
3391
3392	if (sc->sk_type == SK_YUKON_LITE &&
3393	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3394		/*
3395		 * Workaround code for COMA mode, set PHY reset.
3396		 * Otherwise it will not correctly take chip out of
3397		 * powerdown (coma)
3398		 */
3399		v = sk_win_read_4(sc, SK_GPIO);
3400		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
3401		sk_win_write_4(sc, SK_GPIO, v);
3402	}
3403
3404	/* GMAC and GPHY Reset */
3405	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
3406	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
3407	DELAY(1000);
3408
3409	if (sc->sk_type == SK_YUKON_LITE &&
3410	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
3411		/*
3412		 * Workaround code for COMA mode, clear PHY reset
3413		 */
3414		v = sk_win_read_4(sc, SK_GPIO);
3415		v |= SK_GPIO_DIR9;
3416		v &= ~SK_GPIO_DAT9;
3417		sk_win_write_4(sc, SK_GPIO, v);
3418	}
3419
3420	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
3421		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
3422
3423	if (sc->sk_coppertype)
3424		phy |= SK_GPHY_COPPER;
3425	else
3426		phy |= SK_GPHY_FIBER;
3427
3428	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
3429	DELAY(1000);
3430	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
3431	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
3432		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
3433
3434	/* unused read of the interrupt source register */
3435	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
3436
3437	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
3438
3439	/* MIB Counter Clear Mode set */
3440	reg |= YU_PAR_MIB_CLR;
3441	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3442
3443	/* MIB Counter Clear Mode clear */
3444	reg &= ~YU_PAR_MIB_CLR;
3445	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
3446
3447	/* receive control reg */
3448	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
3449
3450	/* transmit parameter register */
3451	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
3452		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
3453
3454	/* serial mode register */
3455	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
3456	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3457		reg |= YU_SMR_MFL_JUMBO;
3458	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
3459
3460	/* Setup Yukon's station address */
3461	eaddr = IF_LLADDR(sc_if->sk_ifp);
3462	for (i = 0; i < 3; i++)
3463		SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4,
3464		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3465	/* Set GMAC source address of flow control. */
3466	for (i = 0; i < 3; i++)
3467		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
3468		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3469	/* Set GMAC virtual address. */
3470	for (i = 0; i < 3; i++)
3471		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4,
3472		    eaddr[i * 2] | eaddr[i * 2 + 1] << 8);
3473
3474	/* Set Rx filter */
3475	sk_rxfilter_yukon(sc_if);
3476
3477	/* enable interrupt mask for counter overflows */
3478	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
3479	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
3480	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
3481
3482	/* Configure RX MAC FIFO Flush Mask */
3483	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
3484	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
3485	    YU_RXSTAT_JABBER;
3486	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
3487
3488	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
3489	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
3490		v = SK_TFCTL_OPERATION_ON;
3491	else
3492		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
3493	/* Configure RX MAC FIFO */
3494	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
3495	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
3496
3497	/* Increase flush threshould to 64 bytes */
3498	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
3499	    SK_RFCTL_FIFO_THRESHOLD + 1);
3500
3501	/* Configure TX MAC FIFO */
3502	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
3503	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
3504}
3505
3506/*
3507 * Note that to properly initialize any part of the GEnesis chip,
3508 * you first have to take it out of reset mode.
3509 */
3510static void
3511sk_init(xsc)
3512	void			*xsc;
3513{
3514	struct sk_if_softc	*sc_if = xsc;
3515
3516	SK_IF_LOCK(sc_if);
3517	sk_init_locked(sc_if);
3518	SK_IF_UNLOCK(sc_if);
3519
3520	return;
3521}
3522
3523static void
3524sk_init_locked(sc_if)
3525	struct sk_if_softc	*sc_if;
3526{
3527	struct sk_softc		*sc;
3528	struct ifnet		*ifp;
3529	struct mii_data		*mii;
3530	u_int16_t		reg;
3531	u_int32_t		imr;
3532	int			error;
3533
3534	SK_IF_LOCK_ASSERT(sc_if);
3535
3536	ifp = sc_if->sk_ifp;
3537	sc = sc_if->sk_softc;
3538	mii = device_get_softc(sc_if->sk_miibus);
3539
3540	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3541		return;
3542
3543	/* Cancel pending I/O and free all RX/TX buffers. */
3544	sk_stop(sc_if);
3545
3546	if (sc->sk_type == SK_GENESIS) {
3547		/* Configure LINK_SYNC LED */
3548		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
3549		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
3550			SK_LINKLED_LINKSYNC_ON);
3551
3552		/* Configure RX LED */
3553		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
3554			SK_RXLEDCTL_COUNTER_START);
3555
3556		/* Configure TX LED */
3557		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
3558			SK_TXLEDCTL_COUNTER_START);
3559	}
3560
3561	/*
3562	 * Configure descriptor poll timer
3563	 *
3564	 * SK-NET GENESIS data sheet says that possibility of losing Start
3565	 * transmit command due to CPU/cache related interim storage problems
3566	 * under certain conditions. The document recommends a polling
3567	 * mechanism to send a Start transmit command to initiate transfer
3568	 * of ready descriptors regulary. To cope with this issue sk(4) now
3569	 * enables descriptor poll timer to initiate descriptor processing
3570	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
3571	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
3572	 * command instead of waiting for next descriptor polling time.
3573	 * The same rule may apply to Rx side too but it seems that is not
3574	 * needed at the moment.
3575	 * Since sk(4) uses descriptor polling as a last resort there is no
3576	 * need to set smaller polling time than maximum allowable one.
3577	 */
3578	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
3579
3580	/* Configure I2C registers */
3581
3582	/* Configure XMAC(s) */
3583	switch (sc->sk_type) {
3584	case SK_GENESIS:
3585		sk_init_xmac(sc_if);
3586		break;
3587	case SK_YUKON:
3588	case SK_YUKON_LITE:
3589	case SK_YUKON_LP:
3590		sk_init_yukon(sc_if);
3591		break;
3592	}
3593	mii_mediachg(mii);
3594
3595	if (sc->sk_type == SK_GENESIS) {
3596		/* Configure MAC FIFOs */
3597		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
3598		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
3599		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
3600
3601		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
3602		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
3603		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
3604	}
3605
3606	/* Configure transmit arbiter(s) */
3607	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
3608	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
3609
3610	/* Configure RAMbuffers */
3611	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
3612	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
3613	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
3614	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
3615	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
3616	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
3617
3618	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
3619	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
3620	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
3621	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
3622	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
3623	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
3624	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
3625
3626	/* Configure BMUs */
3627	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
3628	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
3629		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3630		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3631		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3632		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
3633	} else {
3634		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
3635		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
3636		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
3637		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
3638	}
3639
3640	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
3641	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
3642	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
3643	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
3644	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));
3645
3646	/* Init descriptors */
3647	if (ifp->if_mtu > SK_MAX_FRAMELEN)
3648		error = sk_init_jumbo_rx_ring(sc_if);
3649	else
3650		error = sk_init_rx_ring(sc_if);
3651	if (error != 0) {
3652		device_printf(sc_if->sk_if_dev,
3653		    "initialization failed: no memory for rx buffers\n");
3654		sk_stop(sc_if);
3655		return;
3656	}
3657	sk_init_tx_ring(sc_if);
3658
3659	/* Set interrupt moderation if changed via sysctl. */
3660	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
3661	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
3662		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
3663		    sc->sk_int_ticks));
3664		if (bootverbose)
3665			device_printf(sc_if->sk_if_dev,
3666			    "interrupt moderation is %d us.\n",
3667			    sc->sk_int_mod);
3668	}
3669
3670	/* Configure interrupt handling */
3671	CSR_READ_4(sc, SK_ISSR);
3672	if (sc_if->sk_port == SK_PORT_A)
3673		sc->sk_intrmask |= SK_INTRS1;
3674	else
3675		sc->sk_intrmask |= SK_INTRS2;
3676
3677	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
3678
3679	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3680
3681	/* Start BMUs. */
3682	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
3683
3684	switch(sc->sk_type) {
3685	case SK_GENESIS:
3686		/* Enable XMACs TX and RX state machines */
3687		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
3688		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
3689		break;
3690	case SK_YUKON:
3691	case SK_YUKON_LITE:
3692	case SK_YUKON_LP:
3693		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
3694		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
3695#if 0
3696		/* XXX disable 100Mbps and full duplex mode? */
3697		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
3698#endif
3699		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
3700	}
3701
3702	/* Activate descriptor polling timer */
3703	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
3704	/* start transfer of Tx descriptors */
3705	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
3706
3707	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3708	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3709
3710	switch (sc->sk_type) {
3711	case SK_YUKON:
3712	case SK_YUKON_LITE:
3713	case SK_YUKON_LP:
3714		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
3715		break;
3716	}
3717
3718	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);
3719
3720	return;
3721}
3722
3723static void
3724sk_stop(sc_if)
3725	struct sk_if_softc	*sc_if;
3726{
3727	int			i;
3728	struct sk_softc		*sc;
3729	struct sk_txdesc	*txd;
3730	struct sk_rxdesc	*rxd;
3731	struct sk_rxdesc	*jrxd;
3732	struct ifnet		*ifp;
3733	u_int32_t		val;
3734
3735	SK_IF_LOCK_ASSERT(sc_if);
3736	sc = sc_if->sk_softc;
3737	ifp = sc_if->sk_ifp;
3738
3739	callout_stop(&sc_if->sk_tick_ch);
3740	callout_stop(&sc_if->sk_watchdog_ch);
3741
3742	/* stop Tx descriptor polling timer */
3743	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
3744	/* stop transfer of Tx descriptors */
3745	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
3746	for (i = 0; i < SK_TIMEOUT; i++) {
3747		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
3748		if ((val & SK_TXBMU_TX_STOP) == 0)
3749			break;
3750		DELAY(1);
3751	}
3752	if (i == SK_TIMEOUT)
3753		device_printf(sc_if->sk_if_dev,
3754		    "can not stop transfer of Tx descriptor\n");
3755	/* stop transfer of Rx descriptors */
3756	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
3757	for (i = 0; i < SK_TIMEOUT; i++) {
3758		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
3759		if ((val & SK_RXBMU_RX_STOP) == 0)
3760			break;
3761		DELAY(1);
3762	}
3763	if (i == SK_TIMEOUT)
3764		device_printf(sc_if->sk_if_dev,
3765		    "can not stop transfer of Rx descriptor\n");
3766
3767	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
3768		/* Put PHY back into reset. */
3769		val = sk_win_read_4(sc, SK_GPIO);
3770		if (sc_if->sk_port == SK_PORT_A) {
3771			val |= SK_GPIO_DIR0;
3772			val &= ~SK_GPIO_DAT0;
3773		} else {
3774			val |= SK_GPIO_DIR2;
3775			val &= ~SK_GPIO_DAT2;
3776		}
3777		sk_win_write_4(sc, SK_GPIO, val);
3778	}
3779
3780	/* Turn off various components of this interface. */
3781	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
3782	switch (sc->sk_type) {
3783	case SK_GENESIS:
3784		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
3785		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
3786		break;
3787	case SK_YUKON:
3788	case SK_YUKON_LITE:
3789	case SK_YUKON_LP:
3790		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
3791		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
3792		break;
3793	}
3794	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
3795	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3796	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
3797	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
3798	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
3799	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3800	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
3801	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
3802	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
3803
3804	/* Disable interrupts */
3805	if (sc_if->sk_port == SK_PORT_A)
3806		sc->sk_intrmask &= ~SK_INTRS1;
3807	else
3808		sc->sk_intrmask &= ~SK_INTRS2;
3809	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
3810
3811	SK_XM_READ_2(sc_if, XM_ISR);
3812	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
3813
3814	/* Free RX and TX mbufs still in the queues. */
3815	for (i = 0; i < SK_RX_RING_CNT; i++) {
3816		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
3817		if (rxd->rx_m != NULL) {
3818			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
3819			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3820			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
3821			    rxd->rx_dmamap);
3822			m_freem(rxd->rx_m);
3823			rxd->rx_m = NULL;
3824		}
3825	}
3826	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
3827		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
3828		if (jrxd->rx_m != NULL) {
3829			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
3830			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3831			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
3832			    jrxd->rx_dmamap);
3833			m_freem(jrxd->rx_m);
3834			jrxd->rx_m = NULL;
3835		}
3836	}
3837	for (i = 0; i < SK_TX_RING_CNT; i++) {
3838		txd = &sc_if->sk_cdata.sk_txdesc[i];
3839		if (txd->tx_m != NULL) {
3840			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
3841			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3842			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
3843			    txd->tx_dmamap);
3844			m_freem(txd->tx_m);
3845			txd->tx_m = NULL;
3846		}
3847	}
3848
3849	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
3850
3851	return;
3852}
3853
3854static int
3855sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3856{
3857	int error, value;
3858
3859	if (!arg1)
3860		return (EINVAL);
3861	value = *(int *)arg1;
3862	error = sysctl_handle_int(oidp, &value, 0, req);
3863	if (error || !req->newptr)
3864		return (error);
3865	if (value < low || value > high)
3866		return (EINVAL);
3867	*(int *)arg1 = value;
3868	return (0);
3869}
3870
3871static int
3872sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3873{
3874	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3875}
3876