if_sk.c revision 112928
1/*
2 * Copyright (c) 1997, 1998, 1999, 2000
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: head/sys/dev/sk/if_sk.c 112928 2003-04-01 08:10:21Z phk $
33 */
34
35/*
36 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37 * the SK-984x series adapters, both single port and dual port.
38 * References:
39 * 	The XaQti XMAC II datasheet,
40 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
41 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
42 *
43 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
44 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
45 * convenience to others until Vitesse corrects this problem:
46 *
47 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
48 *
49 * Written by Bill Paul <wpaul@ee.columbia.edu>
50 * Department of Electrical Engineering
51 * Columbia University, New York City
52 */
53
54/*
55 * The SysKonnect gigabit ethernet adapters consist of two main
56 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
57 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
58 * components and a PHY while the GEnesis controller provides a PCI
59 * interface with DMA support. Each card may have between 512K and
60 * 2MB of SRAM on board depending on the configuration.
61 *
62 * The SysKonnect GEnesis controller can have either one or two XMAC
63 * chips connected to it, allowing single or dual port NIC configurations.
64 * SysKonnect has the distinction of being the only vendor on the market
65 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
66 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
67 * XMAC registers. This driver takes advantage of these features to allow
68 * both XMACs to operate as independent interfaces.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/sockio.h>
74#include <sys/mbuf.h>
75#include <sys/malloc.h>
76#include <sys/kernel.h>
77#include <sys/socket.h>
78#include <sys/queue.h>
79
80#include <net/if.h>
81#include <net/if_arp.h>
82#include <net/ethernet.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85
86#include <net/bpf.h>
87
88#include <vm/vm.h>              /* for vtophys */
89#include <vm/pmap.h>            /* for vtophys */
90#include <machine/bus_pio.h>
91#include <machine/bus_memio.h>
92#include <machine/bus.h>
93#include <machine/resource.h>
94#include <sys/bus.h>
95#include <sys/rman.h>
96
97#include <dev/mii/mii.h>
98#include <dev/mii/miivar.h>
99#include <dev/mii/brgphyreg.h>
100
101#include <pci/pcireg.h>
102#include <pci/pcivar.h>
103
104#define SK_USEIOSPACE
105
106#include <pci/if_skreg.h>
107#include <pci/xmaciireg.h>
108
109MODULE_DEPEND(sk, miibus, 1, 1, 1);
110
111/* "controller miibus0" required.  See GENERIC if you get errors here. */
112#include "miibus_if.h"
113
114#ifndef lint
115static const char rcsid[] =
116  "$FreeBSD: head/sys/dev/sk/if_sk.c 112928 2003-04-01 08:10:21Z phk $";
117#endif
118
119static struct sk_type sk_devs[] = {
120	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
121	{ 0, 0, NULL }
122};
123
124static int sk_probe		(device_t);
125static int sk_attach		(device_t);
126static int sk_detach		(device_t);
127static int sk_detach_xmac	(device_t);
128static int sk_probe_xmac	(device_t);
129static int sk_attach_xmac	(device_t);
130static void sk_tick		(void *);
131static void sk_intr		(void *);
132static void sk_intr_xmac	(struct sk_if_softc *);
133static void sk_intr_bcom	(struct sk_if_softc *);
134static void sk_rxeof		(struct sk_if_softc *);
135static void sk_txeof		(struct sk_if_softc *);
136static int sk_encap		(struct sk_if_softc *, struct mbuf *,
137					u_int32_t *);
138static void sk_start		(struct ifnet *);
139static int sk_ioctl		(struct ifnet *, u_long, caddr_t);
140static void sk_init		(void *);
141static void sk_init_xmac	(struct sk_if_softc *);
142static void sk_stop		(struct sk_if_softc *);
143static void sk_watchdog		(struct ifnet *);
144static void sk_shutdown		(device_t);
145static int sk_ifmedia_upd	(struct ifnet *);
146static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
147static void sk_reset		(struct sk_softc *);
148static int sk_newbuf		(struct sk_if_softc *,
149					struct sk_chain *, struct mbuf *);
150static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
151static void *sk_jalloc		(struct sk_if_softc *);
152static void sk_jfree		(void *, void *);
153static int sk_init_rx_ring	(struct sk_if_softc *);
154static void sk_init_tx_ring	(struct sk_if_softc *);
155static u_int32_t sk_win_read_4	(struct sk_softc *, int);
156static u_int16_t sk_win_read_2	(struct sk_softc *, int);
157static u_int8_t sk_win_read_1	(struct sk_softc *, int);
158static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
159static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
160static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
161static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
162static void sk_vpd_read_res	(struct sk_softc *, struct vpd_res *, int);
163static void sk_vpd_read		(struct sk_softc *);
164
165static int sk_miibus_readreg	(device_t, int, int);
166static int sk_miibus_writereg	(device_t, int, int, int);
167static void sk_miibus_statchg	(device_t);
168
169static u_int32_t sk_calchash	(caddr_t);
170static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
171static void sk_setmulti		(struct sk_if_softc *);
172
173#ifdef SK_USEIOSPACE
174#define SK_RES		SYS_RES_IOPORT
175#define SK_RID		SK_PCI_LOIO
176#else
177#define SK_RES		SYS_RES_MEMORY
178#define SK_RID		SK_PCI_LOMEM
179#endif
180
181/*
182 * Note that we have newbus methods for both the GEnesis controller
183 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
184 * the miibus code is a child of the XMACs. We need to do it this way
185 * so that the miibus drivers can access the PHY registers on the
186 * right PHY. It's not quite what I had in mind, but it's the only
187 * design that achieves the desired effect.
188 */
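/*
 * A rough sketch of the resulting device tree (single-port case), as
 * implied by the DRIVER_MODULE() declarations below:
 *
 *	skc0 (GEnesis controller, PCI)
 *	 `-- sk0 (XMAC II)
 *	      `-- miibus0
 *	           `-- PHY driver (e.g. brgphy on the 1000baseTX boards)
 */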
189static device_method_t skc_methods[] = {
190	/* Device interface */
191	DEVMETHOD(device_probe,		sk_probe),
192	DEVMETHOD(device_attach,	sk_attach),
193	DEVMETHOD(device_detach,	sk_detach),
194	DEVMETHOD(device_shutdown,	sk_shutdown),
195
196	/* bus interface */
197	DEVMETHOD(bus_print_child,	bus_generic_print_child),
198	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
199
200	{ 0, 0 }
201};
202
203static driver_t skc_driver = {
204	"skc",
205	skc_methods,
206	sizeof(struct sk_softc)
207};
208
209static devclass_t skc_devclass;
210
211static device_method_t sk_methods[] = {
212	/* Device interface */
213	DEVMETHOD(device_probe,		sk_probe_xmac),
214	DEVMETHOD(device_attach,	sk_attach_xmac),
215	DEVMETHOD(device_detach,	sk_detach_xmac),
216	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
217
218	/* bus interface */
219	DEVMETHOD(bus_print_child,	bus_generic_print_child),
220	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
221
222	/* MII interface */
223	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
224	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
225	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
226
227	{ 0, 0 }
228};
229
230static driver_t sk_driver = {
231	"sk",
232	sk_methods,
233	sizeof(struct sk_if_softc)
234};
235
236static devclass_t sk_devclass;
237
238DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
239DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
240DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
241
242#define SK_SETBIT(sc, reg, x)		\
243	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
244
245#define SK_CLRBIT(sc, reg, x)		\
246	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
247
248#define SK_WIN_SETBIT_4(sc, reg, x)	\
249	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
250
251#define SK_WIN_CLRBIT_4(sc, reg, x)	\
252	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
253
254#define SK_WIN_SETBIT_2(sc, reg, x)	\
255	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
256
257#define SK_WIN_CLRBIT_2(sc, reg, x)	\
258	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
259
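/*
 * Register window accessors. The GEnesis maps most of its registers
 * through a movable window: writing a window number to SK_RAP selects
 * which block of registers appears at SK_WIN_BASE, and the SK_WIN()/
 * SK_REG() macros (presumably defined in if_skreg.h) split a flat
 * register offset into that window number and the offset within it.
 */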
260static u_int32_t
261sk_win_read_4(sc, reg)
262	struct sk_softc		*sc;
263	int			reg;
264{
265	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
266	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
267}
268
269static u_int16_t
270sk_win_read_2(sc, reg)
271	struct sk_softc		*sc;
272	int			reg;
273{
274	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
275	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
276}
277
278static u_int8_t
279sk_win_read_1(sc, reg)
280	struct sk_softc		*sc;
281	int			reg;
282{
283	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
284	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
285}
286
287static void
288sk_win_write_4(sc, reg, val)
289	struct sk_softc		*sc;
290	int			reg;
291	u_int32_t		val;
292{
293	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
294	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
295	return;
296}
297
298static void
299sk_win_write_2(sc, reg, val)
300	struct sk_softc		*sc;
301	int			reg;
302	u_int32_t		val;
303{
304	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
305	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
306	return;
307}
308
309static void
310sk_win_write_1(sc, reg, val)
311	struct sk_softc		*sc;
312	int			reg;
313	u_int32_t		val;
314{
315	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
316	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
317	return;
318}
319
320/*
321 * The VPD EEPROM contains Vital Product Data, as suggested in
322 * the PCI 2.1 specification. The VPD data is separated into areas
323 * denoted by resource IDs. The SysKonnect VPD contains an ID string
324 * resource (the name of the adapter), a read-only area resource
325 * containing various key/data fields and a read/write area which
326 * can be used to store asset management information or log messages.
327 * We read the ID string and read-only area into buffers attached to
328 * the controller softc structure for later use. At the moment,
329 * we only use the ID string during sk_attach().
330 */
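/*
 * VPD bytes are fetched one at a time through the adapter's PCI VPD
 * address/data registers (reached via the SK_PCI_REG() window below):
 * write the byte address, poll for the completion flag, then read the
 * data register.
 */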
331static u_int8_t
332sk_vpd_readbyte(sc, addr)
333	struct sk_softc		*sc;
334	int			addr;
335{
336	int			i;
337
338	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
339	for (i = 0; i < SK_TIMEOUT; i++) {
340		DELAY(1);
341		if (sk_win_read_2(sc,
342		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
343			break;
344	}
345
346	if (i == SK_TIMEOUT)
347		return(0);
348
349	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
350}
351
352static void
353sk_vpd_read_res(sc, res, addr)
354	struct sk_softc		*sc;
355	struct vpd_res		*res;
356	int			addr;
357{
358	int			i;
359	u_int8_t		*ptr;
360
361	ptr = (u_int8_t *)res;
362	for (i = 0; i < sizeof(struct vpd_res); i++)
363		ptr[i] = sk_vpd_readbyte(sc, i + addr);
364
365	return;
366}
367
368static void
369sk_vpd_read(sc)
370	struct sk_softc		*sc;
371{
372	int			pos = 0, i;
373	struct vpd_res		res;
374
375	if (sc->sk_vpd_prodname != NULL)
376		free(sc->sk_vpd_prodname, M_DEVBUF);
377	if (sc->sk_vpd_readonly != NULL)
378		free(sc->sk_vpd_readonly, M_DEVBUF);
379	sc->sk_vpd_prodname = NULL;
380	sc->sk_vpd_readonly = NULL;
381
382	sk_vpd_read_res(sc, &res, pos);
383
384	if (res.vr_id != VPD_RES_ID) {
385		printf("skc%d: bad VPD resource id: expected %x got %x\n",
386		    sc->sk_unit, VPD_RES_ID, res.vr_id);
387		return;
388	}
389
390	pos += sizeof(res);
391	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname == NULL)
		return;
392	for (i = 0; i < res.vr_len; i++)
393		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
394	sc->sk_vpd_prodname[i] = '\0';
395	pos += i;
396
397	sk_vpd_read_res(sc, &res, pos);
398
399	if (res.vr_id != VPD_RES_READ) {
400		printf("skc%d: bad VPD resource id: expected %x got %x\n",
401		    sc->sk_unit, VPD_RES_READ, res.vr_id);
402		return;
403	}
404
405	pos += sizeof(res);
406	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		return;
407	for (i = 0; i < res.vr_len; i++)
408		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
409
410	return;
411}
412
413static int
414sk_miibus_readreg(dev, phy, reg)
415	device_t		dev;
416	int			phy, reg;
417{
418	struct sk_if_softc	*sc_if;
419	int			i;
420
421	sc_if = device_get_softc(dev);
422
423	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
424		return(0);
425
426	SK_IF_LOCK(sc_if);
427
428	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
429	SK_XM_READ_2(sc_if, XM_PHY_DATA);
430	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
431		for (i = 0; i < SK_TIMEOUT; i++) {
432			DELAY(1);
433			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
434			    XM_MMUCMD_PHYDATARDY)
435				break;
436		}
437
438		if (i == SK_TIMEOUT) {
439			printf("sk%d: phy failed to come ready\n",
440			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
441			return(0);
442		}
443	}
444	DELAY(1);
445	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
446	SK_IF_UNLOCK(sc_if);
447	return(i);
448}
449
450static int
451sk_miibus_writereg(dev, phy, reg, val)
452	device_t		dev;
453	int			phy, reg, val;
454{
455	struct sk_if_softc	*sc_if;
456	int			i;
457
458	sc_if = device_get_softc(dev);
459	SK_IF_LOCK(sc_if);
460
461	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
462	for (i = 0; i < SK_TIMEOUT; i++) {
463		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
464			break;
465	}
466
467	if (i == SK_TIMEOUT) {
468		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
469		return(ETIMEDOUT);
470	}
471
472	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
473	for (i = 0; i < SK_TIMEOUT; i++) {
474		DELAY(1);
475		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
476			break;
477	}
478
479	SK_IF_UNLOCK(sc_if);
480
481	if (i == SK_TIMEOUT)
482		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
483
484	return(0);
485}
486
487static void
488sk_miibus_statchg(dev)
489	device_t		dev;
490{
491	struct sk_if_softc	*sc_if;
492	struct mii_data		*mii;
493
494	sc_if = device_get_softc(dev);
495	mii = device_get_softc(sc_if->sk_miibus);
496	SK_IF_LOCK(sc_if);
497	/*
498	 * If this is a GMII PHY, manually set the XMAC's
499	 * duplex mode accordingly.
500	 */
501	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
502		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
503			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
504		} else {
505			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
506		}
507	}
508	SK_IF_UNLOCK(sc_if);
509
510	return;
511}
512
513#define SK_POLY		0xEDB88320
514#define SK_BITS		6
515
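/*
 * Compute the multicast hash bin for an ethernet address. This is the
 * standard reflected CRC-32 (polynomial 0xEDB88320) over the six address
 * bytes; the complement of the low SK_BITS bits of the result selects one
 * of the 64 bins in the hash filter programmed by sk_setmulti().
 */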
516static u_int32_t
517sk_calchash(addr)
518	caddr_t			addr;
519{
520	u_int32_t		idx, bit, data, crc;
521
522	/* Compute CRC for the address value. */
523	crc = 0xFFFFFFFF; /* initial value */
524
525	for (idx = 0; idx < 6; idx++) {
526		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
527			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
528	}
529
530	return (~crc & ((1 << SK_BITS) - 1));
531}
532
533static void
534sk_setfilt(sc_if, addr, slot)
535	struct sk_if_softc	*sc_if;
536	caddr_t			addr;
537	int			slot;
538{
539	int			base;
540
541	base = XM_RXFILT_ENTRY(slot);
542
543	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
544	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
545	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
546
547	return;
548}
549
550static void
551sk_setmulti(sc_if)
552	struct sk_if_softc	*sc_if;
553{
554	struct ifnet		*ifp;
555	u_int32_t		hashes[2] = { 0, 0 };
556	int			h, i;
557	struct ifmultiaddr	*ifma;
558	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
559
560	ifp = &sc_if->arpcom.ac_if;
561
562	/* First, zot all the existing filters. */
563	for (i = 1; i < XM_RXFILT_MAX; i++)
564		sk_setfilt(sc_if, (caddr_t)&dummy, i);
565	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
566	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
567
568	/* Now program new ones. */
569	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
570		hashes[0] = 0xFFFFFFFF;
571		hashes[1] = 0xFFFFFFFF;
572	} else {
573		i = 1;
574		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
575			if (ifma->ifma_addr->sa_family != AF_LINK)
576				continue;
577			/*
578			 * Program the first XM_RXFILT_MAX multicast groups
579			 * into the perfect filter. For all others,
580			 * use the hash table.
581			 */
582			if (i < XM_RXFILT_MAX) {
583				sk_setfilt(sc_if,
584			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
585				i++;
586				continue;
587			}
588
589			h = sk_calchash(
590				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
591			if (h < 32)
592				hashes[0] |= (1 << h);
593			else
594				hashes[1] |= (1 << (h - 32));
595		}
596	}
597
598	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
599	    XM_MODE_RX_USE_PERFECT);
600	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
601	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
602
603	return;
604}
605
606static int
607sk_init_rx_ring(sc_if)
608	struct sk_if_softc	*sc_if;
609{
610	struct sk_chain_data	*cd;
611	struct sk_ring_data	*rd;
612	int			i;
613
614	cd = &sc_if->sk_cdata;
615	rd = sc_if->sk_rdata;
616
617	bzero((char *)rd->sk_rx_ring,
618	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
619
620	for (i = 0; i < SK_RX_RING_CNT; i++) {
621		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
622		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
623			return(ENOBUFS);
624		if (i == (SK_RX_RING_CNT - 1)) {
625			cd->sk_rx_chain[i].sk_next =
626			    &cd->sk_rx_chain[0];
627			rd->sk_rx_ring[i].sk_next =
628			    vtophys(&rd->sk_rx_ring[0]);
629		} else {
630			cd->sk_rx_chain[i].sk_next =
631			    &cd->sk_rx_chain[i + 1];
632			rd->sk_rx_ring[i].sk_next =
633			    vtophys(&rd->sk_rx_ring[i + 1]);
634		}
635	}
636
637	sc_if->sk_cdata.sk_rx_prod = 0;
638	sc_if->sk_cdata.sk_rx_cons = 0;
639
640	return(0);
641}
642
643static void
644sk_init_tx_ring(sc_if)
645	struct sk_if_softc	*sc_if;
646{
647	struct sk_chain_data	*cd;
648	struct sk_ring_data	*rd;
649	int			i;
650
651	cd = &sc_if->sk_cdata;
652	rd = sc_if->sk_rdata;
653
654	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
655	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
656
657	for (i = 0; i < SK_TX_RING_CNT; i++) {
658		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
659		if (i == (SK_TX_RING_CNT - 1)) {
660			cd->sk_tx_chain[i].sk_next =
661			    &cd->sk_tx_chain[0];
662			rd->sk_tx_ring[i].sk_next =
663			    vtophys(&rd->sk_tx_ring[0]);
664		} else {
665			cd->sk_tx_chain[i].sk_next =
666			    &cd->sk_tx_chain[i + 1];
667			rd->sk_tx_ring[i].sk_next =
668			    vtophys(&rd->sk_tx_ring[i + 1]);
669		}
670	}
671
672	sc_if->sk_cdata.sk_tx_prod = 0;
673	sc_if->sk_cdata.sk_tx_cons = 0;
674	sc_if->sk_cdata.sk_tx_cnt = 0;
675
676	return;
677}
678
679static int
680sk_newbuf(sc_if, c, m)
681	struct sk_if_softc	*sc_if;
682	struct sk_chain		*c;
683	struct mbuf		*m;
684{
685	struct mbuf		*m_new = NULL;
686	struct sk_rx_desc	*r;
687
688	if (m == NULL) {
689		caddr_t			*buf = NULL;
690
691		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
692		if (m_new == NULL)
693			return(ENOBUFS);
694
695		/* Allocate the jumbo buffer */
696		buf = sk_jalloc(sc_if);
697		if (buf == NULL) {
698			m_freem(m_new);
699#ifdef SK_VERBOSE
700			printf("sk%d: jumbo allocation failed "
701			    "-- packet dropped!\n", sc_if->sk_unit);
702#endif
703			return(ENOBUFS);
704		}
705
706		/* Attach the buffer to the mbuf */
707		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
708		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
709		m_new->m_data = (void *)buf;
710		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
711	} else {
712		/*
713	 	 * We're re-using a previously allocated mbuf;
714		 * be sure to re-init pointers and lengths to
715		 * default values.
716		 */
717		m_new = m;
718		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
719		m_new->m_data = m_new->m_ext.ext_buf;
720	}
721
722	/*
723	 * Adjust alignment so packet payload begins on a
724	 * longword boundary. Mandatory for Alpha, useful on
725	 * x86 too.
726	 */
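	/*
	 * (ETHER_ALIGN is 2 bytes, which offsets the 14-byte ethernet
	 * header so that the IP header following it lands on a 32-bit
	 * boundary.)
	 */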
727	m_adj(m_new, ETHER_ALIGN);
728
729	r = c->sk_desc;
730	c->sk_mbuf = m_new;
731	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
732	r->sk_ctl = m_new->m_len | SK_RXSTAT;
733
734	return(0);
735}
736
737/*
738 * Allocate jumbo buffer storage. The SysKonnect adapters support
739 * "jumbograms" (9K frames), although SysKonnect doesn't currently
740 * use them in their drivers. In order for us to use them, we need
741 * large 9K receive buffers, however standard mbuf clusters are only
742 * 2048 bytes in size. Consequently, we need to allocate and manage
743 * our own jumbo buffer pool. Fortunately, this does not require an
744 * excessive amount of additional code.
745 */
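/*
 * The pool is managed as a simple free list: sk_jalloc() pops an entry
 * off sk_jfree_listhead and hands out the corresponding 9K slot, and
 * sk_jfree() -- installed as the external-storage free routine via
 * MEXTADD() in sk_newbuf() -- pushes the slot back when the mbuf that
 * referenced it is freed.
 */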
746static int
747sk_alloc_jumbo_mem(sc_if)
748	struct sk_if_softc	*sc_if;
749{
750	caddr_t			ptr;
751	register int		i;
752	struct sk_jpool_entry   *entry;
753
754	/* Grab a big chunk o' storage. */
755	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
756	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
757
758	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
759		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
760		return(ENOBUFS);
761	}
762
763	SLIST_INIT(&sc_if->sk_jfree_listhead);
764	SLIST_INIT(&sc_if->sk_jinuse_listhead);
765
766	/*
767	 * Now divide it up into 9K pieces and save the addresses
768	 * in an array.
769	 */
770	ptr = sc_if->sk_cdata.sk_jumbo_buf;
771	for (i = 0; i < SK_JSLOTS; i++) {
772		sc_if->sk_cdata.sk_jslots[i] = ptr;
773		ptr += SK_JLEN;
774		entry = malloc(sizeof(struct sk_jpool_entry),
775		    M_DEVBUF, M_NOWAIT);
776		if (entry == NULL) {
777			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
778			sc_if->sk_cdata.sk_jumbo_buf = NULL;
779			printf("sk%d: no memory for jumbo "
780			    "buffer queue!\n", sc_if->sk_unit);
781			return(ENOBUFS);
782		}
783		entry->slot = i;
784		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
785		    entry, jpool_entries);
786	}
787
788	return(0);
789}
790
791/*
792 * Allocate a jumbo buffer.
793 */
794static void *
795sk_jalloc(sc_if)
796	struct sk_if_softc	*sc_if;
797{
798	struct sk_jpool_entry   *entry;
799
800	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
801
802	if (entry == NULL) {
803#ifdef SK_VERBOSE
804		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
805#endif
806		return(NULL);
807	}
808
809	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
810	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
811	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
812}
813
814/*
815 * Release a jumbo buffer.
816 */
817static void
818sk_jfree(buf, args)
819	void			*buf;
820	void			*args;
821{
822	struct sk_if_softc	*sc_if;
823	int		        i;
824	struct sk_jpool_entry   *entry;
825
826	/* Extract the softc struct pointer. */
827	sc_if = (struct sk_if_softc *)args;
828
829	if (sc_if == NULL)
830		panic("sk_jfree: didn't get softc pointer!");
831
832	/* calculate the slot this buffer belongs to */
833	i = ((vm_offset_t)buf
834	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
835
836	if ((i < 0) || (i >= SK_JSLOTS))
837		panic("sk_jfree: asked to free buffer that we don't manage!");
838
839	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
840	if (entry == NULL)
841		panic("sk_jfree: buffer not in use!");
842	entry->slot = i;
843	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
844	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
845
846	return;
847}
848
849/*
850 * Set media options.
851 */
852static int
853sk_ifmedia_upd(ifp)
854	struct ifnet		*ifp;
855{
856	struct sk_if_softc	*sc_if;
857	struct mii_data		*mii;
858
859	sc_if = ifp->if_softc;
860	mii = device_get_softc(sc_if->sk_miibus);
861	sk_init(sc_if);
862	mii_mediachg(mii);
863
864	return(0);
865}
866
867/*
868 * Report current media status.
869 */
870static void
871sk_ifmedia_sts(ifp, ifmr)
872	struct ifnet		*ifp;
873	struct ifmediareq	*ifmr;
874{
875	struct sk_if_softc	*sc_if;
876	struct mii_data		*mii;
877
878	sc_if = ifp->if_softc;
879	mii = device_get_softc(sc_if->sk_miibus);
880
881	mii_pollstat(mii);
882	ifmr->ifm_active = mii->mii_media_active;
883	ifmr->ifm_status = mii->mii_media_status;
884
885	return;
886}
887
888static int
889sk_ioctl(ifp, command, data)
890	struct ifnet		*ifp;
891	u_long			command;
892	caddr_t			data;
893{
894	struct sk_if_softc	*sc_if = ifp->if_softc;
895	struct ifreq		*ifr = (struct ifreq *) data;
896	int			error = 0;
897	struct mii_data		*mii;
898
899	SK_IF_LOCK(sc_if);
900
901	switch(command) {
902	case SIOCSIFMTU:
903		if (ifr->ifr_mtu > SK_JUMBO_MTU)
904			error = EINVAL;
905		else {
906			ifp->if_mtu = ifr->ifr_mtu;
907			sk_init(sc_if);
908		}
909		break;
910	case SIOCSIFFLAGS:
911		if (ifp->if_flags & IFF_UP) {
912			if (ifp->if_flags & IFF_RUNNING &&
913			    ifp->if_flags & IFF_PROMISC &&
914			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
915				SK_XM_SETBIT_4(sc_if, XM_MODE,
916				    XM_MODE_RX_PROMISC);
917				sk_setmulti(sc_if);
918			} else if (ifp->if_flags & IFF_RUNNING &&
919			    !(ifp->if_flags & IFF_PROMISC) &&
920			    sc_if->sk_if_flags & IFF_PROMISC) {
921				SK_XM_CLRBIT_4(sc_if, XM_MODE,
922				    XM_MODE_RX_PROMISC);
923				sk_setmulti(sc_if);
924			} else
925				sk_init(sc_if);
926		} else {
927			if (ifp->if_flags & IFF_RUNNING)
928				sk_stop(sc_if);
929		}
930		sc_if->sk_if_flags = ifp->if_flags;
931		error = 0;
932		break;
933	case SIOCADDMULTI:
934	case SIOCDELMULTI:
935		sk_setmulti(sc_if);
936		error = 0;
937		break;
938	case SIOCGIFMEDIA:
939	case SIOCSIFMEDIA:
940		mii = device_get_softc(sc_if->sk_miibus);
941		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
942		break;
943	default:
944		error = ether_ioctl(ifp, command, data);
945		break;
946	}
947
948	SK_IF_UNLOCK(sc_if);
949
950	return(error);
951}
952
953/*
954 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
955 * IDs against our list and return a device name if we find a match.
956 */
957static int
958sk_probe(dev)
959	device_t		dev;
960{
961	struct sk_type		*t;
962
963	t = sk_devs;
964
965	while(t->sk_name != NULL) {
966		if ((pci_get_vendor(dev) == t->sk_vid) &&
967		    (pci_get_device(dev) == t->sk_did)) {
968			device_set_desc(dev, t->sk_name);
969			return(0);
970		}
971		t++;
972	}
973
974	return(ENXIO);
975}
976
977/*
978 * Force the GEnesis into reset, then bring it out of reset.
979 */
980static void
981sk_reset(sc)
982	struct sk_softc		*sc;
983{
984	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
985	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
986	DELAY(1000);
987	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
988	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
989
990	/* Configure packet arbiter */
991	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
992	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
993	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
994	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
995	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
996
997	/* Enable RAM interface */
998	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
999
1000	/*
1001         * Configure interrupt moderation. The moderation timer
1002	 * defers interrupts specified in the interrupt moderation
1003	 * timer mask based on the timeout specified in the interrupt
1004	 * moderation timer init register. Each bit in the timer
1005	 * register represents 18.825ns, so to specify a timeout in
1006	 * microseconds, we have to multiply by 54.
1007	 */
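	/*
	 * For example, SK_IM_USECS(200) below asks for roughly 200us of
	 * interrupt deferral; assuming SK_IM_USECS() simply expands to
	 * "(x) * SK_IMTIMER_TICKS" with SK_IMTIMER_TICKS being the 54
	 * mentioned above, that works out to 10800 timer ticks.
	 */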
1008	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
1009	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1010	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1011	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1012
1013	return;
1014}
1015
1016static int
1017sk_probe_xmac(dev)
1018	device_t		dev;
1019{
1020	/*
1021	 * Not much to do here. We always know there will be
1022	 * at least one XMAC present, and if there are two,
1023	 * sk_attach() will create a second device instance
1024	 * for us.
1025	 */
1026	device_set_desc(dev, "XaQti Corp. XMAC II");
1027
1028	return(0);
1029}
1030
1031/*
1032 * Each XMAC chip is attached as a separate logical IP interface.
1033 * Single port cards will have only one logical interface of course.
1034 */
1035static int
1036sk_attach_xmac(dev)
1037	device_t		dev;
1038{
1039	struct sk_softc		*sc;
1040	struct sk_if_softc	*sc_if;
1041	struct ifnet		*ifp;
1042	int			i, port, error;
1043
1044	if (dev == NULL)
1045		return(EINVAL);
1046
1047	error = 0;
1048	sc_if = device_get_softc(dev);
1049	sc = device_get_softc(device_get_parent(dev));
1050	SK_LOCK(sc);
1051	port = *(int *)device_get_ivars(dev);
1052	free(device_get_ivars(dev), M_DEVBUF);
1053	device_set_ivars(dev, NULL);
1054
1055	sc_if->sk_dev = dev;
1056	sc_if->sk_unit = device_get_unit(dev);
1057	sc_if->sk_port = port;
1058	sc_if->sk_softc = sc;
1059	sc->sk_if[port] = sc_if;
1060	if (port == SK_PORT_A)
1061		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1062	if (port == SK_PORT_B)
1063		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1064
1065	/*
1066	 * Get station address for this interface. Note that
1067	 * dual port cards actually come with three station
1068	 * addresses: one for each port, plus an extra. The
1069	 * extra one is used by the SysKonnect driver software
1070	 * as a 'virtual' station address for when both ports
1071	 * are operating in failover mode. Currently we don't
1072	 * use this extra address.
1073	 */
1074	for (i = 0; i < ETHER_ADDR_LEN; i++)
1075		sc_if->arpcom.ac_enaddr[i] =
1076		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1077
1078	printf("sk%d: Ethernet address: %6D\n",
1079	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1080
1081	/*
1082	 * Set up RAM buffer addresses. The NIC will have a certain
1083	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1084	 * need to divide this up a) between the transmitter and
1085 	 * receiver and b) between the two XMACs, if this is a
1086	 * dual port NIC. Our algorithm is to divide up the memory
1087	 * evenly so that everyone gets a fair share.
1088	 */
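	/*
	 * For example, a single-MAC board with 1MB of SRAM ends up with a
	 * 512K receive chunk and a 512K transmit chunk for its one XMAC;
	 * a dual-MAC board splits the same space four ways. The start/end
	 * values programmed below are expressed in 8-byte (u_int64_t)
	 * units, matching how the code converts byte offsets before
	 * handing them to the chip.
	 */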
1089	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1090		u_int32_t		chunk, val;
1091
1092		chunk = sc->sk_ramsize / 2;
1093		val = sc->sk_rboff / sizeof(u_int64_t);
1094		sc_if->sk_rx_ramstart = val;
1095		val += (chunk / sizeof(u_int64_t));
1096		sc_if->sk_rx_ramend = val - 1;
1097		sc_if->sk_tx_ramstart = val;
1098		val += (chunk / sizeof(u_int64_t));
1099		sc_if->sk_tx_ramend = val - 1;
1100	} else {
1101		u_int32_t		chunk, val;
1102
1103		chunk = sc->sk_ramsize / 4;
1104		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1105		    sizeof(u_int64_t);
1106		sc_if->sk_rx_ramstart = val;
1107		val += (chunk / sizeof(u_int64_t));
1108		sc_if->sk_rx_ramend = val - 1;
1109		sc_if->sk_tx_ramstart = val;
1110		val += (chunk / sizeof(u_int64_t));
1111		sc_if->sk_tx_ramend = val - 1;
1112	}
1113
1114	/* Read and save PHY type and set PHY address */
1115	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1116	switch(sc_if->sk_phytype) {
1117	case SK_PHYTYPE_XMAC:
1118		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1119		break;
1120	case SK_PHYTYPE_BCOM:
1121		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1122		break;
1123	default:
1124		printf("skc%d: unsupported PHY type: %d\n",
1125		    sc->sk_unit, sc_if->sk_phytype);
1126		error = ENODEV;
1127		goto fail_xmac;
1128	}
1129
1130	/* Allocate the descriptor queues. */
1131	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1132	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1133
1134	if (sc_if->sk_rdata == NULL) {
1135		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1136		error = ENOMEM;
1137		goto fail_xmac;
1138	}
1139
1140	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1141
1142	/* Try to allocate memory for jumbo buffers. */
1143	if (sk_alloc_jumbo_mem(sc_if)) {
1144		printf("sk%d: jumbo buffer allocation failed\n",
1145		    sc_if->sk_unit);
1146		error = ENOMEM;
1147		goto fail_xmac;
1148	}
1149
1150	ifp = &sc_if->arpcom.ac_if;
1151	ifp->if_softc = sc_if;
1152	ifp->if_unit = sc_if->sk_unit;
1153	ifp->if_name = "sk";
1154	ifp->if_mtu = ETHERMTU;
1155	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1156	ifp->if_ioctl = sk_ioctl;
1157	ifp->if_output = ether_output;
1158	ifp->if_start = sk_start;
1159	ifp->if_watchdog = sk_watchdog;
1160	ifp->if_init = sk_init;
1161	ifp->if_baudrate = 1000000000;
1162	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1163
1164	callout_handle_init(&sc_if->sk_tick_ch);
1165
1166	/*
1167	 * Call MI attach routine.
1168	 */
1169	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1170
1171	/*
1172	 * Do miibus setup.
1173	 */
1174	sk_init_xmac(sc_if);
1175	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1176	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1177		printf("sk%d: no PHY found!\n", sc_if->sk_unit);
1178		error = ENXIO;
1179		goto fail_xmac;
1180	}
1181
1182fail_xmac:
1183	SK_UNLOCK(sc);
1184	if (error) {
1185		/* Access should be ok even though lock has been dropped */
1186		sc->sk_if[port] = NULL;
1187		sk_detach_xmac(dev);
1188	}
1189
1190	return(error);
1191}
1192
1193/*
1194 * Attach the interface. Allocate softc structures, do ifmedia
1195 * setup and ethernet/BPF attach.
1196 */
1197static int
1198sk_attach(dev)
1199	device_t		dev;
1200{
1201	u_int32_t		command;
1202	struct sk_softc		*sc;
1203	int			unit, error = 0, rid, *port;
1204
1205	sc = device_get_softc(dev);
1206	unit = device_get_unit(dev);
1207
1208	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1209	    MTX_DEF | MTX_RECURSE);
1210
1211	/*
1212	 * Handle power management nonsense.
1213	 */
1214	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1215		u_int32_t		iobase, membase, irq;
1216
1217		/* Save important PCI config data. */
1218		iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1219		membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1220		irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1221
1222		/* Reset the power state. */
1223		printf("skc%d: chip is in D%d power mode "
1224		    "-- setting to D0\n", unit,
1225		    pci_get_powerstate(dev));
1226		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1227
1228		/* Restore PCI config data. */
1229		pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1230		pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1231		pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1232	}
1233
1234	/*
1235	 * Map control/status registers.
1236	 */
1237	pci_enable_busmaster(dev);
1238	pci_enable_io(dev, SYS_RES_IOPORT);
1239	pci_enable_io(dev, SYS_RES_MEMORY);
1240	command = pci_read_config(dev, PCIR_COMMAND, 4);
1241
1242#ifdef SK_USEIOSPACE
1243	if (!(command & PCIM_CMD_PORTEN)) {
1244		printf("skc%d: failed to enable I/O ports!\n", unit);
1245		error = ENXIO;
1246		goto fail;
1247	}
1248#else
1249	if (!(command & PCIM_CMD_MEMEN)) {
1250		printf("skc%d: failed to enable memory mapping!\n", unit);
1251		error = ENXIO;
1252		goto fail;
1253	}
1254#endif
1255
1256	rid = SK_RID;
1257	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1258	    0, ~0, 1, RF_ACTIVE);
1259
1260	if (sc->sk_res == NULL) {
1261		printf("skc%d: couldn't map ports/memory\n", unit);
1262		error = ENXIO;
1263		goto fail;
1264	}
1265
1266	sc->sk_btag = rman_get_bustag(sc->sk_res);
1267	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1268
1269	/* Allocate interrupt */
1270	rid = 0;
1271	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1272	    RF_SHAREABLE | RF_ACTIVE);
1273
1274	if (sc->sk_irq == NULL) {
1275		printf("skc%d: couldn't map interrupt\n", unit);
1276		error = ENXIO;
1277		goto fail;
1278	}
1279
1280	/* Reset the adapter. */
1281	sk_reset(sc);
1282
1283	sc->sk_unit = unit;
1284
1285	/* Read and save vital product data from EEPROM. */
1286	sk_vpd_read(sc);
1287
1288	/* Read and save RAM size and RAMbuffer offset */
1289	switch(sk_win_read_1(sc, SK_EPROM0)) {
1290	case SK_RAMSIZE_512K_64:
1291		sc->sk_ramsize = 0x80000;
1292		sc->sk_rboff = SK_RBOFF_0;
1293		break;
1294	case SK_RAMSIZE_1024K_64:
1295		sc->sk_ramsize = 0x100000;
1296		sc->sk_rboff = SK_RBOFF_80000;
1297		break;
1298	case SK_RAMSIZE_1024K_128:
1299		sc->sk_ramsize = 0x100000;
1300		sc->sk_rboff = SK_RBOFF_0;
1301		break;
1302	case SK_RAMSIZE_2048K_128:
1303		sc->sk_ramsize = 0x200000;
1304		sc->sk_rboff = SK_RBOFF_0;
1305		break;
1306	default:
1307		printf("skc%d: unknown ram size: %d\n",
1308		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1309		error = ENXIO;
1310		goto fail;
1311	}
1312
1313	/* Read and save physical media type */
1314	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1315	case SK_PMD_1000BASESX:
1316		sc->sk_pmd = IFM_1000_SX;
1317		break;
1318	case SK_PMD_1000BASELX:
1319		sc->sk_pmd = IFM_1000_LX;
1320		break;
1321	case SK_PMD_1000BASECX:
1322		sc->sk_pmd = IFM_1000_CX;
1323		break;
1324	case SK_PMD_1000BASETX:
1325		sc->sk_pmd = IFM_1000_T;
1326		break;
1327	default:
1328		printf("skc%d: unknown media type: 0x%x\n",
1329		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1330		error = ENXIO;
1331		goto fail;
1332	}
1333
1334	/* Announce the product name. */
1335	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1336	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1337	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1338	*port = SK_PORT_A;
1339	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1340
1341	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1342		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1343		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1344		*port = SK_PORT_B;
1345		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1346	}
1347
1348	/* Turn on the 'driver is loaded' LED. */
1349	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1350
1351	bus_generic_attach(dev);
1352
1353	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1354	    sk_intr, sc, &sc->sk_intrhand);
1355
1356	if (error) {
1357		printf("skc%d: couldn't set up irq\n", unit);
1358		goto fail;
1359	}
1360
1361fail:
1362	if (error)
1363		sk_detach(dev);
1364
1365	return(error);
1366}
1367
1368static int
1369sk_detach_xmac(dev)
1370	device_t		dev;
1371{
1372	struct sk_softc		*sc;
1373	struct sk_if_softc	*sc_if;
1374	struct ifnet		*ifp;
1375
1376	sc = device_get_softc(device_get_parent(dev));
1377	sc_if = device_get_softc(dev);
1378	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1379	    ("sk mutex not initialized in sk_detach_xmac"));
1380	SK_IF_LOCK(sc_if);
1381
1382	ifp = &sc_if->arpcom.ac_if;
1383	if (device_is_alive(dev)) {
1384		if (bus_child_present(dev))
1385			sk_stop(sc_if);
1386		ether_ifdetach(ifp);
1387		device_delete_child(dev, sc_if->sk_miibus);
1388		bus_generic_detach(dev);
1389	}
1390	if (sc_if->sk_cdata.sk_jumbo_buf)
1391		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1392	if (sc_if->sk_rdata) {
1393		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1394		    M_DEVBUF);
1395	}
1396	SK_IF_UNLOCK(sc_if);
1397
1398	return(0);
1399}
1400
1401static int
1402sk_detach(dev)
1403	device_t		dev;
1404{
1405	struct sk_softc		*sc;
1406
1407	sc = device_get_softc(dev);
1408	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1409	SK_LOCK(sc);
1410
1411	if (device_is_alive(dev)) {
1412		if (sc->sk_devs[SK_PORT_A] != NULL)
1413			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1414		if (sc->sk_devs[SK_PORT_B] != NULL)
1415			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1416		bus_generic_detach(dev);
1417	}
1418
1419	if (sc->sk_intrhand)
1420		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1421	if (sc->sk_irq)
1422		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1423	if (sc->sk_res)
1424		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1425
1426	SK_UNLOCK(sc);
1427	mtx_destroy(&sc->sk_mtx);
1428
1429	return(0);
1430}
1431
1432static int
1433sk_encap(sc_if, m_head, txidx)
1434        struct sk_if_softc	*sc_if;
1435        struct mbuf		*m_head;
1436        u_int32_t		*txidx;
1437{
1438	struct sk_tx_desc	*f = NULL;
1439	struct mbuf		*m;
1440	u_int32_t		frag, cur, cnt = 0;
1441
1442	m = m_head;
1443	cur = frag = *txidx;
1444
1445	/*
1446	 * Start packing the mbufs in this chain into
1447	 * the fragment pointers. Stop when we run out
1448	 * of fragments or hit the end of the mbuf chain.
1449	 */
1450	for (m = m_head; m != NULL; m = m->m_next) {
1451		if (m->m_len != 0) {
1452			if ((SK_TX_RING_CNT -
1453			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1454				return(ENOBUFS);
1455			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1456			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1457			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1458			if (cnt == 0)
1459				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1460			else
1461				f->sk_ctl |= SK_TXCTL_OWN;
1462			cur = frag;
1463			SK_INC(frag, SK_TX_RING_CNT);
1464			cnt++;
1465		}
1466	}
1467
1468	if (m != NULL)
1469		return(ENOBUFS);
1470
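	/*
	 * Close out the chain: mark the last fragment (and request an EOF
	 * interrupt for it), then hand the whole chain to the NIC by
	 * setting the OWN bit on the *first* descriptor last, so the chip
	 * never sees a partially built chain.
	 */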
1471	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1472		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1473	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1474	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1475	sc_if->sk_cdata.sk_tx_cnt += cnt;
1476
1477	*txidx = frag;
1478
1479	return(0);
1480}
1481
1482static void
1483sk_start(ifp)
1484	struct ifnet		*ifp;
1485{
1486        struct sk_softc		*sc;
1487        struct sk_if_softc	*sc_if;
1488        struct mbuf		*m_head = NULL;
1489        u_int32_t		idx;
1490
1491	sc_if = ifp->if_softc;
1492	sc = sc_if->sk_softc;
1493
1494	SK_IF_LOCK(sc_if);
1495
1496	idx = sc_if->sk_cdata.sk_tx_prod;
1497
1498	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1499		IF_DEQUEUE(&ifp->if_snd, m_head);
1500		if (m_head == NULL)
1501			break;
1502
1503		/*
1504		 * Pack the data into the transmit ring. If we
1505		 * don't have room, set the OACTIVE flag and wait
1506		 * for the NIC to drain the ring.
1507		 */
1508		if (sk_encap(sc_if, m_head, &idx)) {
1509			IF_PREPEND(&ifp->if_snd, m_head);
1510			ifp->if_flags |= IFF_OACTIVE;
1511			break;
1512		}
1513
1514		/*
1515		 * If there's a BPF listener, bounce a copy of this frame
1516		 * to him.
1517		 */
1518		BPF_MTAP(ifp, m_head);
1519	}
1520
1521	/* Transmit */
1522	sc_if->sk_cdata.sk_tx_prod = idx;
1523	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1524
1525	/* Set a timeout in case the chip goes out to lunch. */
1526	ifp->if_timer = 5;
1527	SK_IF_UNLOCK(sc_if);
1528
1529	return;
1530}
1531
1532
1533static void
1534sk_watchdog(ifp)
1535	struct ifnet		*ifp;
1536{
1537	struct sk_if_softc	*sc_if;
1538
1539	sc_if = ifp->if_softc;
1540
1541	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1542	sk_init(sc_if);
1543
1544	return;
1545}
1546
1547static void
1548sk_shutdown(dev)
1549	device_t		dev;
1550{
1551	struct sk_softc		*sc;
1552
1553	sc = device_get_softc(dev);
1554	SK_LOCK(sc);
1555
1556	/* Turn off the 'driver is loaded' LED. */
1557	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1558
1559	/*
1560	 * Reset the GEnesis controller. Doing this should also
1561	 * assert the resets on the attached XMAC(s).
1562	 */
1563	sk_reset(sc);
1564	SK_UNLOCK(sc);
1565
1566	return;
1567}
1568
1569static void
1570sk_rxeof(sc_if)
1571	struct sk_if_softc	*sc_if;
1572{
1573	struct mbuf		*m;
1574	struct ifnet		*ifp;
1575	struct sk_chain		*cur_rx;
1576	int			total_len = 0;
1577	int			i;
1578	u_int32_t		rxstat;
1579
1580	ifp = &sc_if->arpcom.ac_if;
1581	i = sc_if->sk_cdata.sk_rx_prod;
1582	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1583
1584	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1585
1586		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1587		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1588		m = cur_rx->sk_mbuf;
1589		cur_rx->sk_mbuf = NULL;
1590		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1591		SK_INC(i, SK_RX_RING_CNT);
1592
1593		if (rxstat & XM_RXSTAT_ERRFRAME) {
1594			ifp->if_ierrors++;
1595			sk_newbuf(sc_if, cur_rx, m);
1596			continue;
1597		}
1598
1599		/*
1600		 * Try to allocate a new jumbo buffer. If that
1601		 * fails, copy the packet to mbufs and put the
1602		 * jumbo buffer back in the ring so it can be
1603		 * re-used. If allocating mbufs fails, then we
1604		 * have to drop the packet.
1605		 */
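		/*
		 * (In the fallback path, m_devget() copies the frame out
		 * of the jumbo buffer into ordinary mbuf storage, and the
		 * old jumbo buffer is handed straight back to the ring via
		 * sk_newbuf().)
		 */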
1606		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1607			struct mbuf		*m0;
1608			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
1609			    ifp, NULL);
1610			sk_newbuf(sc_if, cur_rx, m);
1611			if (m0 == NULL) {
1612				printf("sk%d: no receive buffers "
1613				    "available -- packet dropped!\n",
1614				    sc_if->sk_unit);
1615				ifp->if_ierrors++;
1616				continue;
1617			}
1618			m = m0;
1619		} else {
1620			m->m_pkthdr.rcvif = ifp;
1621			m->m_pkthdr.len = m->m_len = total_len;
1622		}
1623
1624		ifp->if_ipackets++;
1625		(*ifp->if_input)(ifp, m);
1626	}
1627
1628	sc_if->sk_cdata.sk_rx_prod = i;
1629
1630	return;
1631}
1632
1633static void
1634sk_txeof(sc_if)
1635	struct sk_if_softc	*sc_if;
1636{
1637	struct sk_tx_desc	*cur_tx = NULL;
1638	struct ifnet		*ifp;
1639	u_int32_t		idx;
1640
1641	ifp = &sc_if->arpcom.ac_if;
1642
1643	/*
1644	 * Go through our tx ring and free mbufs for those
1645	 * frames that have been sent.
1646	 */
1647	idx = sc_if->sk_cdata.sk_tx_cons;
1648	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1649		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1650		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1651			break;
1652		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1653			ifp->if_opackets++;
1654		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1655			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1656			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1657		}
1658		sc_if->sk_cdata.sk_tx_cnt--;
1659		SK_INC(idx, SK_TX_RING_CNT);
1660		ifp->if_timer = 0;
1661	}
1662
1663	sc_if->sk_cdata.sk_tx_cons = idx;
1664
1665	if (cur_tx != NULL)
1666		ifp->if_flags &= ~IFF_OACTIVE;
1667
1668	return;
1669}
1670
1671static void
1672sk_tick(xsc_if)
1673	void			*xsc_if;
1674{
1675	struct sk_if_softc	*sc_if;
1676	struct mii_data		*mii;
1677	struct ifnet		*ifp;
1678	int			i;
1679
1680	sc_if = xsc_if;
1681	SK_IF_LOCK(sc_if);
1682	ifp = &sc_if->arpcom.ac_if;
1683	mii = device_get_softc(sc_if->sk_miibus);
1684
1685	if (!(ifp->if_flags & IFF_UP)) {
1686		SK_IF_UNLOCK(sc_if);
1687		return;
1688	}
1689
1690	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1691		sk_intr_bcom(sc_if);
1692		SK_IF_UNLOCK(sc_if);
1693		return;
1694	}
1695
1696	/*
1697	 * According to SysKonnect, the correct way to verify that
1698	 * the link has come back up is to poll bit 0 of the GPIO
1699	 * register three times. This pin has the signal from the
1700	 * link_sync pin connected to it; if we read the same link
1701	 * state 3 times in a row, we know the link is up.
1702	 */
1703	for (i = 0; i < 3; i++) {
1704		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
1705			break;
1706	}
1707
1708	if (i != 3) {
1709		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1710		SK_IF_UNLOCK(sc_if);
1711		return;
1712	}
1713
1714	/* Turn the GP0 interrupt back on. */
1715	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1716	SK_XM_READ_2(sc_if, XM_ISR);
1717	mii_tick(mii);
1718	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
1719
1720	SK_IF_UNLOCK(sc_if);
1721	return;
1722}
1723
1724static void
1725sk_intr_bcom(sc_if)
1726	struct sk_if_softc	*sc_if;
1727{
1728	struct sk_softc		*sc;
1729	struct mii_data		*mii;
1730	struct ifnet		*ifp;
1731	int			status;
1732
1733	sc = sc_if->sk_softc;
1734	mii = device_get_softc(sc_if->sk_miibus);
1735	ifp = &sc_if->arpcom.ac_if;
1736
1737	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1738
1739	/*
1740	 * Read the PHY interrupt register to make sure
1741	 * we clear any pending interrupts.
1742	 */
1743	status = sk_miibus_readreg(sc_if->sk_dev,
1744	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
1745
1746	if (!(ifp->if_flags & IFF_RUNNING)) {
1747		sk_init_xmac(sc_if);
1748		return;
1749	}
1750
1751	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
1752		int			lstat;
1753		lstat = sk_miibus_readreg(sc_if->sk_dev,
1754		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);
1755
1756		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
1757			mii_mediachg(mii);
1758			/* Turn off the link LED. */
1759			SK_IF_WRITE_1(sc_if, 0,
1760			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
1761			sc_if->sk_link = 0;
1762		} else if (status & BRGPHY_ISR_LNK_CHG) {
1763			sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1764	    		    BRGPHY_MII_IMR, 0xFF00);
1765			mii_tick(mii);
1766			sc_if->sk_link = 1;
1767			/* Turn on the link LED. */
1768			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
1769			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
1770			    SK_LINKLED_BLINK_OFF);
1771		} else {
1772			mii_tick(mii);
1773			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1774		}
1775	}
1776
1777	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1778
1779	return;
1780}
1781
1782static void
1783sk_intr_xmac(sc_if)
1784	struct sk_if_softc	*sc_if;
1785{
1786	struct sk_softc		*sc;
1787	u_int16_t		status;
1788	struct mii_data		*mii;
1789
1790	sc = sc_if->sk_softc;
1791	mii = device_get_softc(sc_if->sk_miibus);
1792	status = SK_XM_READ_2(sc_if, XM_ISR);
1793
1794	/*
1795	 * Link has gone down. Start MII tick timeout to
1796	 * watch for link resync.
1797	 */
1798	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
1799		if (status & XM_ISR_GP0_SET) {
1800			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1801			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1802		}
1803
1804		if (status & XM_ISR_AUTONEG_DONE) {
1805			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1806		}
1807	}
1808
1809	if (status & XM_IMR_TX_UNDERRUN)
1810		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1811
1812	if (status & XM_IMR_RX_OVERRUN)
1813		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1814
1815	status = SK_XM_READ_2(sc_if, XM_ISR);
1816
1817	return;
1818}
1819
1820static void
1821sk_intr(xsc)
1822	void			*xsc;
1823{
1824	struct sk_softc		*sc = xsc;
1825	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1826	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1827	u_int32_t		status;
1828
1829	SK_LOCK(sc);
1830
1831	sc_if0 = sc->sk_if[SK_PORT_A];
1832	sc_if1 = sc->sk_if[SK_PORT_B];
1833
1834	if (sc_if0 != NULL)
1835		ifp0 = &sc_if0->arpcom.ac_if;
1836	if (sc_if1 != NULL)
1837		ifp1 = &sc_if1->arpcom.ac_if;
1838
1839	for (;;) {
1840		status = CSR_READ_4(sc, SK_ISSR);
1841		if (!(status & sc->sk_intrmask))
1842			break;
1843
1844		/* Handle receive interrupts first. */
1845		if (status & SK_ISR_RX1_EOF) {
1846			sk_rxeof(sc_if0);
1847			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1848			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1849		}
1850		if (status & SK_ISR_RX2_EOF) {
1851			sk_rxeof(sc_if1);
1852			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1853			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1854		}
1855
1856		/* Then transmit interrupts. */
1857		if (status & SK_ISR_TX1_S_EOF) {
1858			sk_txeof(sc_if0);
1859			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1860			    SK_TXBMU_CLR_IRQ_EOF);
1861		}
1862		if (status & SK_ISR_TX2_S_EOF) {
1863			sk_txeof(sc_if1);
1864			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1865			    SK_TXBMU_CLR_IRQ_EOF);
1866		}
1867
1868		/* Then MAC interrupts. */
1869		if (status & SK_ISR_MAC1 &&
1870		    ifp0->if_flags & IFF_RUNNING)
1871			sk_intr_xmac(sc_if0);
1872
1873		if (status & SK_ISR_MAC2 &&
1874		    ifp1->if_flags & IFF_RUNNING)
1875			sk_intr_xmac(sc_if1);
1876
1877		if (status & SK_ISR_EXTERNAL_REG) {
1878			if (ifp0 != NULL)
1879				sk_intr_bcom(sc_if0);
1880			if (ifp1 != NULL)
1881				sk_intr_bcom(sc_if1);
1882		}
1883	}
1884
1885	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1886
1887	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1888		sk_start(ifp0);
1889	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1890		sk_start(ifp1);
1891
1892	SK_UNLOCK(sc);
1893
1894	return;
1895}
1896
1897static void
1898sk_init_xmac(sc_if)
1899	struct sk_if_softc	*sc_if;
1900{
1901	struct sk_softc		*sc;
1902	struct ifnet		*ifp;
1903	struct sk_bcom_hack	bhack[] = {
1904	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
1905	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
1906	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1907	{ 0, 0 } };
1908
1909	sc = sc_if->sk_softc;
1910	ifp = &sc_if->arpcom.ac_if;
1911
1912	/* Unreset the XMAC. */
1913	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
1914	DELAY(1000);
1915
1916	/* Reset the XMAC's internal state. */
1917	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
1918
1919	/* Save the XMAC II revision */
1920	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
1921
1922	/*
1923	 * Perform additional initialization for external PHYs,
1924	 * namely for the 1000baseTX cards that use the XMAC's
1925	 * GMII mode.
1926	 */
1927	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1928		int			i = 0;
1929		u_int32_t		val;
1930
1931		/* Take PHY out of reset. */
1932		val = sk_win_read_4(sc, SK_GPIO);
1933		if (sc_if->sk_port == SK_PORT_A)
1934			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
1935		else
1936			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
1937		sk_win_write_4(sc, SK_GPIO, val);
1938
1939		/* Enable GMII mode on the XMAC. */
1940		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
1941
1942		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1943		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
1944		DELAY(10000);
1945		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1946		    BRGPHY_MII_IMR, 0xFFF0);
1947
1948		/*
1949		 * Early versions of the BCM5400 apparently have
1950		 * a bug that requires them to have their reserved
1951		 * registers initialized to some magic values. I don't
1952		 * know what the numbers do, I'm just the messenger.
1953		 */
1954		if (sk_miibus_readreg(sc_if->sk_dev,
1955		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
1956			while(bhack[i].reg) {
1957				sk_miibus_writereg(sc_if->sk_dev,
1958				    SK_PHYADDR_BCOM, bhack[i].reg,
1959				    bhack[i].val);
1960				i++;
1961			}
1962		}
1963	}
1964
1965	/* Set station address */
1966	SK_XM_WRITE_2(sc_if, XM_PAR0,
1967	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
1968	SK_XM_WRITE_2(sc_if, XM_PAR1,
1969	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
1970	SK_XM_WRITE_2(sc_if, XM_PAR2,
1971	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
1972	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
1973
1974	if (ifp->if_flags & IFF_PROMISC) {
1975		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1976	} else {
1977		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1978	}
1979
1980	if (ifp->if_flags & IFF_BROADCAST) {
1981		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1982	} else {
1983		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1984	}
1985
1986	/* We don't need the FCS appended to the packet. */
1987	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
1988
1989	/* We want short frames padded to 60 bytes. */
1990	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
1991
1992	/*
1993	 * Enable the reception of all error frames. This is
1994	 * a necessary evil due to the design of the XMAC. The
1995	 * XMAC's receive FIFO is only 8K in size, however jumbo
1996	 * frames can be up to 9000 bytes in length. When bad
1997	 * frame filtering is enabled, the XMAC's RX FIFO operates
1998	 * in 'store and forward' mode. For this to work, the
1999	 * entire frame has to fit into the FIFO, but that means
2000	 * that jumbo frames larger than 8192 bytes will be
2001	 * truncated. Disabling all bad frame filtering causes
2002	 * the RX FIFO to operate in streaming mode, in which
2003	 * case the XMAC will start transferring frames out of the
2004	 * RX FIFO as soon as the FIFO threshold is reached.
2005	 */
2006	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2007	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2008	    XM_MODE_RX_INRANGELEN);
2009
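	/*
	 * Accept oversized (jumbo) frames only if the configured MTU
	 * actually calls for them.
	 */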
2010	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2011		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2012	else
2013		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2014
2015	/*
2016	 * Bump up the transmit threshold. This helps hold off transmit
2017	 * underruns when we're blasting traffic from both ports at once.
2018	 */
2019	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2020
2021	/* Set multicast filter */
2022	sk_setmulti(sc_if);
2023
2024	/* Clear and enable interrupts */
2025	SK_XM_READ_2(sc_if, XM_ISR);
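	/*
	 * The ISR read above is presumably clear-on-read; the mask is
	 * then set so that XMAC interrupts are generated only when the
	 * internal XMAC PHY is in use (with an external PHY, everything
	 * is masked).
	 */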
2026	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2027		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2028	else
2029		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2030
2031	/* Configure MAC arbiter */
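	/*
	 * The arbiter init and recovery timer values depend on the
	 * XMAC revision (B2 vs. C1); other revisions keep their
	 * power-on defaults.
	 */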
2032	switch(sc_if->sk_xmac_rev) {
2033	case XM_XMAC_REV_B2:
2034		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2035		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2036		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2037		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2038		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2039		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2040		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2041		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2042		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2043		break;
2044	case XM_XMAC_REV_C1:
2045		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2046		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2047		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2048		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2049		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2050		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2051		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2052		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2053		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2054		break;
2055	default:
2056		break;
2057	}
2058	sk_win_write_2(sc, SK_MACARB_CTL,
2059	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2060
2061	sc_if->sk_link = 1;
2062
2063	return;
2064}
2065
2066/*
2067 * Note that to properly initialize any part of the GEnesis chip,
2068 * you first have to take it out of reset mode.
2069 */
2070static void
2071sk_init(xsc)
2072	void			*xsc;
2073{
2074	struct sk_if_softc	*sc_if = xsc;
2075	struct sk_softc		*sc;
2076	struct ifnet		*ifp;
2077	struct mii_data		*mii;
2078
2079	SK_IF_LOCK(sc_if);
2080
2081	ifp = &sc_if->arpcom.ac_if;
2082	sc = sc_if->sk_softc;
2083	mii = device_get_softc(sc_if->sk_miibus);
2084
2085	/* Cancel pending I/O and free all RX/TX buffers. */
2086	sk_stop(sc_if);
2087
2088	/* Configure LINK_SYNC LED */
2089	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2090	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);
2091
2092	/* Configure RX LED */
2093	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);
2094
2095	/* Configure TX LED */
2096	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);
2097
2098	/* Configure I2C registers */
2099
2100	/* Configure XMAC(s) */
2101	sk_init_xmac(sc_if);
2102	mii_mediachg(mii);
2103
2104	/* Configure MAC FIFOs */
2105	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2106	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2107	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2108
2109	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2110	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2111	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2112
2113	/* Configure transmit arbiter(s) */
2114	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2115	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2116
2117	/* Configure RAMbuffers */
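	/*
	 * Each port's slice of the on-board SRAM is split into an RX
	 * and a TX RAM buffer (sk_rx_ramstart/ramend and
	 * sk_tx_ramstart/ramend); the read/write pointers are reset to
	 * the start of each region, and the TX buffer is put into
	 * store-and-forward mode before the buffers are enabled.
	 */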
2118	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2119	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2120	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2121	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2122	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2123	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2124
2125	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2126	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2127	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2128	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2129	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2130	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2131	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2132
2133	/* Configure BMUs */
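	/*
	 * Each BMU is pointed at the physical address of the first
	 * descriptor in its ring via vtophys(); the upper 32 address
	 * bits are written as zero, so the ring memory is assumed to
	 * reside below 4GB.
	 */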
2134	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2135	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2136	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2137	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2138
2139	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2140	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2141	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2142	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2143
2144	/* Init descriptors */
2145	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2146		printf("sk%d: initialization failed: no "
2147		    "memory for rx buffers\n", sc_if->sk_unit);
2148		sk_stop(sc_if);
2149		SK_IF_UNLOCK(sc_if);
2150		return;
2151	}
2152	sk_init_tx_ring(sc_if);
2153
2154	/* Configure interrupt handling */
2155	CSR_READ_4(sc, SK_ISSR);
2156	if (sc_if->sk_port == SK_PORT_A)
2157		sc->sk_intrmask |= SK_INTRS1;
2158	else
2159		sc->sk_intrmask |= SK_INTRS2;
2160
2161	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2162
2163	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2164
2165	/* Start BMUs. */
2166	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2167
2168	/* Enable XMACs TX and RX state machines */
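	/*
	 * Clearing IGNPAUSE presumably lets the MAC act on received
	 * pause frames once the TX and RX state machines are started.
	 */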
2169	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2170	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2171
2172	ifp->if_flags |= IFF_RUNNING;
2173	ifp->if_flags &= ~IFF_OACTIVE;
2174
2175	SK_IF_UNLOCK(sc_if);
2176
2177	return;
2178}
2179
2180static void
2181sk_stop(sc_if)
2182	struct sk_if_softc	*sc_if;
2183{
2184	int			i;
2185	struct sk_softc		*sc;
2186	struct ifnet		*ifp;
2187
2188	SK_IF_LOCK(sc_if);
2189	sc = sc_if->sk_softc;
2190	ifp = &sc_if->arpcom.ac_if;
2191
2192	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2193
2194	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2195		u_int32_t		val;
2196
2197		/* Put PHY back into reset. */
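		/*
		 * The PHY reset pin is wired to a GEnesis GPIO line
		 * (line 0 for port A, line 2 for port B): the direction
		 * bit is set and the data bit cleared, driving the pin
		 * low and holding the PHY in reset. sk_init_xmac()
		 * drives the same pin high to release the PHY.
		 */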
2198		val = sk_win_read_4(sc, SK_GPIO);
2199		if (sc_if->sk_port == SK_PORT_A) {
2200			val |= SK_GPIO_DIR0;
2201			val &= ~SK_GPIO_DAT0;
2202		} else {
2203			val |= SK_GPIO_DIR2;
2204			val &= ~SK_GPIO_DAT2;
2205		}
2206		sk_win_write_4(sc, SK_GPIO, val);
2207	}
2208
2209	/* Turn off various components of this interface. */
2210	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2211	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2212	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2213	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2214	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2215	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2216	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2217	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2218	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2219	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2220	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2221	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2222
2223	/* Disable interrupts */
2224	if (sc_if->sk_port == SK_PORT_A)
2225		sc->sk_intrmask &= ~SK_INTRS1;
2226	else
2227		sc->sk_intrmask &= ~SK_INTRS2;
2228	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2229
2230	SK_XM_READ_2(sc_if, XM_ISR);
2231	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2232
2233	/* Free RX and TX mbufs still in the queues. */
2234	for (i = 0; i < SK_RX_RING_CNT; i++) {
2235		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2236			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2237			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2238		}
2239	}
2240
2241	for (i = 0; i < SK_TX_RING_CNT; i++) {
2242		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2243			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2244			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2245		}
2246	}
2247
2248	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2249	SK_IF_UNLOCK(sc_if);
2250	return;
2251}
2252