if_sk.c revision 48973
1/*
2 * Copyright (c) 1997, 1998, 1999
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *	$Id: if_sk.c,v 1.51 1999/07/14 21:48:19 wpaul Exp $
33 */
34
35/*
36 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37 * the SK-984x series adapters, both single port and dual port.
38 * References:
39 * 	The XaQti XMAC II datasheet, http://www.xaqti.com
40 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
41 *
42 * Written by Bill Paul <wpaul@ee.columbia.edu>
43 * Department of Electrical Engineering
44 * Columbia University, New York City
45 */
46
47/*
48 * The SysKonnect gigabit ethernet adapters consist of two main
49 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
50 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
51 * components and a PHY while the GEnesis controller provides a PCI
52 * interface with DMA support. Each card may have between 512K and
53 * 2MB of SRAM on board depending on the configuration.
54 *
55 * The SysKonnect GEnesis controller can have either one or two XMAC
56 * chips connected to it, allowing single or dual port NIC configurations.
57 * SysKonnect has the distinction of being the only vendor on the market
58 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
59 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
60 * XMAC registers. This driver takes advantage of these features to allow
61 * both XMACs to operate as independent interfaces.
62 */
63
64#include "bpf.h"
65
66#include <sys/param.h>
67#include <sys/systm.h>
68#include <sys/sockio.h>
69#include <sys/mbuf.h>
70#include <sys/malloc.h>
71#include <sys/kernel.h>
72#include <sys/socket.h>
73#include <sys/queue.h>
74
75#include <net/if.h>
76#include <net/if_arp.h>
77#include <net/ethernet.h>
78#include <net/if_dl.h>
79#include <net/if_media.h>
80
81#if NBPF > 0
82#include <net/bpf.h>
83#endif
84
85#include <vm/vm.h>              /* for vtophys */
86#include <vm/pmap.h>            /* for vtophys */
87#include <machine/clock.h>      /* for DELAY */
88#include <machine/bus_pio.h>
89#include <machine/bus_memio.h>
90#include <machine/bus.h>
91#include <machine/resource.h>
92#include <sys/bus.h>
93#include <sys/rman.h>
94
95#include <pci/pcireg.h>
96#include <pci/pcivar.h>
97
98#define SK_USEIOSPACE
99
100#include <pci/if_skreg.h>
101#include <pci/xmaciireg.h>
102
103#ifndef lint
104static const char rcsid[] =
105	"$Id: if_sk.c,v 1.51 1999/07/14 21:48:19 wpaul Exp $";
106#endif
107
/*
 * Table of PCI vendor/device IDs this driver supports, with the
 * description string used by sk_probe(). Terminated by a NULL entry.
 */
static struct sk_type sk_devs[] = {
	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
	{ 0, 0, NULL }
};
112
113static unsigned long sk_count = 0;
114static int sk_probe		__P((device_t));
115static int sk_attach		__P((device_t));
116static int sk_detach		__P((device_t));
117static int sk_attach_xmac	__P((struct sk_softc *, int));
118static void sk_intr		__P((void *));
119static void sk_intr_xmac	__P((struct sk_if_softc *));
120static void sk_rxeof		__P((struct sk_if_softc *));
121static void sk_txeof		__P((struct sk_if_softc *));
122static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
123					u_int32_t *));
124static void sk_start		__P((struct ifnet *));
125static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
126static void sk_init		__P((void *));
127static void sk_init_xmac	__P((struct sk_if_softc *));
128static void sk_stop		__P((struct sk_if_softc *));
129static void sk_watchdog		__P((struct ifnet *));
130static void sk_shutdown		__P((device_t));
131static int sk_ifmedia_upd	__P((struct ifnet *));
132static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
133static void sk_reset		__P((struct sk_softc *));
134static int sk_newbuf		__P((struct sk_if_softc *,
135					struct sk_chain *, struct mbuf *));
136static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
137static void *sk_jalloc		__P((struct sk_if_softc *));
138static void sk_jfree		__P((caddr_t, u_int));
139static void sk_jref		__P((caddr_t, u_int));
140static int sk_init_rx_ring	__P((struct sk_if_softc *));
141static void sk_init_tx_ring	__P((struct sk_if_softc *));
142#ifdef notdef
143static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
144#endif
145static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
146static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
147static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
148static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
149static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
150static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
151static void sk_vpd_read_res	__P((struct sk_softc *,
152					struct vpd_res *, int));
153static void sk_vpd_read		__P((struct sk_softc *));
154static u_int16_t sk_phy_readreg	__P((struct sk_if_softc *, int));
155static void sk_phy_writereg	__P((struct sk_if_softc *, int, u_int32_t));
156static u_int32_t sk_calchash	__P((caddr_t));
157static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
158static void sk_setmulti		__P((struct sk_if_softc *));
159
/*
 * newbus glue: device method table, driver declaration and module
 * registration hanging the driver off the PCI bus.
 */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	sk_shutdown),
	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(sk, pci, sk_driver, sk_devclass, 0, 0);
178
179#define SK_SETBIT(sc, reg, x)		\
180	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
181
182#define SK_CLRBIT(sc, reg, x)		\
183	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
184
185#define SK_WIN_SETBIT_4(sc, reg, x)	\
186	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
187
188#define SK_WIN_CLRBIT_4(sc, reg, x)	\
189	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
190
191#define SK_WIN_SETBIT_2(sc, reg, x)	\
192	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
193
194#define SK_WIN_CLRBIT_2(sc, reg, x)	\
195	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
196
197#ifdef notdef
198static u_int32_t sk_win_read_4(sc, reg)
199	struct sk_softc		*sc;
200	int			reg;
201{
202	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
203	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
204}
205#endif
206
207static u_int16_t sk_win_read_2(sc, reg)
208	struct sk_softc		*sc;
209	int			reg;
210{
211	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
212	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
213}
214
215static u_int8_t sk_win_read_1(sc, reg)
216	struct sk_softc		*sc;
217	int			reg;
218{
219	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
220	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
221}
222
223static void sk_win_write_4(sc, reg, val)
224	struct sk_softc		*sc;
225	int			reg;
226	u_int32_t		val;
227{
228	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
229	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
230	return;
231}
232
233static void sk_win_write_2(sc, reg, val)
234	struct sk_softc		*sc;
235	int			reg;
236	u_int32_t		val;
237{
238	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
239	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
240	return;
241}
242
243static void sk_win_write_1(sc, reg, val)
244	struct sk_softc		*sc;
245	int			reg;
246	u_int32_t		val;
247{
248	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
249	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
250	return;
251}
252
253/*
254 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
256 * denoted by resource IDs. The SysKonnect VPD contains an ID string
257 * resource (the name of the adapter), a read-only area resource
258 * containing various key/data fields and a read/write area which
259 * can be used to store asset management information or log messages.
260 * We read the ID string and read-only into buffers attached to
261 * the controller softc structure for later use. At the moment,
262 * we only use the ID string during sk_attach().
263 */
264static u_int8_t sk_vpd_readbyte(sc, addr)
265	struct sk_softc		*sc;
266	int			addr;
267{
268	int			i;
269
270	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
271	for (i = 0; i < SK_TIMEOUT; i++) {
272		DELAY(1);
273		if (sk_win_read_2(sc,
274		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
275			break;
276	}
277
278	if (i == SK_TIMEOUT)
279		return(0);
280
281	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
282}
283
284static void sk_vpd_read_res(sc, res, addr)
285	struct sk_softc		*sc;
286	struct vpd_res		*res;
287	int			addr;
288{
289	int			i;
290	u_int8_t		*ptr;
291
292	ptr = (u_int8_t *)res;
293	for (i = 0; i < sizeof(struct vpd_res); i++)
294		ptr[i] = sk_vpd_readbyte(sc, i + addr);
295
296	return;
297}
298
299static void sk_vpd_read(sc)
300	struct sk_softc		*sc;
301{
302	int			pos = 0, i;
303	struct vpd_res		res;
304
305	if (sc->sk_vpd_prodname != NULL)
306		free(sc->sk_vpd_prodname, M_DEVBUF);
307	if (sc->sk_vpd_readonly != NULL)
308		free(sc->sk_vpd_readonly, M_DEVBUF);
309	sc->sk_vpd_prodname = NULL;
310	sc->sk_vpd_readonly = NULL;
311
312	sk_vpd_read_res(sc, &res, pos);
313
314	if (res.vr_id != VPD_RES_ID) {
315		printf("skc%d: bad VPD resource id: expected %x got %x\n",
316		    sc->sk_unit, VPD_RES_ID, res.vr_id);
317		return;
318	}
319
320	pos += sizeof(res);
321	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
322	for (i = 0; i < res.vr_len; i++)
323		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
324	sc->sk_vpd_prodname[i] = '\0';
325	pos += i;
326
327	sk_vpd_read_res(sc, &res, pos);
328
329	if (res.vr_id != VPD_RES_READ) {
330		printf("skc%d: bad VPD resource id: expected %x got %x\n",
331		    sc->sk_unit, VPD_RES_READ, res.vr_id);
332		return;
333	}
334
335	pos += sizeof(res);
336	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
337	for (i = 0; i < res.vr_len + 1; i++)
338		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
339
340	return;
341}
342
343static u_int16_t sk_phy_readreg(sc_if, reg)
344	struct sk_if_softc	*sc_if;
345	int			reg;
346{
347	int			i;
348
349	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
350	for (i = 0; i < SK_TIMEOUT; i++) {
351		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
352			break;
353	}
354
355	if (i == SK_TIMEOUT) {
356		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
357		return(0);
358	}
359
360	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
361}
362
363static void sk_phy_writereg(sc_if, reg, val)
364	struct sk_if_softc	*sc_if;
365	int			reg;
366	u_int32_t		val;
367{
368	int			i;
369
370	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
371	for (i = 0; i < SK_TIMEOUT; i++) {
372		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
373			break;
374	}
375
376	if (i == SK_TIMEOUT) {
377		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
378		return;
379	}
380
381	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
382	for (i = 0; i < SK_TIMEOUT; i++) {
383		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
384			break;
385	}
386
387	if (i == SK_TIMEOUT)
388		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
389
390	return;
391}
392
393#define SK_POLY		0xEDB88320
394#define SK_BITS		6
395
396static u_int32_t sk_calchash(addr)
397	caddr_t			addr;
398{
399	u_int32_t		idx, bit, data, crc;
400
401	/* Compute CRC for the address value. */
402	crc = 0xFFFFFFFF; /* initial value */
403
404	for (idx = 0; idx < 6; idx++) {
405		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
406			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
407	}
408
409	return (~crc & ((1 << SK_BITS) - 1));
410}
411
/*
 * Load a station address into one of the XMAC's perfect-filter
 * entries, written as three consecutive 16-bit register stores.
 *
 * NOTE(review): the u_int16_t loads assume `addr' is at least 16-bit
 * aligned and that host byte order matches what the XMAC expects --
 * fine on x86, verify on strict-alignment / big-endian ports.
 */
static void sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	caddr_t			addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
427
/*
 * Program the XMAC receive filter from the interface's multicast
 * list. The first XM_RXFILT_MAX - 1 groups go into the perfect
 * filter (slot 0 is reserved for the station address); any further
 * groups are folded into the 64-bit hash table.
 */
static void sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h, i;
	struct ifmultiaddr	*ifma;
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: set every hash-table bit. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
					ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards. NOTE(review): the
		 * cast of le_prev back to an ifmultiaddr pointer relies
		 * on the link being the first member of the structure;
		 * confirm against <net/if_var.h> before reusing this
		 * idiom elsewhere.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = sk_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Enable both filter modes and load the hash table halves. */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
490
491static int sk_init_rx_ring(sc_if)
492	struct sk_if_softc	*sc_if;
493{
494	struct sk_chain_data	*cd;
495	struct sk_ring_data	*rd;
496	int			i;
497
498	cd = &sc_if->sk_cdata;
499	rd = sc_if->sk_rdata;
500
501	bzero((char *)rd->sk_rx_ring,
502	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
503
504	for (i = 0; i < SK_RX_RING_CNT; i++) {
505		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
506		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
507			return(ENOBUFS);
508		if (i == (SK_RX_RING_CNT - 1)) {
509			cd->sk_rx_chain[i].sk_next =
510			    &cd->sk_rx_chain[0];
511			rd->sk_rx_ring[i].sk_next =
512			    vtophys(&rd->sk_rx_ring[0]);
513		} else {
514			cd->sk_rx_chain[i].sk_next =
515			    &cd->sk_rx_chain[i + 1];
516			rd->sk_rx_ring[i].sk_next =
517			    vtophys(&rd->sk_rx_ring[i + 1]);
518		}
519	}
520
521	sc_if->sk_cdata.sk_rx_prod = 0;
522	sc_if->sk_cdata.sk_rx_cons = 0;
523
524	return(0);
525}
526
527static void sk_init_tx_ring(sc_if)
528	struct sk_if_softc	*sc_if;
529{
530	struct sk_chain_data	*cd;
531	struct sk_ring_data	*rd;
532	int			i;
533
534	cd = &sc_if->sk_cdata;
535	rd = sc_if->sk_rdata;
536
537	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
538	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
539
540	for (i = 0; i < SK_TX_RING_CNT; i++) {
541		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
542		if (i == (SK_TX_RING_CNT - 1)) {
543			cd->sk_tx_chain[i].sk_next =
544			    &cd->sk_tx_chain[0];
545			rd->sk_tx_ring[i].sk_next =
546			    vtophys(&rd->sk_tx_ring[0]);
547		} else {
548			cd->sk_tx_chain[i].sk_next =
549			    &cd->sk_tx_chain[i + 1];
550			rd->sk_tx_ring[i].sk_next =
551			    vtophys(&rd->sk_tx_ring[i + 1]);
552		}
553	}
554
555	sc_if->sk_cdata.sk_tx_prod = 0;
556	sc_if->sk_cdata.sk_tx_cons = 0;
557	sc_if->sk_cdata.sk_tx_cnt = 0;
558
559	return;
560}
561
/*
 * Load a receive descriptor with a buffer. If `m' is NULL, allocate a
 * new mbuf header and attach a 9K buffer from our private jumbo pool
 * as external storage; otherwise recycle the caller's mbuf, resetting
 * its data pointer and lengths. Returns 0 on success, ENOBUFS if the
 * mbuf or jumbo buffer allocation fails.
 */
static int sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("sk%d: no memory for rx list -- "
			    "packet dropped!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf as external storage. */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
		    m_new->m_len = SK_MCLBYTES;
		/* Our own free/ref hooks keep the jumbo pool consistent. */
		m_new->m_ext.ext_free = sk_jfree;
		m_new->m_ext.ext_ref = sk_jref;
	} else {
		/*
	 	 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Hook the mbuf into the chain slot and load the descriptor. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
623
624/*
625 * Allocate jumbo buffer storage. The SysKonnect adapters support
626 * "jumbograms" (9K frames), although SysKonnect doesn't currently
627 * use them in their drivers. In order for us to use them, we need
628 * large 9K receive buffers, however standard mbuf clusters are only
629 * 2048 bytes in size. Consequently, we need to allocate and manage
630 * our own jumbo buffer pool. Fortunately, this does not require an
631 * excessive amount of additional code.
632 */
/*
 * Allocate the contiguous jumbo-buffer arena and carve it into
 * SK_JSLOTS slots, each preceded by an 8-byte back-pointer to the
 * softc (consumed by sk_jfree()/sk_jref()). Free slots are tracked
 * on sk_jfree_listhead. Returns 0 on success, ENOBUFS on failure.
 */
static int sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc	*sc_if;
{
	caddr_t			ptr;
	register int		i;
	struct sk_jpool_entry   *entry;

	/* Grab a big chunk o' storage. */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0x100000, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * sk_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		u_int64_t		**aptr;
		aptr = (u_int64_t **)ptr;
		aptr[0] = (u_int64_t *)sc_if;	/* back-pointer for sk_jfree() */
		ptr += sizeof(u_int64_t);
		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
		ptr += SK_MCLBYTES;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/*
			 * NOTE(review): this releases contigmalloc()ed
			 * memory with free() rather than contigfree(),
			 * and leaks the jpool entries allocated on
			 * earlier iterations -- confirm against the
			 * contigmalloc(9) API before changing.
			 */
			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}
685
686/*
687 * Allocate a jumbo buffer.
688 */
689static void *sk_jalloc(sc_if)
690	struct sk_if_softc	*sc_if;
691{
692	struct sk_jpool_entry   *entry;
693
694	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
695
696	if (entry == NULL) {
697#ifdef SK_VERBOSE
698		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
699#endif
700		return(NULL);
701	}
702
703	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
704	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
705	sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
706	return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
707}
708
709/*
710 * Adjust usage count on a jumbo buffer. In general this doesn't
711 * get used much because our jumbo buffers don't get passed around
712 * a lot, but it's implemented for correctness.
713 */
/*
 * Bump the reference count on a jumbo buffer. Called by the mbuf code
 * via m_ext.ext_ref; the softc is recovered from the back-pointer
 * stored just before the buffer by sk_alloc_jumbo_mem().
 */
static void sk_jref(buf, size)
	caddr_t			buf;
	u_int			size;
{
	struct sk_if_softc	*sc_if;
	u_int64_t		**aptr;
	register int		i;

	/* Extract the softc struct pointer stored before the buffer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jref: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jref: adjusting refcount of buf of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	/* Sanity-check the slot before bumping its reference count. */
	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jref: buffer already free!");
	else
		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;

	return;
}
747
748/*
749 * Release a jumbo buffer.
750 */
/*
 * Drop a reference on a jumbo buffer. Called by the mbuf code via
 * m_ext.ext_free; when the last reference goes away the slot is
 * returned to the free pool.
 */
static void sk_jfree(buf, size)
	caddr_t			buf;
	u_int			size;
{
	struct sk_if_softc	*sc_if;
	u_int64_t		**aptr;
	int		        i;
	struct sk_jpool_entry   *entry;

	/* Extract the softc struct pointer stored before the buffer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc_if = (struct sk_if_softc *)(aptr[0]);

	if (sc_if == NULL)
		panic("sk_jfree: can't find softc pointer!");

	if (size != SK_MCLBYTES)
		panic("sk_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");
	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
		panic("sk_jfree: buffer already free!");
	else {
		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
		/*
		 * Last reference gone: recycle an entry from the in-use
		 * list back to the free list. The entries are
		 * interchangeable tokens, so any in-use entry may be
		 * relabeled with this slot number.
		 */
		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
			if (entry == NULL)
				panic("sk_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
					  jpool_entries);
			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
					  entry, jpool_entries);
		}
	}

	return;
}
795
796/*
797 * Set media options.
798 */
799static int sk_ifmedia_upd(ifp)
800	struct ifnet		*ifp;
801{
802	struct sk_if_softc	*sc_if;
803	struct ifmedia		*ifm;
804
805	sc_if = ifp->if_softc;
806	ifm = &sc_if->ifmedia;
807
808	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
809		return(EINVAL);
810
811	switch(IFM_SUBTYPE(ifm->ifm_media)) {
812	case IFM_AUTO:
813		sk_phy_writereg(sc_if, XM_PHY_BMCR,
814		    XM_BMCR_RENEGOTIATE|XM_BMCR_AUTONEGENBL);
815		break;
816	case IFM_1000_LX:
817	case IFM_1000_SX:
818	case IFM_1000_CX:
819	case IFM_1000_TX:
820		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
821			sk_phy_writereg(sc_if, XM_PHY_BMCR, XM_BMCR_DUPLEX);
822		else
823			sk_phy_writereg(sc_if, XM_PHY_BMCR, 0);
824		break;
825	default:
826		printf("sk%d: invalid media selected\n", sc_if->sk_unit);
827		return(EINVAL);
828		break;
829	}
830
831	return(0);
832}
833
834/*
835 * Report current media status.
836 */
837static void sk_ifmedia_sts(ifp, ifmr)
838	struct ifnet		*ifp;
839	struct ifmediareq	*ifmr;
840{
841	struct sk_softc		*sc;
842	struct sk_if_softc	*sc_if;
843	u_int16_t		bmsr, extsts;
844
845	sc_if = ifp->if_softc;
846	sc = sc_if->sk_softc;
847
848	ifmr->ifm_status = IFM_AVALID;
849	ifmr->ifm_active = IFM_ETHER;
850
851	bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
852	extsts = sk_phy_readreg(sc_if, XM_PHY_EXTSTS);
853
854	if (!(bmsr & XM_BMSR_LINKSTAT))
855		return;
856
857	ifmr->ifm_status |= IFM_ACTIVE;
858	ifmr->ifm_active |= sc->sk_pmd;;
859	if (extsts & XM_EXTSTS_FULLDUPLEX)
860		ifmr->ifm_active |= IFM_FDX;
861	else
862		ifmr->ifm_active |= IFM_HDX;
863
864	return;
865}
866
/*
 * Handle interface ioctls: address changes, MTU changes (up to the
 * jumbo-frame limit), up/down and promiscuous flag transitions,
 * multicast list updates and media get/set. Runs at splimp() to keep
 * hardware state consistent with the interrupt path.
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Any MTU up to the jumbo limit is acceptable. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			sk_init(sc_if);	/* reinit to apply the new MTU */
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while the interface
			 * is running, toggle the XMAC promiscuous bit
			 * directly instead of doing a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags so the next call can detect transitions. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc_if->ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
932
933/*
934 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
935 * IDs against our list and return a device name if we find a match.
936 */
937static int sk_probe(dev)
938	device_t		dev;
939{
940	struct sk_type		*t;
941
942	t = sk_devs;
943
944	while(t->sk_name != NULL) {
945		if ((pci_get_vendor(dev) == t->sk_vid) &&
946		    (pci_get_device(dev) == t->sk_did)) {
947			device_set_desc(dev, t->sk_name);
948			return(0);
949		}
950		t++;
951	}
952
953	return(ENXIO);
954}
955
956/*
957 * Force the GEnesis into reset, then bring it out of reset.
958 */
static void sk_reset(sc)
	struct sk_softc		*sc;
{
	/*
	 * Assert both the software and master reset lines, let the
	 * chip settle, then release them. NOTE(review): this sequence
	 * is order-sensitive hardware poking, presumably per the
	 * GEnesis manual cited in the file header -- verify before
	 * reordering any of these writes.
	 */
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter: unreset it and set its timeouts. */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
         * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
993
994/*
995 * Each XMAC chip is attached as a separate logical IP interface.
996 * Single port cards will have only one logical interface of course.
997 */
998static int sk_attach_xmac(sc, port)
999	struct sk_softc		*sc;
1000	int			port;
1001{
1002	struct sk_if_softc	*sc_if;
1003	struct ifnet		*ifp;
1004	int			i;
1005
1006	if (sc == NULL)
1007		return(EINVAL);
1008
1009	if (port != SK_PORT_A && port != SK_PORT_B)
1010		return(EINVAL);
1011
1012	sc_if = malloc(sizeof(struct sk_if_softc), M_DEVBUF, M_NOWAIT);
1013	if (sc_if == NULL) {
1014		printf("sk%d: no memory for interface softc!\n", sc->sk_unit);
1015		return(ENOMEM);
1016	}
1017	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1018
1019	sc_if->sk_unit = sk_count;
1020	sc_if->sk_port = port;
1021	sk_count++;
1022	sc_if->sk_softc = sc;
1023	sc->sk_if[port] = sc_if;
1024	if (port == SK_PORT_A)
1025		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1026	if (port == SK_PORT_B)
1027		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1028
1029	/*
1030	 * Get station address for this interface. Note that
1031	 * dual port cards actually come with three station
1032	 * addresses: one for each port, plus an extra. The
1033	 * extra one is used by the SysKonnect driver software
1034	 * as a 'virtual' station address for when both ports
1035	 * are operating in failover mode. Currently we don't
1036	 * use this extra address.
1037	 */
1038	for (i = 0; i < ETHER_ADDR_LEN; i++)
1039		sc_if->arpcom.ac_enaddr[i] =
1040		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1041
1042	printf("sk%d: <XaQti Corp. XMAC II> at skc%d port %d\n",
1043	    sc_if->sk_unit, sc->sk_unit, port);
1044
1045	printf("sk%d: Ethernet address: %6D\n",
1046	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1047
1048	/*
1049	 * Set up RAM buffer addresses. The NIC will have a certain
1050	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1051	 * need to divide this up a) between the transmitter and
1052 	 * receiver and b) between the two XMACs, if this is a
 * dual port NIC. Our algorithm is to divide up the memory
1054	 * evenly so that everyone gets a fair share.
1055	 */
1056	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1057		u_int32_t		chunk, val;
1058
1059		chunk = sc->sk_ramsize / 2;
1060		val = sc->sk_rboff / sizeof(u_int64_t);
1061		sc_if->sk_rx_ramstart = val;
1062		val += (chunk / sizeof(u_int64_t));
1063		sc_if->sk_rx_ramend = val - 1;
1064		sc_if->sk_tx_ramstart = val;
1065		val += (chunk / sizeof(u_int64_t));
1066		sc_if->sk_tx_ramend = val - 1;
1067	} else {
1068		u_int32_t		chunk, val;
1069
1070		chunk = sc->sk_ramsize / 4;
1071		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1072		    sizeof(u_int64_t);
1073		sc_if->sk_rx_ramstart = val;
1074		val += (chunk / sizeof(u_int64_t));
1075		sc_if->sk_rx_ramend = val - 1;
1076		sc_if->sk_tx_ramstart = val;
1077		val += (chunk / sizeof(u_int64_t));
1078		sc_if->sk_tx_ramend = val - 1;
1079	}
1080
1081	/* Allocate the descriptor queues. */
1082	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1083	    M_NOWAIT, 0x100000, 0xffffffff, PAGE_SIZE, 0);
1084
1085	if (sc_if->sk_rdata == NULL) {
1086		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1087		free(sc_if, M_DEVBUF);
1088		sc->sk_if[port] = NULL;
1089		return(ENOMEM);
1090	}
1091
1092	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1093
1094	/* Try to allocate memory for jumbo buffers. */
1095	if (sk_alloc_jumbo_mem(sc_if)) {
1096		printf("sk%d: jumbo buffer allocation failed\n",
1097		    sc_if->sk_unit);
1098		free(sc_if->sk_rdata, M_DEVBUF);
1099		free(sc_if, M_DEVBUF);
1100		sc->sk_if[port] = NULL;
1101		return(ENOMEM);
1102	}
1103
1104	ifp = &sc_if->arpcom.ac_if;
1105	ifp->if_softc = sc_if;
1106	ifp->if_unit = sc_if->sk_unit;
1107	ifp->if_name = "sk";
1108	ifp->if_mtu = ETHERMTU;
1109	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1110	ifp->if_ioctl = sk_ioctl;
1111	ifp->if_output = ether_output;
1112	ifp->if_start = sk_start;
1113	ifp->if_watchdog = sk_watchdog;
1114	ifp->if_init = sk_init;
1115	ifp->if_baudrate = 1000000000;
1116	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1117
1118	/*
1119	 * Do ifmedia setup.
1120	 */
1121	ifmedia_init(&sc_if->ifmedia, 0, sk_ifmedia_upd, sk_ifmedia_sts);
1122	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd, 0, NULL);
1123	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_FDX, 0, NULL);
1124	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_HDX, 0, NULL);
1125	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1126	ifmedia_set(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO);
1127
1128	/*
1129	 * Call MI attach routines.
1130	 */
1131	if_attach(ifp);
1132	ether_ifattach(ifp);
1133
1134#if NBPF > 0
1135	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1136#endif
1137
1138	return(0);
1139}
1140
1141/*
1142 * Attach the interface. Allocate softc structures, do ifmedia
1143 * setup and ethernet/BPF attach.
1144 */
1145static int sk_attach(dev)
1146	device_t		dev;
1147{
1148	int			s;
1149	u_int32_t		command;
1150	struct sk_softc		*sc;
1151	int			unit, error = 0, rid;
1152
1153	s = splimp();
1154
1155	sc = device_get_softc(dev);
1156	unit = device_get_unit(dev);
1157	bzero(sc, sizeof(struct sk_softc));
1158
1159	/*
1160	 * Handle power management nonsense.
1161	 */
1162	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1163	if (command == 0x01) {
1164
1165		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1166		if (command & SK_PSTATE_MASK) {
1167			u_int32_t		iobase, membase, irq;
1168
1169			/* Save important PCI config data. */
1170			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1171			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1172			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1173
1174			/* Reset the power state. */
1175			printf("skc%d: chip is in D%d power mode "
1176			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1177			command &= 0xFFFFFFFC;
1178			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1179
1180			/* Restore PCI config data. */
1181			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1182			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1183			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1184		}
1185	}
1186
1187	/*
1188	 * Map control/status registers.
1189	 */
1190	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1191	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1192	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
1193	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1194
1195#ifdef SK_USEIOSPACE
1196	if (!(command & PCIM_CMD_PORTEN)) {
1197		printf("skc%d: failed to enable I/O ports!\n", unit);
1198		error = ENXIO;
1199		goto fail;
1200	}
1201
1202	rid = SK_PCI_LOIO;
1203	sc->sk_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
1204	    0, ~0, 1, RF_ACTIVE);
1205#else
1206	if (!(command & PCIM_CMD_MEMEN)) {
1207		printf("skc%d: failed to enable memory mapping!\n", unit);
1208		error = ENXIO;
1209		goto fail;
1210	}
1211
1212	rid = SK_PCI_LOMEM;
1213	sc->sk_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1214	    0, ~0, 1, RF_ACTIVE);
1215#endif
1216
1217	if (sc->sk_res == NULL) {
1218		printf("sk%d: couldn't map ports/memory\n", unit);
1219		error = ENXIO;
1220		goto fail;
1221	}
1222
1223	sc->sk_btag = rman_get_bustag(sc->sk_res);
1224	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1225
1226	/* Allocate interrupt */
1227	rid = 0;
1228	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1229	    RF_SHAREABLE | RF_ACTIVE);
1230
1231	if (sc->sk_irq == NULL) {
1232		printf("skc%d: couldn't map interrupt\n", unit);
1233		error = ENXIO;
1234		goto fail;
1235	}
1236
1237	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1238	    sk_intr, sc, &sc->sk_intrhand);
1239
1240	if (error) {
1241		printf("skc%d: couldn't set up irq\n", unit);
1242		goto fail;
1243	}
1244
1245	/* Reset the adapter. */
1246	sk_reset(sc);
1247
1248	sc->sk_unit = unit;
1249
1250	/* Read and save vital product data from EEPROM. */
1251	sk_vpd_read(sc);
1252
1253	/* Read and save RAM size and RAMbuffer offset */
1254	switch(sk_win_read_1(sc, SK_EPROM0)) {
1255	case SK_RAMSIZE_512K_64:
1256		sc->sk_ramsize = 0x80000;
1257		sc->sk_rboff = SK_RBOFF_0;
1258		break;
1259	case SK_RAMSIZE_1024K_64:
1260		sc->sk_ramsize = 0x100000;
1261		sc->sk_rboff = SK_RBOFF_80000;
1262		break;
1263	case SK_RAMSIZE_1024K_128:
1264		sc->sk_ramsize = 0x100000;
1265		sc->sk_rboff = SK_RBOFF_0;
1266		break;
1267	case SK_RAMSIZE_2048K_128:
1268		sc->sk_ramsize = 0x200000;
1269		sc->sk_rboff = SK_RBOFF_0;
1270		break;
1271	default:
1272		printf("skc%d: unknown ram size: %d\n",
1273		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1274		error = ENXIO;
1275		goto fail;
1276		break;
1277	}
1278
1279	/* Read and save physical media type */
1280	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1281	case SK_PMD_1000BASESX:
1282		sc->sk_pmd = IFM_1000_SX;
1283		break;
1284	case SK_PMD_1000BASELX:
1285		sc->sk_pmd = IFM_1000_LX;
1286		break;
1287	case SK_PMD_1000BASECX:
1288		sc->sk_pmd = IFM_1000_CX;
1289		break;
1290	case SK_PMD_1000BASETX:
1291		sc->sk_pmd = IFM_1000_TX;
1292		break;
1293	default:
1294		printf("skc%d: unknown media type: 0x%x\n",
1295		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1296		error = ENXIO;
1297		goto fail;
1298	}
1299
1300	/* Announce the product name. */
1301	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1302
1303	sk_attach_xmac(sc, SK_PORT_A);
1304	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
1305		sk_attach_xmac(sc, SK_PORT_B);
1306
1307	/* Turn on the 'driver is loaded' LED. */
1308	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1309
1310fail:
1311	splx(s);
1312	return(error);
1313}
1314
1315static int sk_detach(dev)
1316	device_t		dev;
1317{
1318	struct sk_softc		*sc;
1319	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1320	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1321	int			s;
1322
1323	s = splimp();
1324
1325	sc = device_get_softc(dev);
1326	sc_if0 = sc->sk_if[SK_PORT_A];
1327	ifp0 = &sc_if0->arpcom.ac_if;
1328	sk_stop(sc_if0);
1329	if_detach(ifp0);
1330	free(sc_if0->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1331	ifmedia_removeall(&sc_if0->ifmedia);
1332	if (sc->sk_if[SK_PORT_B] != NULL) {
1333		sc_if1 = sc->sk_if[SK_PORT_B];
1334		ifp1 = &sc_if1->arpcom.ac_if;
1335		sk_stop(sc_if1);
1336		if_detach(ifp1);
1337		free(sc_if1->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1338		ifmedia_removeall(&sc_if1->ifmedia);
1339	}
1340
1341	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1342	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1343#ifdef SK_USEIOSPACE
1344	bus_release_resource(dev, SYS_RES_IOPORT, SK_PCI_LOIO, sc->sk_res);
1345#else
1346	bus_release_resource(dev, SYS_RES_IOPORT, SK_PCI_LOMEM, sc->sk_res);
1347#endif
1348
1349	splx(s);
1350
1351	return(0);
1352}
1353
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring, one
 * fragment descriptor per non-empty mbuf, starting at *txidx.  On
 * success *txidx is advanced to the next free slot and 0 is returned;
 * if there is not enough room in the ring, ENOBUFS is returned and the
 * ring is left for the caller to retry later (any partially written
 * descriptors are harmless because their OWN bits are never armed --
 * see below).
 */
static int sk_encap(sc_if, m_head, txidx)
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head;
        u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * The OWN bit on the FIRST fragment is deliberately
			 * left clear here and only set once the whole chain
			 * is built (below), so the NIC can never start
			 * DMAing a half-constructed chain.  Later fragments
			 * get OWN immediately.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/*
	 * NOTE(review): the loop above only exits with m == NULL (the
	 * ring-full case returns from inside the loop), so this check
	 * looks like dead code -- confirm before removing.
	 */
	if (m != NULL)
		return(ENOBUFS);

	/* Mark the final fragment and request an interrupt on completion. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Now hand the whole chain to the NIC in one shot. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1402
/*
 * Transmit start routine: drain the interface send queue into the tx
 * descriptor ring, then kick the transmit BMU.  Stops early (setting
 * IFF_OACTIVE and re-queueing the packet) when the ring fills up;
 * sk_txeof() clears IFF_OACTIVE as descriptors are reclaimed.
 */
static void sk_start(ifp)
	struct ifnet		*ifp;
{
        struct sk_softc		*sc;
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head = NULL;
        u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* A non-NULL chain slot means the producer caught the consumer. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPF > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
#endif
	}

	/*
	 * Transmit.  NOTE(review): the BMU kick and the watchdog timer
	 * are done even when nothing was dequeued above -- presumably
	 * harmless, but confirm.
	 */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	return;
}
1451
1452
1453static void sk_watchdog(ifp)
1454	struct ifnet		*ifp;
1455{
1456	struct sk_if_softc	*sc_if;
1457
1458	sc_if = ifp->if_softc;
1459
1460	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1461	sk_init(sc_if);
1462
1463	return;
1464}
1465
1466static void sk_shutdown(dev)
1467	device_t		dev;
1468{
1469	struct sk_softc		*sc;
1470
1471	sc = device_get_softc(dev);
1472
1473	/* Turn off the 'driver is loaded' LED. */
1474	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1475
1476	/*
1477	 * Reset the GEnesis controller. Doing this should also
1478	 * assert the resets on the attached XMAC(s).
1479	 */
1480	sk_reset(sc);
1481
1482	return;
1483}
1484
/*
 * Receive-completion handler: walk the rx ring from sk_rx_prod until we
 * reach a descriptor the NIC still owns, passing each good frame up
 * through ether_input().  Jumbo receive buffers are recycled in place;
 * a fresh buffer is loaded for each consumed slot, falling back to an
 * mbuf copy of the frame when no replacement jumbo buffer is available.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Stop as soon as the chip still owns the descriptor. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		/* Detach the mbuf from the slot before advancing the index. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: count it and recycle the buffer back. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			/* Copy ETHER_ALIGN extra so the payload ends up
			   aligned after the m_adj() below. */
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

#if NBPF > 0
		/*
		 * In promiscuous mode the chip passes us everything;
		 * after handing BPF its copy, drop frames that are
		 * neither for our station address nor multicast.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, sc_if->arpcom.ac_enaddr,
			    ETHER_ADDR_LEN) && !(eh->ether_dhost[0] & 1))) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1564
/*
 * Transmit-completion handler: reclaim descriptors between the
 * consumer and producer indices that the NIC has finished with,
 * freeing the associated mbuf chains and cancelling the watchdog.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Chip still owns this descriptor: nothing more done yet. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one packet per completed chain, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/* The mbuf chain was stashed on the LAST fragment's slot. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Progress was made, so disarm the watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	/* Ring drained at least partially: allow sk_start() to run again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1601
/*
 * Service an interrupt from one XMAC: track link up/down transitions
 * (autonegotiation and link-event bits) and flush the FIFOs on
 * transmit underrun / receive overrun.  Reading XM_ISR clears the
 * latched status.
 */
static void sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;
	u_int16_t		bmsr;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	if (status & XM_ISR_LINKEVENT) {
		/* Mask further link events until autoneg brings it back. */
		SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
		if (sc_if->sk_link == 1) {
			printf("sk%d: gigabit link down\n", sc_if->sk_unit);
			sc_if->sk_link = 0;
		}
	}

	if (status & XM_ISR_AUTONEG_DONE) {
		bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
		if (bmsr & XM_BMSR_LINKSTAT) {
			sc_if->sk_link = 1;
			/* Re-enable link-event interrupts now that we're up. */
			SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
			printf("sk%d: gigabit link up\n", sc_if->sk_unit);
		}
	}

	/*
	 * NOTE(review): the two tests below check the ISR value against
	 * XM_IMR_* constants -- presumably the IMR and ISR bit layouts
	 * match, but confirm against the XMAC II register definitions.
	 */
	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	return;
}
1637
1638static void sk_intr(xsc)
1639	void			*xsc;
1640{
1641	struct sk_softc		*sc = xsc;
1642	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1643	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1644	u_int32_t		status;
1645
1646	sc_if0 = sc->sk_if[SK_PORT_A];
1647	sc_if1 = sc->sk_if[SK_PORT_B];
1648
1649	if (sc_if0 != NULL)
1650		ifp0 = &sc_if0->arpcom.ac_if;
1651	if (sc_if1 != NULL)
1652		ifp1 = &sc_if0->arpcom.ac_if;
1653
1654	for (;;) {
1655		status = CSR_READ_4(sc, SK_ISSR);
1656		if (!(status & sc->sk_intrmask))
1657			break;
1658
1659		/* Handle receive interrupts first. */
1660		if (status & SK_ISR_RX1_EOF) {
1661			sk_rxeof(sc_if0);
1662			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1663			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1664		}
1665		if (status & SK_ISR_RX2_EOF) {
1666			sk_rxeof(sc_if1);
1667			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1668			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1669		}
1670
1671		/* Then transmit interrupts. */
1672		if (status & SK_ISR_TX1_S_EOF) {
1673			sk_txeof(sc_if0);
1674			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1675			    SK_TXBMU_CLR_IRQ_EOF);
1676		}
1677		if (status & SK_ISR_TX2_S_EOF) {
1678			sk_txeof(sc_if1);
1679			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1680			    SK_TXBMU_CLR_IRQ_EOF);
1681		}
1682
1683		/* Then MAC interrupts. */
1684		if (status & SK_ISR_MAC1)
1685			sk_intr_xmac(sc_if0);
1686
1687		if (status & SK_ISR_MAC2)
1688			sk_intr_xmac(sc_if1);
1689	}
1690
1691	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1692
1693	return;
1694}
1695
/*
 * Bring one XMAC out of reset and program it: station address,
 * promiscuous/broadcast filtering, FCS stripping, autopadding,
 * error-frame pass-through (required for jumbo frames, see below),
 * transmit threshold, multicast filter, interrupts and the MAC
 * arbiter workarounds for the various XMAC revisions.
 */
static void sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/* Set station address (three 16-bit halves of the MAC address). */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversized frames only when the MTU actually needs them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts (reading XM_ISR clears it). */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);

	sc_if->sk_link = 0;

	/* Configure MAC arbiter (per-revision chip workarounds). */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): the B2 recovery value is also used for
		   C1 here -- confirm this is intentional. */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	return;
}
1809
1810/*
1811 * Note that to properly initialize any part of the GEnesis chip,
1812 * you first have to take it out of reset mode.
1813 */
/*
 * (Re)initialize one interface: stop any pending activity, program the
 * LEDs, XMAC, FIFOs, transmit arbiter, RAM buffers and BMUs, set up
 * the descriptor rings, enable interrupts for this port and start the
 * receive BMU.  Called from sk_attach_xmac via ifp->if_init, from
 * sk_watchdog, and from the ioctl path; must be called at splimp or
 * takes it itself (as here).
 */
static void sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);

	/* Configure MAC FIFOs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers (bounds computed in sk_attach_xmac). */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs (point them at the descriptor rings). */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		(void)splx(s);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling (clear stale status first). */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return;
}
1916
1917static void sk_stop(sc_if)
1918	struct sk_if_softc	*sc_if;
1919{
1920	int			i;
1921	struct sk_softc		*sc;
1922
1923	sc = sc_if->sk_softc;
1924
1925	/* Turn off various components of this interface. */
1926	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
1927	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
1928	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
1929	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1930	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
1931	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1932	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
1933	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1934	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1935	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
1936	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
1937
1938	/* Disable interrupts */
1939	if (sc_if->sk_port == SK_PORT_A)
1940		sc->sk_intrmask &= ~SK_INTRS1;
1941	else
1942		sc->sk_intrmask &= ~SK_INTRS2;
1943	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1944
1945	/* Free RX and TX mbufs still in the queues. */
1946	for (i = 0; i < SK_RX_RING_CNT; i++) {
1947		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
1948			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
1949			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
1950		}
1951	}
1952
1953	for (i = 0; i < SK_TX_RING_CNT; i++) {
1954		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
1955			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
1956			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
1957		}
1958	}
1959
1960	return;
1961}
1962