if_sk.c revision 51455
1/*
2 * Copyright (c) 1997, 1998, 1999
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: head/sys/dev/sk/if_sk.c 51455 1999-09-20 08:47:11Z wpaul $
33 */
34
35/*
36 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37 * the SK-984x series adapters, both single port and dual port.
38 * References:
39 * 	The XaQti XMAC II datasheet, http://www.xaqti.com
40 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
41 *
42 * Written by Bill Paul <wpaul@ee.columbia.edu>
43 * Department of Electrical Engineering
44 * Columbia University, New York City
45 */
46
47/*
48 * The SysKonnect gigabit ethernet adapters consist of two main
49 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
50 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
51 * components and a PHY while the GEnesis controller provides a PCI
52 * interface with DMA support. Each card may have between 512K and
53 * 2MB of SRAM on board depending on the configuration.
54 *
55 * The SysKonnect GEnesis controller can have either one or two XMAC
56 * chips connected to it, allowing single or dual port NIC configurations.
57 * SysKonnect has the distinction of being the only vendor on the market
58 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
59 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
60 * XMAC registers. This driver takes advantage of these features to allow
61 * both XMACs to operate as independent interfaces.
62 */
63
64#include "bpf.h"
65
66#include <sys/param.h>
67#include <sys/systm.h>
68#include <sys/sockio.h>
69#include <sys/mbuf.h>
70#include <sys/malloc.h>
71#include <sys/kernel.h>
72#include <sys/socket.h>
73#include <sys/queue.h>
74
75#include <net/if.h>
76#include <net/if_arp.h>
77#include <net/ethernet.h>
78#include <net/if_dl.h>
79#include <net/if_media.h>
80
81#if NBPF > 0
82#include <net/bpf.h>
83#endif
84
85#include <vm/vm.h>              /* for vtophys */
86#include <vm/pmap.h>            /* for vtophys */
87#include <machine/clock.h>      /* for DELAY */
88#include <machine/bus_pio.h>
89#include <machine/bus_memio.h>
90#include <machine/bus.h>
91#include <machine/resource.h>
92#include <sys/bus.h>
93#include <sys/rman.h>
94
95#include <pci/pcireg.h>
96#include <pci/pcivar.h>
97
98#define SK_USEIOSPACE
99
100#include <pci/if_skreg.h>
101#include <pci/xmaciireg.h>
102
103#ifndef lint
104static const char rcsid[] =
105  "$FreeBSD: head/sys/dev/sk/if_sk.c 51455 1999-09-20 08:47:11Z wpaul $";
106#endif
107
108static struct sk_type sk_devs[] = {
109	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
110	{ 0, 0, NULL }
111};
112
113static int sk_probe		__P((device_t));
114static int sk_attach		__P((device_t));
115static int sk_detach		__P((device_t));
116static int sk_attach_xmac	__P((struct sk_softc *, int));
117static void sk_intr		__P((void *));
118static void sk_intr_xmac	__P((struct sk_if_softc *));
119static void sk_rxeof		__P((struct sk_if_softc *));
120static void sk_txeof		__P((struct sk_if_softc *));
121static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
122					u_int32_t *));
123static void sk_start		__P((struct ifnet *));
124static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
125static void sk_init		__P((void *));
126static void sk_init_xmac	__P((struct sk_if_softc *));
127static void sk_stop		__P((struct sk_if_softc *));
128static void sk_watchdog		__P((struct ifnet *));
129static void sk_shutdown		__P((device_t));
130static int sk_ifmedia_upd	__P((struct ifnet *));
131static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
132static void sk_reset		__P((struct sk_softc *));
133static int sk_newbuf		__P((struct sk_if_softc *,
134					struct sk_chain *, struct mbuf *));
135static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
136static void *sk_jalloc		__P((struct sk_if_softc *));
137static void sk_jfree		__P((caddr_t, u_int));
138static void sk_jref		__P((caddr_t, u_int));
139static int sk_init_rx_ring	__P((struct sk_if_softc *));
140static void sk_init_tx_ring	__P((struct sk_if_softc *));
141#ifdef notdef
142static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
143#endif
144static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
145static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
146static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
147static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
148static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
149static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
150static void sk_vpd_read_res	__P((struct sk_softc *,
151					struct vpd_res *, int));
152static void sk_vpd_read		__P((struct sk_softc *));
153static u_int16_t sk_phy_readreg	__P((struct sk_if_softc *, int));
154static void sk_phy_writereg	__P((struct sk_if_softc *, int, u_int32_t));
155static u_int32_t sk_calchash	__P((caddr_t));
156static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
157static void sk_setmulti		__P((struct sk_if_softc *));
158
159#ifdef SK_USEIOSPACE
160#define SK_RES		SYS_RES_IOPORT
161#define SK_RID		SK_PCI_LOIO
162#else
163#define SK_RES		SYS_RES_MEMORY
164#define SK_RID		SK_PCI_LOMEM
165#endif
166
167static device_method_t sk_methods[] = {
168	/* Device interface */
169	DEVMETHOD(device_probe,		sk_probe),
170	DEVMETHOD(device_attach,	sk_attach),
171	DEVMETHOD(device_detach,	sk_detach),
172	DEVMETHOD(device_shutdown,	sk_shutdown),
173	{ 0, 0 }
174};
175
176static driver_t sk_driver = {
177	"skc",
178	sk_methods,
179	sizeof(struct sk_softc)
180};
181
182static devclass_t sk_devclass;
183
184DRIVER_MODULE(if_skc, pci, sk_driver, sk_devclass, 0, 0);
185
186#define SK_SETBIT(sc, reg, x)		\
187	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
188
189#define SK_CLRBIT(sc, reg, x)		\
190	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
191
192#define SK_WIN_SETBIT_4(sc, reg, x)	\
193	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
194
195#define SK_WIN_CLRBIT_4(sc, reg, x)	\
196	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
197
198#define SK_WIN_SETBIT_2(sc, reg, x)	\
199	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
200
201#define SK_WIN_CLRBIT_2(sc, reg, x)	\
202	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
203
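/*
 * Register window access helpers. Most GEnesis registers are reached
 * through a banked window scheme: the window number is written to the
 * SK_RAP register first, and the target register is then accessed at
 * its offset within SK_WIN_BASE. A minimal sketch of the pattern that
 * the sk_win_*() routines below implement:
 *
 *	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
 *	val = CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg));
 */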
204#ifdef notdef
205static u_int32_t sk_win_read_4(sc, reg)
206	struct sk_softc		*sc;
207	int			reg;
208{
209	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
210	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
211}
212#endif
213
214static u_int16_t sk_win_read_2(sc, reg)
215	struct sk_softc		*sc;
216	int			reg;
217{
218	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
219	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
220}
221
222static u_int8_t sk_win_read_1(sc, reg)
223	struct sk_softc		*sc;
224	int			reg;
225{
226	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
227	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
228}
229
230static void sk_win_write_4(sc, reg, val)
231	struct sk_softc		*sc;
232	int			reg;
233	u_int32_t		val;
234{
235	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
236	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
237	return;
238}
239
240static void sk_win_write_2(sc, reg, val)
241	struct sk_softc		*sc;
242	int			reg;
243	u_int32_t		val;
244{
245	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
246	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
247	return;
248}
249
250static void sk_win_write_1(sc, reg, val)
251	struct sk_softc		*sc;
252	int			reg;
253	u_int32_t		val;
254{
255	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
256	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
257	return;
258}
259
260/*
261 * The VPD EEPROM contains Vital Product Data, as suggested in
262 * the PCI 2.1 specification. The VPD data is separated into areas
263 * denoted by resource IDs. The SysKonnect VPD contains an ID string
264 * resource (the name of the adapter), a read-only area resource
265 * containing various key/data fields and a read/write area which
266 * can be used to store asset management information or log messages.
267 * We read the ID string and read-only area into buffers attached to
268 * the controller softc structure for later use. At the moment,
269 * we only use the ID string during sk_attach().
270 */
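/*
 * Illustrative layout only (pieced together from the code below, not
 * from the VPD specification): each area is preceded by a struct
 * vpd_res header giving its type (vr_id) and length (vr_len), and
 * sk_vpd_read() walks the first two areas in order:
 *
 *	[vpd_res: VPD_RES_ID  ][adapter name string      ]
 *	[vpd_res: VPD_RES_READ][read-only key/data fields ]
 *	[vpd_res header       ][read/write area (not read)]
 */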
271static u_int8_t sk_vpd_readbyte(sc, addr)
272	struct sk_softc		*sc;
273	int			addr;
274{
275	int			i;
276
277	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
278	for (i = 0; i < SK_TIMEOUT; i++) {
279		DELAY(1);
280		if (sk_win_read_2(sc,
281		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
282			break;
283	}
284
285	if (i == SK_TIMEOUT)
286		return(0);
287
288	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
289}
290
291static void sk_vpd_read_res(sc, res, addr)
292	struct sk_softc		*sc;
293	struct vpd_res		*res;
294	int			addr;
295{
296	int			i;
297	u_int8_t		*ptr;
298
299	ptr = (u_int8_t *)res;
300	for (i = 0; i < sizeof(struct vpd_res); i++)
301		ptr[i] = sk_vpd_readbyte(sc, i + addr);
302
303	return;
304}
305
306static void sk_vpd_read(sc)
307	struct sk_softc		*sc;
308{
309	int			pos = 0, i;
310	struct vpd_res		res;
311
312	if (sc->sk_vpd_prodname != NULL)
313		free(sc->sk_vpd_prodname, M_DEVBUF);
314	if (sc->sk_vpd_readonly != NULL)
315		free(sc->sk_vpd_readonly, M_DEVBUF);
316	sc->sk_vpd_prodname = NULL;
317	sc->sk_vpd_readonly = NULL;
318
319	sk_vpd_read_res(sc, &res, pos);
320
321	if (res.vr_id != VPD_RES_ID) {
322		printf("skc%d: bad VPD resource id: expected %x got %x\n",
323		    sc->sk_unit, VPD_RES_ID, res.vr_id);
324		return;
325	}
326
327	pos += sizeof(res);
328	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
329	for (i = 0; i < res.vr_len; i++)
330		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
331	sc->sk_vpd_prodname[i] = '\0';
332	pos += i;
333
334	sk_vpd_read_res(sc, &res, pos);
335
336	if (res.vr_id != VPD_RES_READ) {
337		printf("skc%d: bad VPD resource id: expected %x got %x\n",
338		    sc->sk_unit, VPD_RES_READ, res.vr_id);
339		return;
340	}
341
342	pos += sizeof(res);
343	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
344	for (i = 0; i < res.vr_len; i++)
345		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
346
347	return;
348}
349
350static u_int16_t sk_phy_readreg(sc_if, reg)
351	struct sk_if_softc	*sc_if;
352	int			reg;
353{
354	int			i;
355
356	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
357	for (i = 0; i < SK_TIMEOUT; i++) {
358		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
359			break;
360	}
361
362	if (i == SK_TIMEOUT) {
363		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
364		return(0);
365	}
366
367	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
368}
369
370static void sk_phy_writereg(sc_if, reg, val)
371	struct sk_if_softc	*sc_if;
372	int			reg;
373	u_int32_t		val;
374{
375	int			i;
376
377	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
378	for (i = 0; i < SK_TIMEOUT; i++) {
379		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
380			break;
381	}
382
383	if (i == SK_TIMEOUT) {
384		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
385		return;
386	}
387
388	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
389	for (i = 0; i < SK_TIMEOUT; i++) {
390		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
391			break;
392	}
393
394	if (i == SK_TIMEOUT)
395		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
396
397	return;
398}
399
400#define SK_POLY		0xEDB88320
401#define SK_BITS		6
402
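/*
 * The hash below is a standard bitwise CRC-32 (polynomial 0xEDB88320,
 * the reflected Ethernet polynomial) computed over the six address
 * bytes; the low SK_BITS (6) bits of the inverted result select one
 * of 64 hash bins. As used in sk_setmulti(), bins 0-31 set a bit in
 * the word written to XM_MAR0 and bins 32-63 a bit in the word
 * written to XM_MAR2.
 */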
403static u_int32_t sk_calchash(addr)
404	caddr_t			addr;
405{
406	u_int32_t		idx, bit, data, crc;
407
408	/* Compute CRC for the address value. */
409	crc = 0xFFFFFFFF; /* initial value */
410
411	for (idx = 0; idx < 6; idx++) {
412		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
413			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
414	}
415
416	return (~crc & ((1 << SK_BITS) - 1));
417}
418
419static void sk_setfilt(sc_if, addr, slot)
420	struct sk_if_softc	*sc_if;
421	caddr_t			addr;
422	int			slot;
423{
424	int			base;
425
426	base = XM_RXFILT_ENTRY(slot);
427
428	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
429	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
430	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
431
432	return;
433}
434
435static void sk_setmulti(sc_if)
436	struct sk_if_softc	*sc_if;
437{
438	struct ifnet		*ifp;
439	u_int32_t		hashes[2] = { 0, 0 };
440	int			h, i;
441	struct ifmultiaddr	*ifma;
442	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
443
444	ifp = &sc_if->arpcom.ac_if;
445
446	/* First, zot all the existing filters. */
447	for (i = 1; i < XM_RXFILT_MAX; i++)
448		sk_setfilt(sc_if, (caddr_t)&dummy, i);
449	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
450	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
451
452	/* Now program new ones. */
453	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
454		hashes[0] = 0xFFFFFFFF;
455		hashes[1] = 0xFFFFFFFF;
456	} else {
457		i = 1;
458		/* First find the tail of the list. */
459		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
460					ifma = ifma->ifma_link.le_next) {
461			if (ifma->ifma_link.le_next == NULL)
462				break;
463		}
464		/* Now traverse the list backwards. */
465		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
466			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
467			if (ifma->ifma_addr->sa_family != AF_LINK)
468				continue;
469			/*
470			 * Program the first XM_RXFILT_MAX multicast groups
471			 * into the perfect filter. For all others,
472			 * use the hash table.
473			 */
474			if (i < XM_RXFILT_MAX) {
475				sk_setfilt(sc_if,
476			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
477				i++;
478				continue;
479			}
480
481			h = sk_calchash(
482				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
483			if (h < 32)
484				hashes[0] |= (1 << h);
485			else
486				hashes[1] |= (1 << (h - 32));
487		}
488	}
489
490	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
491	    XM_MODE_RX_USE_PERFECT);
492	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
493	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
494
495	return;
496}
497
498static int sk_init_rx_ring(sc_if)
499	struct sk_if_softc	*sc_if;
500{
501	struct sk_chain_data	*cd;
502	struct sk_ring_data	*rd;
503	int			i;
504
505	cd = &sc_if->sk_cdata;
506	rd = sc_if->sk_rdata;
507
508	bzero((char *)rd->sk_rx_ring,
509	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
510
511	for (i = 0; i < SK_RX_RING_CNT; i++) {
512		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
513		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
514			return(ENOBUFS);
515		if (i == (SK_RX_RING_CNT - 1)) {
516			cd->sk_rx_chain[i].sk_next =
517			    &cd->sk_rx_chain[0];
518			rd->sk_rx_ring[i].sk_next =
519			    vtophys(&rd->sk_rx_ring[0]);
520		} else {
521			cd->sk_rx_chain[i].sk_next =
522			    &cd->sk_rx_chain[i + 1];
523			rd->sk_rx_ring[i].sk_next =
524			    vtophys(&rd->sk_rx_ring[i + 1]);
525		}
526	}
527
528	sc_if->sk_cdata.sk_rx_prod = 0;
529	sc_if->sk_cdata.sk_rx_cons = 0;
530
531	return(0);
532}
533
534static void sk_init_tx_ring(sc_if)
535	struct sk_if_softc	*sc_if;
536{
537	struct sk_chain_data	*cd;
538	struct sk_ring_data	*rd;
539	int			i;
540
541	cd = &sc_if->sk_cdata;
542	rd = sc_if->sk_rdata;
543
544	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
545	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
546
547	for (i = 0; i < SK_TX_RING_CNT; i++) {
548		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
549		if (i == (SK_TX_RING_CNT - 1)) {
550			cd->sk_tx_chain[i].sk_next =
551			    &cd->sk_tx_chain[0];
552			rd->sk_tx_ring[i].sk_next =
553			    vtophys(&rd->sk_tx_ring[0]);
554		} else {
555			cd->sk_tx_chain[i].sk_next =
556			    &cd->sk_tx_chain[i + 1];
557			rd->sk_tx_ring[i].sk_next =
558			    vtophys(&rd->sk_tx_ring[i + 1]);
559		}
560	}
561
562	sc_if->sk_cdata.sk_tx_prod = 0;
563	sc_if->sk_cdata.sk_tx_cons = 0;
564	sc_if->sk_cdata.sk_tx_cnt = 0;
565
566	return;
567}
568
569static int sk_newbuf(sc_if, c, m)
570	struct sk_if_softc	*sc_if;
571	struct sk_chain		*c;
572	struct mbuf		*m;
573{
574	struct mbuf		*m_new = NULL;
575	struct sk_rx_desc	*r;
576
577	if (m == NULL) {
578		caddr_t			*buf = NULL;
579
580		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
581		if (m_new == NULL) {
582			printf("sk%d: no memory for rx list -- "
583			    "packet dropped!\n", sc_if->sk_unit);
584			return(ENOBUFS);
585		}
586
587		/* Allocate the jumbo buffer */
588		buf = sk_jalloc(sc_if);
589		if (buf == NULL) {
590			m_freem(m_new);
591#ifdef SK_VERBOSE
592			printf("sk%d: jumbo allocation failed "
593			    "-- packet dropped!\n", sc_if->sk_unit);
594#endif
595			return(ENOBUFS);
596		}
597
598		/* Attach the buffer to the mbuf */
599		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
600		m_new->m_flags |= M_EXT;
601		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
602		    m_new->m_len = SK_MCLBYTES;
603		m_new->m_ext.ext_free = sk_jfree;
604		m_new->m_ext.ext_ref = sk_jref;
605	} else {
606		/*
607	 	 * We're re-using a previously allocated mbuf;
608		 * be sure to re-init pointers and lengths to
609		 * default values.
610		 */
611		m_new = m;
612		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
613		m_new->m_data = m_new->m_ext.ext_buf;
614	}
615
616	/*
617	 * Adjust alignment so packet payload begins on a
618	 * longword boundary. Mandatory for Alpha, useful on
619	 * x86 too.
620	 */
621	m_adj(m_new, ETHER_ALIGN);
622
623	r = c->sk_desc;
624	c->sk_mbuf = m_new;
625	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
626	r->sk_ctl = m_new->m_len | SK_RXSTAT;
627
628	return(0);
629}
630
631/*
632 * Allocate jumbo buffer storage. The SysKonnect adapters support
633 * "jumbograms" (9K frames), although SysKonnect doesn't currently
634 * use them in their drivers. In order for us to use them, we need
635 * large 9K receive buffers, however standard mbuf clusters are only
636 * 2048 bytes in size. Consequently, we need to allocate and manage
637 * our own jumbo buffer pool. Fortunately, this does not require an
638 * excessive amount of additional code.
639 */
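/*
 * Rough picture of one pool slot as built below (slot size assumed to
 * match the SK_JLEN definition in the header):
 *
 *	+----------------------+------------------------------------+
 *	| softc back-pointer   | jumbo buffer (SK_MCLBYTES bytes)   |
 *	| (sizeof(u_int64_t))  |                                    |
 *	+----------------------+------------------------------------+
 *	^                      ^
 *	start of slot          address returned by sk_jalloc()
 */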
640static int sk_alloc_jumbo_mem(sc_if)
641	struct sk_if_softc	*sc_if;
642{
643	caddr_t			ptr;
644	register int		i;
645	struct sk_jpool_entry   *entry;
646
647	/* Grab a big chunk o' storage. */
648	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
649	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
650
651	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
652		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
653		return(ENOBUFS);
654	}
655
656	SLIST_INIT(&sc_if->sk_jfree_listhead);
657	SLIST_INIT(&sc_if->sk_jinuse_listhead);
658
659	/*
660	 * Now divide it up into 9K pieces and save the addresses
661	 * in an array. Note that we play an evil trick here by using
662	 * the first few bytes in the buffer to hold the address
663	 * of the softc structure for this interface. This is because
664	 * sk_jfree() needs it, but it is called by the mbuf management
665	 * code which will not pass it to us explicitly.
666	 */
667	ptr = sc_if->sk_cdata.sk_jumbo_buf;
668	for (i = 0; i < SK_JSLOTS; i++) {
669		u_int64_t		**aptr;
670		aptr = (u_int64_t **)ptr;
671		aptr[0] = (u_int64_t *)sc_if;
672		ptr += sizeof(u_int64_t);
673		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
674		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
675		ptr += SK_MCLBYTES;
676		entry = malloc(sizeof(struct sk_jpool_entry),
677		    M_DEVBUF, M_NOWAIT);
678		if (entry == NULL) {
679			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
680			sc_if->sk_cdata.sk_jumbo_buf = NULL;
681			printf("sk%d: no memory for jumbo "
682			    "buffer queue!\n", sc_if->sk_unit);
683			return(ENOBUFS);
684		}
685		entry->slot = i;
686		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
687		    entry, jpool_entries);
688	}
689
690	return(0);
691}
692
693/*
694 * Allocate a jumbo buffer.
695 */
696static void *sk_jalloc(sc_if)
697	struct sk_if_softc	*sc_if;
698{
699	struct sk_jpool_entry   *entry;
700
701	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
702
703	if (entry == NULL) {
704#ifdef SK_VERBOSE
705		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
706#endif
707		return(NULL);
708	}
709
710	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
711	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
712	sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
713	return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
714}
715
716/*
717 * Adjust usage count on a jumbo buffer. In general this doesn't
718 * get used much because our jumbo buffers don't get passed around
719 * a lot, but it's implemented for correctness.
720 */
721static void sk_jref(buf, size)
722	caddr_t			buf;
723	u_int			size;
724{
725	struct sk_if_softc	*sc_if;
726	u_int64_t		**aptr;
727	register int		i;
728
729	/* Extract the softc struct pointer. */
730	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
731	sc_if = (struct sk_if_softc *)(aptr[0]);
732
733	if (sc_if == NULL)
734		panic("sk_jref: can't find softc pointer!");
735
736	if (size != SK_MCLBYTES)
737		panic("sk_jref: adjusting refcount of buf of wrong size!");
738
739	/* calculate the slot this buffer belongs to */
740
741	i = ((vm_offset_t)aptr
742	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
743
744	if ((i < 0) || (i >= SK_JSLOTS))
745		panic("sk_jref: asked to reference buffer "
746		    "that we don't manage!");
747	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
748		panic("sk_jref: buffer already free!");
749	else
750		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
751
752	return;
753}
754
755/*
756 * Release a jumbo buffer.
757 */
758static void sk_jfree(buf, size)
759	caddr_t			buf;
760	u_int			size;
761{
762	struct sk_if_softc	*sc_if;
763	u_int64_t		**aptr;
764	int		        i;
765	struct sk_jpool_entry   *entry;
766
767	/* Extract the softc struct pointer. */
768	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
769	sc_if = (struct sk_if_softc *)(aptr[0]);
770
771	if (sc_if == NULL)
772		panic("sk_jfree: can't find softc pointer!");
773
774	if (size != SK_MCLBYTES)
775		panic("sk_jfree: freeing buffer of wrong size!");
776
777	/* calculate the slot this buffer belongs to */
778
779	i = ((vm_offset_t)aptr
780	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
781
782	if ((i < 0) || (i >= SK_JSLOTS))
783		panic("sk_jfree: asked to free buffer that we don't manage!");
784	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
785		panic("sk_jfree: buffer already free!");
786	else {
787		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
788		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
789			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
790			if (entry == NULL)
791				panic("sk_jfree: buffer not in use!");
792			entry->slot = i;
793			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
794					  jpool_entries);
795			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
796					  entry, jpool_entries);
797		}
798	}
799
800	return;
801}
802
803/*
804 * Set media options.
805 */
806static int sk_ifmedia_upd(ifp)
807	struct ifnet		*ifp;
808{
809	struct sk_if_softc	*sc_if;
810	struct ifmedia		*ifm;
811
812	sc_if = ifp->if_softc;
813	ifm = &sc_if->ifmedia;
814
815	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
816		return(EINVAL);
817
818	switch(IFM_SUBTYPE(ifm->ifm_media)) {
819	case IFM_AUTO:
820		sk_phy_writereg(sc_if, XM_PHY_BMCR,
821		    XM_BMCR_RENEGOTIATE|XM_BMCR_AUTONEGENBL);
822		break;
823	case IFM_1000_LX:
824	case IFM_1000_SX:
825	case IFM_1000_CX:
826	case IFM_1000_TX:
827		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
828			sk_phy_writereg(sc_if, XM_PHY_BMCR, XM_BMCR_DUPLEX);
829		else
830			sk_phy_writereg(sc_if, XM_PHY_BMCR, 0);
831		break;
832	default:
833		printf("sk%d: invalid media selected\n", sc_if->sk_unit);
834		return(EINVAL);
835		break;
836	}
837
838	return(0);
839}
840
841/*
842 * Report current media status.
843 */
844static void sk_ifmedia_sts(ifp, ifmr)
845	struct ifnet		*ifp;
846	struct ifmediareq	*ifmr;
847{
848	struct sk_softc		*sc;
849	struct sk_if_softc	*sc_if;
850	u_int16_t		bmsr, extsts;
851
852	sc_if = ifp->if_softc;
853	sc = sc_if->sk_softc;
854
855	ifmr->ifm_status = IFM_AVALID;
856	ifmr->ifm_active = IFM_ETHER;
857
858	bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
859	extsts = sk_phy_readreg(sc_if, XM_PHY_EXTSTS);
860
861	if (!(bmsr & XM_BMSR_LINKSTAT))
862		return;
863
864	ifmr->ifm_status |= IFM_ACTIVE;
865	ifmr->ifm_active |= sc->sk_pmd;
866	if (extsts & XM_EXTSTS_FULLDUPLEX)
867		ifmr->ifm_active |= IFM_FDX;
868	else
869		ifmr->ifm_active |= IFM_HDX;
870
871	return;
872}
873
874static int sk_ioctl(ifp, command, data)
875	struct ifnet		*ifp;
876	u_long			command;
877	caddr_t			data;
878{
879	struct sk_if_softc	*sc_if = ifp->if_softc;
880	struct ifreq		*ifr = (struct ifreq *) data;
881	int			s, error = 0;
882
883	s = splimp();
884
885	switch(command) {
886	case SIOCSIFADDR:
887	case SIOCGIFADDR:
888		error = ether_ioctl(ifp, command, data);
889		break;
890	case SIOCSIFMTU:
891		if (ifr->ifr_mtu > SK_JUMBO_MTU)
892			error = EINVAL;
893		else {
894			ifp->if_mtu = ifr->ifr_mtu;
895			sk_init(sc_if);
896		}
897		break;
898	case SIOCSIFFLAGS:
899		if (ifp->if_flags & IFF_UP) {
900			if (ifp->if_flags & IFF_RUNNING &&
901			    ifp->if_flags & IFF_PROMISC &&
902			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
903				SK_XM_SETBIT_4(sc_if, XM_MODE,
904				    XM_MODE_RX_PROMISC);
905				sk_setmulti(sc_if);
906			} else if (ifp->if_flags & IFF_RUNNING &&
907			    !(ifp->if_flags & IFF_PROMISC) &&
908			    sc_if->sk_if_flags & IFF_PROMISC) {
909				SK_XM_CLRBIT_4(sc_if, XM_MODE,
910				    XM_MODE_RX_PROMISC);
911				sk_setmulti(sc_if);
912			} else
913				sk_init(sc_if);
914		} else {
915			if (ifp->if_flags & IFF_RUNNING)
916				sk_stop(sc_if);
917		}
918		sc_if->sk_if_flags = ifp->if_flags;
919		error = 0;
920		break;
921	case SIOCADDMULTI:
922	case SIOCDELMULTI:
923		sk_setmulti(sc_if);
924		error = 0;
925		break;
926	case SIOCGIFMEDIA:
927	case SIOCSIFMEDIA:
928		error = ifmedia_ioctl(ifp, ifr, &sc_if->ifmedia, command);
929		break;
930	default:
931		error = EINVAL;
932		break;
933	}
934
935	(void)splx(s);
936
937	return(error);
938}
939
940/*
941 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
942 * IDs against our list and return a device name if we find a match.
943 */
944static int sk_probe(dev)
945	device_t		dev;
946{
947	struct sk_type		*t;
948
949	t = sk_devs;
950
951	while(t->sk_name != NULL) {
952		if ((pci_get_vendor(dev) == t->sk_vid) &&
953		    (pci_get_device(dev) == t->sk_did)) {
954			device_set_desc(dev, t->sk_name);
955			return(0);
956		}
957		t++;
958	}
959
960	return(ENXIO);
961}
962
963/*
964 * Force the GEnesis into reset, then bring it out of reset.
965 */
966static void sk_reset(sc)
967	struct sk_softc		*sc;
968{
969	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
970	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
971	DELAY(1000);
972	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
973	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
974
975	/* Configure packet arbiter */
976	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
977	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
978	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
979	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
980	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
981
982	/* Enable RAM interface */
983	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
984
985	/*
986	 * Configure interrupt moderation. The moderation timer
987	 * defers interrupts specified in the interrupt moderation
988	 * timer mask based on the timeout specified in the interrupt
989	 * moderation timer init register. Each bit in the timer
990	 * register represents 18.825ns, so to specify a timeout in
991	 * microseconds, we have to multiply by 54.
992	 */
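	/*
	 * For example, assuming SK_IM_USECS() applies the factor of 54
	 * described above, the 200us setting below corresponds to
	 * SK_IM_USECS(200) == 200 * 54 == 10800 timer ticks.
	 */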
993	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
994	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
995	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
996	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
997
998	return;
999}
1000
1001/*
1002 * Each XMAC chip is attached as a separate logical IP interface.
1003 * Single port cards will have only one logical interface of course.
1004 */
1005static int sk_attach_xmac(sc, port)
1006	struct sk_softc		*sc;
1007	int			port;
1008{
1009	struct sk_if_softc	*sc_if;
1010	struct ifnet		*ifp;
1011	int			i;
1012	char			ifname[64];
1013
1014	if (sc == NULL)
1015		return(EINVAL);
1016
1017	if (port != SK_PORT_A && port != SK_PORT_B)
1018		return(EINVAL);
1019
1020	sc_if = malloc(sizeof(struct sk_if_softc), M_DEVBUF, M_NOWAIT);
1021	if (sc_if == NULL) {
1022		printf("skc%d: no memory for interface softc!\n", sc->sk_unit);
1023		return(ENOMEM);
1024	}
1025	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1026
1027	for (i = 0; i < SK_MAXUNIT; i++) {
1028		sprintf(ifname, "sk%d", i);
1029		if (ifunit(ifname) == NULL)
1030			break;
1031	}
1032
1033	if (i == SK_MAXUNIT) {
1034		printf("skc%d: too many sk units\n", sc->sk_unit);
1035		free(sc_if, M_DEVBUF);
1036		return(ENODEV);
1037	}
1038
1039	sc_if->sk_unit = i;
1040	sc_if->sk_port = port;
1041	sc_if->sk_softc = sc;
1042	sc->sk_if[port] = sc_if;
1043	if (port == SK_PORT_A)
1044		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1045	if (port == SK_PORT_B)
1046		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1047
1048	/*
1049	 * Get station address for this interface. Note that
1050	 * dual port cards actually come with three station
1051	 * addresses: one for each port, plus an extra. The
1052	 * extra one is used by the SysKonnect driver software
1053	 * as a 'virtual' station address for when both ports
1054	 * are operating in failover mode. Currently we don't
1055	 * use this extra address.
1056	 */
1057	for (i = 0; i < ETHER_ADDR_LEN; i++)
1058		sc_if->arpcom.ac_enaddr[i] =
1059		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1060
1061	printf("sk%d: <XaQti Corp. XMAC II> at skc%d port %d\n",
1062	    sc_if->sk_unit, sc->sk_unit, port);
1063
1064	printf("sk%d: Ethernet address: %6D\n",
1065	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1066
1067	/*
1068	 * Set up RAM buffer addresses. The NIC will have a certain
1069	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1070	 * need to divide this up a) between the transmitter and
1071 	 * receiver and b) between the two XMACs, if this is a
1072	 * dual port NIC. Our algorithm is to divide up the memory
1073	 * evenly so that everyone gets a fair share.
1074	 */
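	/*
	 * Worked example (hypothetical 1MB single-MAC board): each
	 * direction gets a 512K chunk, and since the start/end values
	 * are expressed in 8-byte (u_int64_t) units, that is 65536
	 * units of receive buffer followed by 65536 units of transmit
	 * buffer, beginning at sk_rboff.
	 */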
1075	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1076		u_int32_t		chunk, val;
1077
1078		chunk = sc->sk_ramsize / 2;
1079		val = sc->sk_rboff / sizeof(u_int64_t);
1080		sc_if->sk_rx_ramstart = val;
1081		val += (chunk / sizeof(u_int64_t));
1082		sc_if->sk_rx_ramend = val - 1;
1083		sc_if->sk_tx_ramstart = val;
1084		val += (chunk / sizeof(u_int64_t));
1085		sc_if->sk_tx_ramend = val - 1;
1086	} else {
1087		u_int32_t		chunk, val;
1088
1089		chunk = sc->sk_ramsize / 4;
1090		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1091		    sizeof(u_int64_t);
1092		sc_if->sk_rx_ramstart = val;
1093		val += (chunk / sizeof(u_int64_t));
1094		sc_if->sk_rx_ramend = val - 1;
1095		sc_if->sk_tx_ramstart = val;
1096		val += (chunk / sizeof(u_int64_t));
1097		sc_if->sk_tx_ramend = val - 1;
1098	}
1099
1100	/* Allocate the descriptor queues. */
1101	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1102	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1103
1104	if (sc_if->sk_rdata == NULL) {
1105		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1106		free(sc_if, M_DEVBUF);
1107		sc->sk_if[port] = NULL;
1108		return(ENOMEM);
1109	}
1110
1111	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1112
1113	/* Try to allocate memory for jumbo buffers. */
1114	if (sk_alloc_jumbo_mem(sc_if)) {
1115		printf("sk%d: jumbo buffer allocation failed\n",
1116		    sc_if->sk_unit);
1117		free(sc_if->sk_rdata, M_DEVBUF);
1118		free(sc_if, M_DEVBUF);
1119		sc->sk_if[port] = NULL;
1120		return(ENOMEM);
1121	}
1122
1123	ifp = &sc_if->arpcom.ac_if;
1124	ifp->if_softc = sc_if;
1125	ifp->if_unit = sc_if->sk_unit;
1126	ifp->if_name = "sk";
1127	ifp->if_mtu = ETHERMTU;
1128	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1129	ifp->if_ioctl = sk_ioctl;
1130	ifp->if_output = ether_output;
1131	ifp->if_start = sk_start;
1132	ifp->if_watchdog = sk_watchdog;
1133	ifp->if_init = sk_init;
1134	ifp->if_baudrate = 1000000000;
1135	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1136
1137	/*
1138	 * Do ifmedia setup.
1139	 */
1140	ifmedia_init(&sc_if->ifmedia, 0, sk_ifmedia_upd, sk_ifmedia_sts);
1141	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd, 0, NULL);
1142	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_FDX, 0, NULL);
1143	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_HDX, 0, NULL);
1144	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1145	ifmedia_set(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO);
1146
1147	/*
1148	 * Call MI attach routines.
1149	 */
1150	if_attach(ifp);
1151	ether_ifattach(ifp);
1152
1153#if NBPF > 0
1154	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1155#endif
1156
1157	return(0);
1158}
1159
1160/*
1161 * Attach the interface. Allocate softc structures, do ifmedia
1162 * setup and ethernet/BPF attach.
1163 */
1164static int sk_attach(dev)
1165	device_t		dev;
1166{
1167	int			s;
1168	u_int32_t		command;
1169	struct sk_softc		*sc;
1170	int			unit, error = 0, rid;
1171
1172	s = splimp();
1173
1174	sc = device_get_softc(dev);
1175	unit = device_get_unit(dev);
1176	bzero(sc, sizeof(struct sk_softc));
1177
1178	/*
1179	 * Handle power management nonsense.
1180	 */
1181	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1182	if (command == 0x01) {
1183
1184		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1185		if (command & SK_PSTATE_MASK) {
1186			u_int32_t		iobase, membase, irq;
1187
1188			/* Save important PCI config data. */
1189			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1190			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1191			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1192
1193			/* Reset the power state. */
1194			printf("skc%d: chip is in D%d power mode "
1195			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1196			command &= 0xFFFFFFFC;
1197			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1198
1199			/* Restore PCI config data. */
1200			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1201			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1202			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1203		}
1204	}
1205
1206	/*
1207	 * Map control/status registers.
1208	 */
1209	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1210	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1211	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
1212	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1213
1214#ifdef SK_USEIOSPACE
1215	if (!(command & PCIM_CMD_PORTEN)) {
1216		printf("skc%d: failed to enable I/O ports!\n", unit);
1217		error = ENXIO;
1218		goto fail;
1219	}
1220#else
1221	if (!(command & PCIM_CMD_MEMEN)) {
1222		printf("skc%d: failed to enable memory mapping!\n", unit);
1223		error = ENXIO;
1224		goto fail;
1225	}
1226#endif
1227
1228	rid = SK_RID;
1229	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1230	    0, ~0, 1, RF_ACTIVE);
1231
1232	if (sc->sk_res == NULL) {
1233		printf("sk%d: couldn't map ports/memory\n", unit);
1234		error = ENXIO;
1235		goto fail;
1236	}
1237
1238	sc->sk_btag = rman_get_bustag(sc->sk_res);
1239	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1240
1241	/* Allocate interrupt */
1242	rid = 0;
1243	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1244	    RF_SHAREABLE | RF_ACTIVE);
1245
1246	if (sc->sk_irq == NULL) {
1247		printf("skc%d: couldn't map interrupt\n", unit);
1248		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1249		error = ENXIO;
1250		goto fail;
1251	}
1252
1253	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1254	    sk_intr, sc, &sc->sk_intrhand);
1255
1256	if (error) {
1257		printf("skc%d: couldn't set up irq\n", unit);
1258		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1259		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1260		goto fail;
1261	}
1262
1263	/* Reset the adapter. */
1264	sk_reset(sc);
1265
1266	sc->sk_unit = unit;
1267
1268	/* Read and save vital product data from EEPROM. */
1269	sk_vpd_read(sc);
1270
1271	/* Read and save RAM size and RAMbuffer offset */
1272	switch(sk_win_read_1(sc, SK_EPROM0)) {
1273	case SK_RAMSIZE_512K_64:
1274		sc->sk_ramsize = 0x80000;
1275		sc->sk_rboff = SK_RBOFF_0;
1276		break;
1277	case SK_RAMSIZE_1024K_64:
1278		sc->sk_ramsize = 0x100000;
1279		sc->sk_rboff = SK_RBOFF_80000;
1280		break;
1281	case SK_RAMSIZE_1024K_128:
1282		sc->sk_ramsize = 0x100000;
1283		sc->sk_rboff = SK_RBOFF_0;
1284		break;
1285	case SK_RAMSIZE_2048K_128:
1286		sc->sk_ramsize = 0x200000;
1287		sc->sk_rboff = SK_RBOFF_0;
1288		break;
1289	default:
1290		printf("skc%d: unknown ram size: %d\n",
1291		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1292		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1293		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1294		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1295		error = ENXIO;
1296		goto fail;
1297		break;
1298	}
1299
1300	/* Read and save physical media type */
1301	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1302	case SK_PMD_1000BASESX:
1303		sc->sk_pmd = IFM_1000_SX;
1304		break;
1305	case SK_PMD_1000BASELX:
1306		sc->sk_pmd = IFM_1000_LX;
1307		break;
1308	case SK_PMD_1000BASECX:
1309		sc->sk_pmd = IFM_1000_CX;
1310		break;
1311	case SK_PMD_1000BASETX:
1312		sc->sk_pmd = IFM_1000_TX;
1313		break;
1314	default:
1315		printf("skc%d: unknown media type: 0x%x\n",
1316		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1317		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1318		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1319		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1320		error = ENXIO;
1321		goto fail;
1322	}
1323
1324	/* Announce the product name. */
1325	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1326
1327	sk_attach_xmac(sc, SK_PORT_A);
1328	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
1329		sk_attach_xmac(sc, SK_PORT_B);
1330
1331	/* Turn on the 'driver is loaded' LED. */
1332	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1333
1334fail:
1335	splx(s);
1336	return(error);
1337}
1338
1339static int sk_detach(dev)
1340	device_t		dev;
1341{
1342	struct sk_softc		*sc;
1343	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1344	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1345	int			s;
1346
1347	s = splimp();
1348
1349	sc = device_get_softc(dev);
1350	sc_if0 = sc->sk_if[SK_PORT_A];
1351	ifp0 = &sc_if0->arpcom.ac_if;
1352	sk_stop(sc_if0);
1353	if_detach(ifp0);
1354	contigfree(sc_if0->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1355	ifmedia_removeall(&sc_if0->ifmedia);
1356	free(sc->sk_if[SK_PORT_A], M_DEVBUF);
1357	if (sc->sk_if[SK_PORT_B] != NULL) {
1358		sc_if1 = sc->sk_if[SK_PORT_B];
1359		ifp1 = &sc_if1->arpcom.ac_if;
1360		sk_stop(sc_if1);
1361		if_detach(ifp1);
1362		contigfree(sc_if1->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1363		ifmedia_removeall(&sc_if1->ifmedia);
1364		free(sc->sk_if[SK_PORT_B], M_DEVBUF);
1365	}
1366
1367	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1368	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1369	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1370
1371	splx(s);
1372
1373	return(0);
1374}
1375
1376static int sk_encap(sc_if, m_head, txidx)
1377        struct sk_if_softc	*sc_if;
1378        struct mbuf		*m_head;
1379        u_int32_t		*txidx;
1380{
1381	struct sk_tx_desc	*f = NULL;
1382	struct mbuf		*m;
1383	u_int32_t		frag, cur, cnt = 0;
1384
1385	m = m_head;
1386	cur = frag = *txidx;
1387
1388	/*
1389	 * Start packing the mbufs in this chain into
1390	 * the fragment pointers. Stop when we run out
1391	 * of fragments or hit the end of the mbuf chain.
1392	 */
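	/*
	 * Note on ownership handoff, as the loop below is written: every
	 * fragment except the first is marked SK_TXCTL_OWN as it is
	 * filled in; the first descriptor only receives SK_TXCTL_OWN at
	 * the end, once the whole chain is in place, so the NIC never
	 * sees a partially built frame.
	 */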
1393	for (m = m_head; m != NULL; m = m->m_next) {
1394		if (m->m_len != 0) {
1395			if ((SK_TX_RING_CNT -
1396			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1397				return(ENOBUFS);
1398			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1399			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1400			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1401			if (cnt == 0)
1402				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1403			else
1404				f->sk_ctl |= SK_TXCTL_OWN;
1405			cur = frag;
1406			SK_INC(frag, SK_TX_RING_CNT);
1407			cnt++;
1408		}
1409	}
1410
1411	if (m != NULL)
1412		return(ENOBUFS);
1413
1414	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1415		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1416	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1417	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1418	sc_if->sk_cdata.sk_tx_cnt += cnt;
1419
1420	*txidx = frag;
1421
1422	return(0);
1423}
1424
1425static void sk_start(ifp)
1426	struct ifnet		*ifp;
1427{
1428        struct sk_softc		*sc;
1429        struct sk_if_softc	*sc_if;
1430        struct mbuf		*m_head = NULL;
1431        u_int32_t		idx;
1432
1433	sc_if = ifp->if_softc;
1434	sc = sc_if->sk_softc;
1435
1436	idx = sc_if->sk_cdata.sk_tx_prod;
1437
1438	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1439		IF_DEQUEUE(&ifp->if_snd, m_head);
1440		if (m_head == NULL)
1441			break;
1442
1443		/*
1444		 * Pack the data into the transmit ring. If we
1445		 * don't have room, set the OACTIVE flag and wait
1446		 * for the NIC to drain the ring.
1447		 */
1448		if (sk_encap(sc_if, m_head, &idx)) {
1449			IF_PREPEND(&ifp->if_snd, m_head);
1450			ifp->if_flags |= IFF_OACTIVE;
1451			break;
1452		}
1453
1454		/*
1455		 * If there's a BPF listener, bounce a copy of this frame
1456		 * to him.
1457		 */
1458#if NBPF > 0
1459		if (ifp->if_bpf)
1460			bpf_mtap(ifp, m_head);
1461#endif
1462	}
1463
1464	/* Transmit */
1465	sc_if->sk_cdata.sk_tx_prod = idx;
1466	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1467
1468	/* Set a timeout in case the chip goes out to lunch. */
1469	ifp->if_timer = 5;
1470
1471	return;
1472}
1473
1474
1475static void sk_watchdog(ifp)
1476	struct ifnet		*ifp;
1477{
1478	struct sk_if_softc	*sc_if;
1479
1480	sc_if = ifp->if_softc;
1481
1482	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1483	sk_init(sc_if);
1484
1485	return;
1486}
1487
1488static void sk_shutdown(dev)
1489	device_t		dev;
1490{
1491	struct sk_softc		*sc;
1492
1493	sc = device_get_softc(dev);
1494
1495	/* Turn off the 'driver is loaded' LED. */
1496	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1497
1498	/*
1499	 * Reset the GEnesis controller. Doing this should also
1500	 * assert the resets on the attached XMAC(s).
1501	 */
1502	sk_reset(sc);
1503
1504	return;
1505}
1506
1507static void sk_rxeof(sc_if)
1508	struct sk_if_softc	*sc_if;
1509{
1510	struct ether_header	*eh;
1511	struct mbuf		*m;
1512	struct ifnet		*ifp;
1513	struct sk_chain		*cur_rx;
1514	int			total_len = 0;
1515	int			i;
1516	u_int32_t		rxstat;
1517
1518	ifp = &sc_if->arpcom.ac_if;
1519	i = sc_if->sk_cdata.sk_rx_prod;
1520	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1521
1522	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1523
1524		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1525		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1526		m = cur_rx->sk_mbuf;
1527		cur_rx->sk_mbuf = NULL;
1528		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1529		SK_INC(i, SK_RX_RING_CNT);
1530
1531		if (rxstat & XM_RXSTAT_ERRFRAME) {
1532			ifp->if_ierrors++;
1533			sk_newbuf(sc_if, cur_rx, m);
1534			continue;
1535		}
1536
1537		/*
1538		 * Try to allocate a new jumbo buffer. If that
1539		 * fails, copy the packet to mbufs and put the
1540		 * jumbo buffer back in the ring so it can be
1541		 * re-used. If allocating mbufs fails, then we
1542		 * have to drop the packet.
1543		 */
1544		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1545			struct mbuf		*m0;
1546			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1547			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1548			sk_newbuf(sc_if, cur_rx, m);
1549			if (m0 == NULL) {
1550				printf("sk%d: no receive buffers "
1551				    "available -- packet dropped!\n",
1552				    sc_if->sk_unit);
1553				ifp->if_ierrors++;
1554				continue;
1555			}
1556			m_adj(m0, ETHER_ALIGN);
1557			m = m0;
1558		} else {
1559			m->m_pkthdr.rcvif = ifp;
1560			m->m_pkthdr.len = m->m_len = total_len;
1561		}
1562
1563		ifp->if_ipackets++;
1564		eh = mtod(m, struct ether_header *);
1565
1566#if NBPF > 0
1567		if (ifp->if_bpf) {
1568			bpf_mtap(ifp, m);
1569			if (ifp->if_flags & IFF_PROMISC &&
1570			    (bcmp(eh->ether_dhost, sc_if->arpcom.ac_enaddr,
1571			    ETHER_ADDR_LEN) && !(eh->ether_dhost[0] & 1))) {
1572				m_freem(m);
1573				continue;
1574			}
1575		}
1576#endif
1577		/* Remove header from mbuf and pass it on. */
1578		m_adj(m, sizeof(struct ether_header));
1579		ether_input(ifp, eh, m);
1580	}
1581
1582	sc_if->sk_cdata.sk_rx_prod = i;
1583
1584	return;
1585}
1586
1587static void sk_txeof(sc_if)
1588	struct sk_if_softc	*sc_if;
1589{
1590	struct sk_tx_desc	*cur_tx = NULL;
1591	struct ifnet		*ifp;
1592	u_int32_t		idx;
1593
1594	ifp = &sc_if->arpcom.ac_if;
1595
1596	/*
1597	 * Go through our tx ring and free mbufs for those
1598	 * frames that have been sent.
1599	 */
1600	idx = sc_if->sk_cdata.sk_tx_cons;
1601	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1602		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1603		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1604			break;
1605		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1606			ifp->if_opackets++;
1607		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1608			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1609			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1610		}
1611		sc_if->sk_cdata.sk_tx_cnt--;
1612		SK_INC(idx, SK_TX_RING_CNT);
1613		ifp->if_timer = 0;
1614	}
1615
1616	sc_if->sk_cdata.sk_tx_cons = idx;
1617
1618	if (cur_tx != NULL)
1619		ifp->if_flags &= ~IFF_OACTIVE;
1620
1621	return;
1622}
1623
1624static void sk_intr_xmac(sc_if)
1625	struct sk_if_softc	*sc_if;
1626{
1627	struct sk_softc		*sc;
1628	u_int16_t		status;
1629	u_int16_t		bmsr;
1630
1631	sc = sc_if->sk_softc;
1632	status = SK_XM_READ_2(sc_if, XM_ISR);
1633
1634	if (status & XM_ISR_LINKEVENT) {
1635		SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
1636		if (sc_if->sk_link == 1) {
1637			printf("sk%d: gigabit link down\n", sc_if->sk_unit);
1638			sc_if->sk_link = 0;
1639		}
1640	}
1641
1642	if (status & XM_ISR_AUTONEG_DONE) {
1643		bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
1644		if (bmsr & XM_BMSR_LINKSTAT) {
1645			sc_if->sk_link = 1;
1646			SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
1647			printf("sk%d: gigabit link up\n", sc_if->sk_unit);
1648		}
1649	}
1650
1651	if (status & XM_IMR_TX_UNDERRUN)
1652		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1653
1654	if (status & XM_IMR_RX_OVERRUN)
1655		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1656
1657	return;
1658}
1659
1660static void sk_intr(xsc)
1661	void			*xsc;
1662{
1663	struct sk_softc		*sc = xsc;
1664	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1665	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1666	u_int32_t		status;
1667
1668	sc_if0 = sc->sk_if[SK_PORT_A];
1669	sc_if1 = sc->sk_if[SK_PORT_B];
1670
1671	if (sc_if0 != NULL)
1672		ifp0 = &sc_if0->arpcom.ac_if;
1673	if (sc_if1 != NULL)
1674		ifp1 = &sc_if0->arpcom.ac_if;
1675
1676	for (;;) {
1677		status = CSR_READ_4(sc, SK_ISSR);
1678		if (!(status & sc->sk_intrmask))
1679			break;
1680
1681		/* Handle receive interrupts first. */
1682		if (status & SK_ISR_RX1_EOF) {
1683			sk_rxeof(sc_if0);
1684			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1685			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1686		}
1687		if (status & SK_ISR_RX2_EOF) {
1688			sk_rxeof(sc_if1);
1689			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1690			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1691		}
1692
1693		/* Then transmit interrupts. */
1694		if (status & SK_ISR_TX1_S_EOF) {
1695			sk_txeof(sc_if0);
1696			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1697			    SK_TXBMU_CLR_IRQ_EOF);
1698		}
1699		if (status & SK_ISR_TX2_S_EOF) {
1700			sk_txeof(sc_if1);
1701			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1702			    SK_TXBMU_CLR_IRQ_EOF);
1703		}
1704
1705		/* Then MAC interrupts. */
1706		if (status & SK_ISR_MAC1)
1707			sk_intr_xmac(sc_if0);
1708
1709		if (status & SK_ISR_MAC2)
1710			sk_intr_xmac(sc_if1);
1711	}
1712
1713	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1714
1715	return;
1716}
1717
1718static void sk_init_xmac(sc_if)
1719	struct sk_if_softc	*sc_if;
1720{
1721	struct sk_softc		*sc;
1722	struct ifnet		*ifp;
1723
1724	sc = sc_if->sk_softc;
1725	ifp = &sc_if->arpcom.ac_if;
1726
1727	/* Unreset the XMAC. */
1728	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
1729	DELAY(1000);
1730
1731	/* Save the XMAC II revision */
1732	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
1733
1734	/* Set station address */
1735	SK_XM_WRITE_2(sc_if, XM_PAR0,
1736	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
1737	SK_XM_WRITE_2(sc_if, XM_PAR1,
1738	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
1739	SK_XM_WRITE_2(sc_if, XM_PAR2,
1740	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
1741	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
1742
1743	if (ifp->if_flags & IFF_PROMISC) {
1744		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1745	} else {
1746		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1747	}
1748
1749	if (ifp->if_flags & IFF_BROADCAST) {
1750		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1751	} else {
1752		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1753	}
1754
1755	/* We don't need the FCS appended to the packet. */
1756	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
1757
1758	/* We want short frames padded to 60 bytes. */
1759	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
1760
1761	/*
1762	 * Enable the reception of all error frames. This is
1763	 * a necessary evil due to the design of the XMAC. The
1764	 * XMAC's receive FIFO is only 8K in size; however, jumbo
1765	 * frames can be up to 9000 bytes in length. When bad
1766	 * frame filtering is enabled, the XMAC's RX FIFO operates
1767	 * in 'store and forward' mode. For this to work, the
1768	 * entire frame has to fit into the FIFO, but that means
1769	 * that jumbo frames larger than 8192 bytes will be
1770	 * truncated. Disabling all bad frame filtering causes
1771	 * the RX FIFO to operate in streaming mode, in which
1772	 * case the XMAC will start transferring frames out of the
1773	 * RX FIFO as soon as the FIFO threshold is reached.
1774	 */
1775	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
1776	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
1777	    XM_MODE_RX_INRANGELEN);
1778
1779	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
1780		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1781	else
1782		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1783
1784	/*
1785	 * Bump up the transmit threshold. This helps hold off transmit
1786	 * underruns when we're blasting traffic from both ports at once.
1787	 */
1788	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
1789
1790	/* Set multicast filter */
1791	sk_setmulti(sc_if);
1792
1793	/* Clear and enable interrupts */
1794	SK_XM_READ_2(sc_if, XM_ISR);
1795	SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
1796
1797	sc_if->sk_link = 0;
1798
1799	/* Configure MAC arbiter */
1800	switch(sc_if->sk_xmac_rev) {
1801	case XM_XMAC_REV_B2:
1802		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
1803		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
1804		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
1805		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
1806		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
1807		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
1808		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
1809		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
1810		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
1811		break;
1812	case XM_XMAC_REV_C1:
1813		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
1814		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
1815		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
1816		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
1817		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
1818		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
1819		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
1820		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
1821		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
1822		break;
1823	default:
1824		break;
1825	}
1826	sk_win_write_2(sc, SK_MACARB_CTL,
1827	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
1828
1829	return;
1830}
1831
1832/*
1833 * Note that to properly initialize any part of the GEnesis chip,
1834 * you first have to take it out of reset mode.
1835 */
1836static void sk_init(xsc)
1837	void			*xsc;
1838{
1839	struct sk_if_softc	*sc_if = xsc;
1840	struct sk_softc		*sc;
1841	struct ifnet		*ifp;
1842	int			s;
1843
1844	s = splimp();
1845
1846	ifp = &sc_if->arpcom.ac_if;
1847	sc = sc_if->sk_softc;
1848
1849	/* Cancel pending I/O and free all RX/TX buffers. */
1850	sk_stop(sc_if);
1851
1852	/* Configure LINK_SYNC LED */
1853	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
1854	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);
1855
1856	/* Configure RX LED */
1857	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);
1858
1859	/* Configure TX LED */
1860	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);
1861
1862	/* Configure I2C registers */
1863
1864	/* Configure XMAC(s) */
1865	sk_init_xmac(sc_if);
1866
1867	/* Configure MAC FIFOs */
1868	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
1869	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
1870	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
1871
1872	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
1873	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
1874	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
1875
1876	/* Configure transmit arbiter(s) */
1877	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
1878	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
1879
1880	/* Configure RAMbuffers */
1881	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
1882	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
1883	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
1884	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
1885	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
1886	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
1887
1888	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
1889	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
1890	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
1891	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
1892	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
1893	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
1894	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
1895
1896	/* Configure BMUs */
1897	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
1898	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
1899	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
1900	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
1901
1902	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
1903	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
1904	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
1905	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
1906
1907	/* Init descriptors */
1908	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
1909		printf("sk%d: initialization failed: no "
1910		    "memory for rx buffers\n", sc_if->sk_unit);
1911		sk_stop(sc_if);
1912		(void)splx(s);
1913		return;
1914	}
1915	sk_init_tx_ring(sc_if);
1916
1917	/* Configure interrupt handling */
1918	CSR_READ_4(sc, SK_ISSR);
1919	if (sc_if->sk_port == SK_PORT_A)
1920		sc->sk_intrmask |= SK_INTRS1;
1921	else
1922		sc->sk_intrmask |= SK_INTRS2;
1923	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1924
1925	/* Start BMUs. */
1926	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
1927
1928	/* Enable XMACs TX and RX state machines */
1929	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1930
1931	ifp->if_flags |= IFF_RUNNING;
1932	ifp->if_flags &= ~IFF_OACTIVE;
1933
1934	splx(s);
1935
1936	return;
1937}
1938
1939static void sk_stop(sc_if)
1940	struct sk_if_softc	*sc_if;
1941{
1942	int			i;
1943	struct sk_softc		*sc;
1944	struct ifnet		*ifp;
1945
1946	sc = sc_if->sk_softc;
1947	ifp = &sc_if->arpcom.ac_if;
1948
1949	/* Turn off various components of this interface. */
1950	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
1951	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
1952	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
1953	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1954	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
1955	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1956	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
1957	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1958	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1959	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
1960	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
1961
1962	/* Disable interrupts */
1963	if (sc_if->sk_port == SK_PORT_A)
1964		sc->sk_intrmask &= ~SK_INTRS1;
1965	else
1966		sc->sk_intrmask &= ~SK_INTRS2;
1967	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1968
1969	/* Free RX and TX mbufs still in the queues. */
1970	for (i = 0; i < SK_RX_RING_CNT; i++) {
1971		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
1972			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
1973			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
1974		}
1975	}
1976
1977	for (i = 0; i < SK_TX_RING_CNT; i++) {
1978		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
1979			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
1980			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
1981		}
1982	}
1983
1984	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1985
1986	return;
1987}
1988