if_sk.c revision 95673
1/*
2 * Copyright (c) 1997, 1998, 1999, 2000
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: head/sys/dev/sk/if_sk.c 95673 2002-04-28 20:34:20Z phk $
33 */
34
35/*
36 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37 * the SK-984x series adapters, both single port and dual port.
38 * References:
39 * 	The XaQti XMAC II datasheet,
40 *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
41 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
42 *
43 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
44 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
45 * convenience to others until Vitesse corrects this problem:
46 *
47 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
48 *
49 * Written by Bill Paul <wpaul@ee.columbia.edu>
50 * Department of Electrical Engineering
51 * Columbia University, New York City
52 */
53
54/*
55 * The SysKonnect gigabit ethernet adapters consist of two main
56 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
57 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
58 * components and a PHY while the GEnesis controller provides a PCI
59 * interface with DMA support. Each card may have between 512K and
60 * 2MB of SRAM on board depending on the configuration.
61 *
62 * The SysKonnect GEnesis controller can have either one or two XMAC
63 * chips connected to it, allowing single or dual port NIC configurations.
64 * SysKonnect has the distinction of being the only vendor on the market
65 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
66 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
67 * XMAC registers. This driver takes advantage of these features to allow
68 * both XMACs to operate as independent interfaces.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/sockio.h>
74#include <sys/mbuf.h>
75#include <sys/malloc.h>
76#include <sys/kernel.h>
77#include <sys/socket.h>
78#include <sys/queue.h>
79
80#include <net/if.h>
81#include <net/if_arp.h>
82#include <net/ethernet.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85
86#include <net/bpf.h>
87
88#include <vm/vm.h>              /* for vtophys */
89#include <vm/pmap.h>            /* for vtophys */
90#include <machine/bus_pio.h>
91#include <machine/bus_memio.h>
92#include <machine/bus.h>
93#include <machine/resource.h>
94#include <sys/bus.h>
95#include <sys/rman.h>
96
97#include <dev/mii/mii.h>
98#include <dev/mii/miivar.h>
99#include <dev/mii/brgphyreg.h>
100
101#include <pci/pcireg.h>
102#include <pci/pcivar.h>
103
104#define SK_USEIOSPACE
105
106#include <pci/if_skreg.h>
107#include <pci/xmaciireg.h>
108
109MODULE_DEPEND(sk, miibus, 1, 1, 1);
110
111/* "controller miibus0" required.  See GENERIC if you get errors here. */
112#include "miibus_if.h"
113
114#ifndef lint
115static const char rcsid[] =
116  "$FreeBSD: head/sys/dev/sk/if_sk.c 95673 2002-04-28 20:34:20Z phk $";
117#endif
118
119static struct sk_type sk_devs[] = {
120	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
121	{ 0, 0, NULL }
122};
123
124static int sk_probe		(device_t);
125static int sk_attach		(device_t);
126static int sk_detach		(device_t);
127static int sk_detach_xmac	(device_t);
128static int sk_probe_xmac	(device_t);
129static int sk_attach_xmac	(device_t);
130static void sk_tick		(void *);
131static void sk_intr		(void *);
132static void sk_intr_xmac	(struct sk_if_softc *);
133static void sk_intr_bcom	(struct sk_if_softc *);
134static void sk_rxeof		(struct sk_if_softc *);
135static void sk_txeof		(struct sk_if_softc *);
136static int sk_encap		(struct sk_if_softc *, struct mbuf *,
137					u_int32_t *);
138static void sk_start		(struct ifnet *);
139static int sk_ioctl		(struct ifnet *, u_long, caddr_t);
140static void sk_init		(void *);
141static void sk_init_xmac	(struct sk_if_softc *);
142static void sk_stop		(struct sk_if_softc *);
143static void sk_watchdog		(struct ifnet *);
144static void sk_shutdown		(device_t);
145static int sk_ifmedia_upd	(struct ifnet *);
146static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
147static void sk_reset		(struct sk_softc *);
148static int sk_newbuf		(struct sk_if_softc *,
149					struct sk_chain *, struct mbuf *);
150static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
151static void *sk_jalloc		(struct sk_if_softc *);
152static void sk_jfree		(caddr_t, void *);
153static int sk_init_rx_ring	(struct sk_if_softc *);
154static void sk_init_tx_ring	(struct sk_if_softc *);
155static u_int32_t sk_win_read_4	(struct sk_softc *, int);
156static u_int16_t sk_win_read_2	(struct sk_softc *, int);
157static u_int8_t sk_win_read_1	(struct sk_softc *, int);
158static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
159static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
160static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
161static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
162static void sk_vpd_read_res	(struct sk_softc *, struct vpd_res *, int);
163static void sk_vpd_read		(struct sk_softc *);
164
165static int sk_miibus_readreg	(device_t, int, int);
166static int sk_miibus_writereg	(device_t, int, int, int);
167static void sk_miibus_statchg	(device_t);
168
169static u_int32_t sk_calchash	(caddr_t);
170static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
171static void sk_setmulti		(struct sk_if_softc *);
172
173#ifdef SK_USEIOSPACE
174#define SK_RES		SYS_RES_IOPORT
175#define SK_RID		SK_PCI_LOIO
176#else
177#define SK_RES		SYS_RES_MEMORY
178#define SK_RID		SK_PCI_LOMEM
179#endif
180
181/*
182 * Note that we have newbus methods for both the GEnesis controller
183 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
184 * the miibus code is a child of the XMACs. We need to do it this way
185 * so that the miibus drivers can access the PHY registers on the
186 * right PHY. It's not quite what I had in mind, but it's the only
187 * design that achieves the desired effect.
188 */
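/*
 * For illustration (unit numbers are hypothetical), the resulting device
 * tree on a dual port board looks like:
 *
 *	skc0 (GEnesis controller, PCI)
 *	    sk0 (XMAC II, port A) -- miibus0 -- PHY
 *	    sk1 (XMAC II, port B) -- miibus1 -- PHY
 *
 * The skc parent owns the shared register window and interrupt, while
 * each sk child owns its own descriptor rings, jumbo buffer pool and
 * MII bus.
 */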
189static device_method_t skc_methods[] = {
190	/* Device interface */
191	DEVMETHOD(device_probe,		sk_probe),
192	DEVMETHOD(device_attach,	sk_attach),
193	DEVMETHOD(device_detach,	sk_detach),
194	DEVMETHOD(device_shutdown,	sk_shutdown),
195
196	/* bus interface */
197	DEVMETHOD(bus_print_child,	bus_generic_print_child),
198	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
199
200	{ 0, 0 }
201};
202
203static driver_t skc_driver = {
204	"skc",
205	skc_methods,
206	sizeof(struct sk_softc)
207};
208
209static devclass_t skc_devclass;
210
211static device_method_t sk_methods[] = {
212	/* Device interface */
213	DEVMETHOD(device_probe,		sk_probe_xmac),
214	DEVMETHOD(device_attach,	sk_attach_xmac),
215	DEVMETHOD(device_detach,	sk_detach_xmac),
216	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
217
218	/* bus interface */
219	DEVMETHOD(bus_print_child,	bus_generic_print_child),
220	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
221
222	/* MII interface */
223	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
224	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
225	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
226
227	{ 0, 0 }
228};
229
230static driver_t sk_driver = {
231	"sk",
232	sk_methods,
233	sizeof(struct sk_if_softc)
234};
235
236static devclass_t sk_devclass;
237
238DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
239DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
240DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
241
242#define SK_SETBIT(sc, reg, x)		\
243	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
244
245#define SK_CLRBIT(sc, reg, x)		\
246	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
247
248#define SK_WIN_SETBIT_4(sc, reg, x)	\
249	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
250
251#define SK_WIN_CLRBIT_4(sc, reg, x)	\
252	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
253
254#define SK_WIN_SETBIT_2(sc, reg, x)	\
255	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
256
257#define SK_WIN_CLRBIT_2(sc, reg, x)	\
258	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
259
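/*
 * Indirect register access helpers. Most GEnesis registers sit behind a
 * movable window: each accessor below first loads the Register Address
 * Port (SK_RAP) with the window containing the target register
 * (SK_WIN(reg)) and then accesses the register's offset within that
 * window (SK_WIN_BASE + SK_REG(reg)). The window geometry itself is
 * defined by the SK_WIN()/SK_REG() macros in the register header.
 */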
260static u_int32_t sk_win_read_4(sc, reg)
261	struct sk_softc		*sc;
262	int			reg;
263{
264	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
265	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
266}
267
268static u_int16_t sk_win_read_2(sc, reg)
269	struct sk_softc		*sc;
270	int			reg;
271{
272	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
273	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
274}
275
276static u_int8_t sk_win_read_1(sc, reg)
277	struct sk_softc		*sc;
278	int			reg;
279{
280	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
281	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
282}
283
284static void sk_win_write_4(sc, reg, val)
285	struct sk_softc		*sc;
286	int			reg;
287	u_int32_t		val;
288{
289	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
290	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
291	return;
292}
293
294static void sk_win_write_2(sc, reg, val)
295	struct sk_softc		*sc;
296	int			reg;
297	u_int32_t		val;
298{
299	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
300	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
301	return;
302}
303
304static void sk_win_write_1(sc, reg, val)
305	struct sk_softc		*sc;
306	int			reg;
307	u_int32_t		val;
308{
309	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
310	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
311	return;
312}
313
314/*
315 * The VPD EEPROM contains Vital Product Data, as suggested in
316 * the PCI 2.1 specification. The VPD data is separated into areas
317 * denoted by resource IDs. The SysKonnect VPD contains an ID string
318 * resource (the name of the adapter), a read-only area resource
319 * containing various key/data fields and a read/write area which
320 * can be used to store asset management information or log messages.
321 * We read the ID string and read-only area into buffers attached to
322 * the controller softc structure for later use. At the moment,
323 * we only use the ID string during sk_attach().
324 */
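/*
 * Rough sketch of the layout sk_vpd_read() walks below (lengths come
 * from each resource header, offsets are relative to the VPD area):
 *
 *	[vpd_res header, vr_id == VPD_RES_ID  ][vr_len bytes: ID string]
 *	[vpd_res header, vr_id == VPD_RES_READ][vr_len bytes: read-only data]
 *
 * The read/write area that may follow is not touched by this driver.
 */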
325static u_int8_t sk_vpd_readbyte(sc, addr)
326	struct sk_softc		*sc;
327	int			addr;
328{
329	int			i;
330
331	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
332	for (i = 0; i < SK_TIMEOUT; i++) {
333		DELAY(1);
334		if (sk_win_read_2(sc,
335		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
336			break;
337	}
338
339	if (i == SK_TIMEOUT)
340		return(0);
341
342	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
343}
344
345static void sk_vpd_read_res(sc, res, addr)
346	struct sk_softc		*sc;
347	struct vpd_res		*res;
348	int			addr;
349{
350	int			i;
351	u_int8_t		*ptr;
352
353	ptr = (u_int8_t *)res;
354	for (i = 0; i < sizeof(struct vpd_res); i++)
355		ptr[i] = sk_vpd_readbyte(sc, i + addr);
356
357	return;
358}
359
360static void sk_vpd_read(sc)
361	struct sk_softc		*sc;
362{
363	int			pos = 0, i;
364	struct vpd_res		res;
365
366	if (sc->sk_vpd_prodname != NULL)
367		free(sc->sk_vpd_prodname, M_DEVBUF);
368	if (sc->sk_vpd_readonly != NULL)
369		free(sc->sk_vpd_readonly, M_DEVBUF);
370	sc->sk_vpd_prodname = NULL;
371	sc->sk_vpd_readonly = NULL;
372
373	sk_vpd_read_res(sc, &res, pos);
374
375	if (res.vr_id != VPD_RES_ID) {
376		printf("skc%d: bad VPD resource id: expected %x got %x\n",
377		    sc->sk_unit, VPD_RES_ID, res.vr_id);
378		return;
379	}
380
381	pos += sizeof(res);
382	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
383	for (i = 0; i < res.vr_len; i++)
384		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
385	sc->sk_vpd_prodname[i] = '\0';
386	pos += i;
387
388	sk_vpd_read_res(sc, &res, pos);
389
390	if (res.vr_id != VPD_RES_READ) {
391		printf("skc%d: bad VPD resource id: expected %x got %x\n",
392		    sc->sk_unit, VPD_RES_READ, res.vr_id);
393		return;
394	}
395
396	pos += sizeof(res);
397	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
398	for (i = 0; i < res.vr_len; i++)
399		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
400
401	return;
402}
403
404static int sk_miibus_readreg(dev, phy, reg)
405	device_t		dev;
406	int			phy, reg;
407{
408	struct sk_if_softc	*sc_if;
409	int			i;
410
411	sc_if = device_get_softc(dev);
412
413	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
414		return(0);
415
416	SK_IF_LOCK(sc_if);
417
418	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
419	SK_XM_READ_2(sc_if, XM_PHY_DATA);
420	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
421		for (i = 0; i < SK_TIMEOUT; i++) {
422			DELAY(1);
423			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
424			    XM_MMUCMD_PHYDATARDY)
425				break;
426		}
427
428		if (i == SK_TIMEOUT) {
429			printf("sk%d: phy failed to come ready\n",
430			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
431			return(0);
432		}
433	}
434	DELAY(1);
435	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
436	SK_IF_UNLOCK(sc_if);
437	return(i);
438}
439
440static int sk_miibus_writereg(dev, phy, reg, val)
441	device_t		dev;
442	int			phy, reg, val;
443{
444	struct sk_if_softc	*sc_if;
445	int			i;
446
447	sc_if = device_get_softc(dev);
448	SK_IF_LOCK(sc_if);
449
450	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
451	for (i = 0; i < SK_TIMEOUT; i++) {
452		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
453			break;
454	}
455
456	if (i == SK_TIMEOUT) {
457		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
458		return(ETIMEDOUT);
459	}
460
461	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
462	for (i = 0; i < SK_TIMEOUT; i++) {
463		DELAY(1);
464		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
465			break;
466	}
467
468	SK_IF_UNLOCK(sc_if);
469
470	if (i == SK_TIMEOUT)
471		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
472
473	return(0);
474}
475
476static void sk_miibus_statchg(dev)
477	device_t		dev;
478{
479	struct sk_if_softc	*sc_if;
480	struct mii_data		*mii;
481
482	sc_if = device_get_softc(dev);
483	mii = device_get_softc(sc_if->sk_miibus);
484	SK_IF_LOCK(sc_if);
485	/*
486	 * If this is a GMII PHY, manually set the XMAC's
487	 * duplex mode accordingly.
488	 */
489	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
490		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
491			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
492		} else {
493			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
494		}
495	}
496	SK_IF_UNLOCK(sc_if);
497
498	return;
499}
500
501#define SK_POLY		0xEDB88320
502#define SK_BITS		6
503
504static u_int32_t sk_calchash(addr)
505	caddr_t			addr;
506{
507	u_int32_t		idx, bit, data, crc;
508
509	/* Compute CRC for the address value. */
510	crc = 0xFFFFFFFF; /* initial value */
511
512	for (idx = 0; idx < 6; idx++) {
513		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
514			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
515	}
516
517	return (~crc & ((1 << SK_BITS) - 1));
518}
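/*
 * The 6-bit value computed above selects one bit out of the XMAC's
 * 64-bit multicast hash filter: sk_setmulti() below sets bit h of
 * XM_MAR0 for h < 32 and bit (h - 32) of XM_MAR2 otherwise, so for
 * example a hash of 33 sets bit 1 of XM_MAR2.
 */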
519
520static void sk_setfilt(sc_if, addr, slot)
521	struct sk_if_softc	*sc_if;
522	caddr_t			addr;
523	int			slot;
524{
525	int			base;
526
527	base = XM_RXFILT_ENTRY(slot);
528
529	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
530	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
531	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
532
533	return;
534}
535
536static void sk_setmulti(sc_if)
537	struct sk_if_softc	*sc_if;
538{
539	struct ifnet		*ifp;
540	u_int32_t		hashes[2] = { 0, 0 };
541	int			h, i;
542	struct ifmultiaddr	*ifma;
543	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
544
545	ifp = &sc_if->arpcom.ac_if;
546
547	/* First, zot all the existing filters. */
548	for (i = 1; i < XM_RXFILT_MAX; i++)
549		sk_setfilt(sc_if, (caddr_t)&dummy, i);
550	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
551	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
552
553	/* Now program new ones. */
554	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
555		hashes[0] = 0xFFFFFFFF;
556		hashes[1] = 0xFFFFFFFF;
557	} else {
558		i = 1;
559		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
560			if (ifma->ifma_addr->sa_family != AF_LINK)
561				continue;
562			/*
563			 * Program the first XM_RXFILT_MAX multicast groups
564			 * into the perfect filter. For all others,
565			 * use the hash table.
566			 */
567			if (i < XM_RXFILT_MAX) {
568				sk_setfilt(sc_if,
569			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
570				i++;
571				continue;
572			}
573
574			h = sk_calchash(
575				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
576			if (h < 32)
577				hashes[0] |= (1 << h);
578			else
579				hashes[1] |= (1 << (h - 32));
580		}
581	}
582
583	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
584	    XM_MODE_RX_USE_PERFECT);
585	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
586	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
587
588	return;
589}
590
591static int sk_init_rx_ring(sc_if)
592	struct sk_if_softc	*sc_if;
593{
594	struct sk_chain_data	*cd;
595	struct sk_ring_data	*rd;
596	int			i;
597
598	cd = &sc_if->sk_cdata;
599	rd = sc_if->sk_rdata;
600
601	bzero((char *)rd->sk_rx_ring,
602	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
603
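	/*
	 * Link the descriptors into a circular list: each software
	 * sk_chain entry points at the next entry, while each hardware
	 * descriptor's sk_next field holds the physical (vtophys)
	 * address of the next descriptor, with the last one wrapping
	 * back around to the first.
	 */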
604	for (i = 0; i < SK_RX_RING_CNT; i++) {
605		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
606		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
607			return(ENOBUFS);
608		if (i == (SK_RX_RING_CNT - 1)) {
609			cd->sk_rx_chain[i].sk_next =
610			    &cd->sk_rx_chain[0];
611			rd->sk_rx_ring[i].sk_next =
612			    vtophys(&rd->sk_rx_ring[0]);
613		} else {
614			cd->sk_rx_chain[i].sk_next =
615			    &cd->sk_rx_chain[i + 1];
616			rd->sk_rx_ring[i].sk_next =
617			    vtophys(&rd->sk_rx_ring[i + 1]);
618		}
619	}
620
621	sc_if->sk_cdata.sk_rx_prod = 0;
622	sc_if->sk_cdata.sk_rx_cons = 0;
623
624	return(0);
625}
626
627static void sk_init_tx_ring(sc_if)
628	struct sk_if_softc	*sc_if;
629{
630	struct sk_chain_data	*cd;
631	struct sk_ring_data	*rd;
632	int			i;
633
634	cd = &sc_if->sk_cdata;
635	rd = sc_if->sk_rdata;
636
637	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
638	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
639
640	for (i = 0; i < SK_TX_RING_CNT; i++) {
641		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
642		if (i == (SK_TX_RING_CNT - 1)) {
643			cd->sk_tx_chain[i].sk_next =
644			    &cd->sk_tx_chain[0];
645			rd->sk_tx_ring[i].sk_next =
646			    vtophys(&rd->sk_tx_ring[0]);
647		} else {
648			cd->sk_tx_chain[i].sk_next =
649			    &cd->sk_tx_chain[i + 1];
650			rd->sk_tx_ring[i].sk_next =
651			    vtophys(&rd->sk_tx_ring[i + 1]);
652		}
653	}
654
655	sc_if->sk_cdata.sk_tx_prod = 0;
656	sc_if->sk_cdata.sk_tx_cons = 0;
657	sc_if->sk_cdata.sk_tx_cnt = 0;
658
659	return;
660}
661
662static int sk_newbuf(sc_if, c, m)
663	struct sk_if_softc	*sc_if;
664	struct sk_chain		*c;
665	struct mbuf		*m;
666{
667	struct mbuf		*m_new = NULL;
668	struct sk_rx_desc	*r;
669
670	if (m == NULL) {
671		caddr_t			*buf = NULL;
672
673		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
674		if (m_new == NULL)
675			return(ENOBUFS);
676
677		/* Allocate the jumbo buffer */
678		buf = sk_jalloc(sc_if);
679		if (buf == NULL) {
680			m_freem(m_new);
681#ifdef SK_VERBOSE
682			printf("sk%d: jumbo allocation failed "
683			    "-- packet dropped!\n", sc_if->sk_unit);
684#endif
685			return(ENOBUFS);
686		}
687
688		/* Attach the buffer to the mbuf */
689		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
690		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
691		m_new->m_data = (void *)buf;
692		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
693	} else {
694		/*
695	 	 * We're re-using a previously allocated mbuf;
696		 * be sure to re-init pointers and lengths to
697		 * default values.
698		 */
699		m_new = m;
700		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
701		m_new->m_data = m_new->m_ext.ext_buf;
702	}
703
704	/*
705	 * Adjust alignment so packet payload begins on a
706	 * longword boundary. Mandatory for Alpha, useful on
707	 * x86 too.
708	 */
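	/*
	 * (ETHER_ALIGN is 2, so after the 14-byte Ethernet header the
	 * IP header ends up on a 4-byte boundary: 2 + 14 = 16.)
	 */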
709	m_adj(m_new, ETHER_ALIGN);
710
711	r = c->sk_desc;
712	c->sk_mbuf = m_new;
713	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
714	r->sk_ctl = m_new->m_len | SK_RXSTAT;
715
716	return(0);
717}
718
719/*
720 * Allocate jumbo buffer storage. The SysKonnect adapters support
721 * "jumbograms" (9K frames), although SysKonnect doesn't currently
722 * use them in their drivers. In order for us to use them, we need
723 * large 9K receive buffers, however standard mbuf clusters are only
724 * 2048 bytes in size. Consequently, we need to allocate and manage
725 * our own jumbo buffer pool. Fortunately, this does not require an
726 * excessive amount of additional code.
727 */
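/*
 * In outline: contigmalloc() below grabs one physically contiguous
 * region of SK_JMEM bytes, which is carved into SK_JSLOTS buffers of
 * SK_JLEN bytes each (the exact constants live in the register header);
 * a free list and an in-use list then track which slots have been
 * handed out via sk_jalloc()/sk_jfree().
 */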
728static int sk_alloc_jumbo_mem(sc_if)
729	struct sk_if_softc	*sc_if;
730{
731	caddr_t			ptr;
732	register int		i;
733	struct sk_jpool_entry   *entry;
734
735	/* Grab a big chunk o' storage. */
736	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
737	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
738
739	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
740		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
741		return(ENOBUFS);
742	}
743
744	SLIST_INIT(&sc_if->sk_jfree_listhead);
745	SLIST_INIT(&sc_if->sk_jinuse_listhead);
746
747	/*
748	 * Now divide it up into 9K pieces and save the addresses
749	 * in an array.
750	 */
751	ptr = sc_if->sk_cdata.sk_jumbo_buf;
752	for (i = 0; i < SK_JSLOTS; i++) {
753		sc_if->sk_cdata.sk_jslots[i] = ptr;
754		ptr += SK_JLEN;
755		entry = malloc(sizeof(struct sk_jpool_entry),
756		    M_DEVBUF, M_NOWAIT);
757		if (entry == NULL) {
758			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
759			sc_if->sk_cdata.sk_jumbo_buf = NULL;
760			printf("sk%d: no memory for jumbo "
761			    "buffer queue!\n", sc_if->sk_unit);
762			return(ENOBUFS);
763		}
764		entry->slot = i;
765		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
766		    entry, jpool_entries);
767	}
768
769	return(0);
770}
771
772/*
773 * Allocate a jumbo buffer.
774 */
775static void *sk_jalloc(sc_if)
776	struct sk_if_softc	*sc_if;
777{
778	struct sk_jpool_entry   *entry;
779
780	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
781
782	if (entry == NULL) {
783#ifdef SK_VERBOSE
784		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
785#endif
786		return(NULL);
787	}
788
789	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
790	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
791	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
792}
793
794/*
795 * Release a jumbo buffer.
796 */
797static void sk_jfree(buf, args)
798	caddr_t			buf;
799	void			*args;
800{
801	struct sk_if_softc	*sc_if;
802	int		        i;
803	struct sk_jpool_entry   *entry;
804
805	/* Extract the softc struct pointer. */
806	sc_if = (struct sk_if_softc *)args;
807
808	if (sc_if == NULL)
809		panic("sk_jfree: didn't get softc pointer!");
810
811	/* calculate the slot this buffer belongs to */
812	i = ((vm_offset_t)buf
813	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
814
815	if ((i < 0) || (i >= SK_JSLOTS))
816		panic("sk_jfree: asked to free buffer that we don't manage!");
817
818	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
819	if (entry == NULL)
820		panic("sk_jfree: buffer not in use!");
821	entry->slot = i;
822	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
823	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
824
825	return;
826}
827
828/*
829 * Set media options.
830 */
831static int sk_ifmedia_upd(ifp)
832	struct ifnet		*ifp;
833{
834	struct sk_if_softc	*sc_if;
835	struct mii_data		*mii;
836
837	sc_if = ifp->if_softc;
838	mii = device_get_softc(sc_if->sk_miibus);
839	sk_init(sc_if);
840	mii_mediachg(mii);
841
842	return(0);
843}
844
845/*
846 * Report current media status.
847 */
848static void sk_ifmedia_sts(ifp, ifmr)
849	struct ifnet		*ifp;
850	struct ifmediareq	*ifmr;
851{
852	struct sk_if_softc	*sc_if;
853	struct mii_data		*mii;
854
855	sc_if = ifp->if_softc;
856	mii = device_get_softc(sc_if->sk_miibus);
857
858	mii_pollstat(mii);
859	ifmr->ifm_active = mii->mii_media_active;
860	ifmr->ifm_status = mii->mii_media_status;
861
862	return;
863}
864
865static int sk_ioctl(ifp, command, data)
866	struct ifnet		*ifp;
867	u_long			command;
868	caddr_t			data;
869{
870	struct sk_if_softc	*sc_if = ifp->if_softc;
871	struct ifreq		*ifr = (struct ifreq *) data;
872	int			error = 0;
873	struct mii_data		*mii;
874
875	SK_IF_LOCK(sc_if);
876
877	switch(command) {
878	case SIOCSIFADDR:
879	case SIOCGIFADDR:
880		error = ether_ioctl(ifp, command, data);
881		break;
882	case SIOCSIFMTU:
883		if (ifr->ifr_mtu > SK_JUMBO_MTU)
884			error = EINVAL;
885		else {
886			ifp->if_mtu = ifr->ifr_mtu;
887			sk_init(sc_if);
888		}
889		break;
890	case SIOCSIFFLAGS:
891		if (ifp->if_flags & IFF_UP) {
892			if (ifp->if_flags & IFF_RUNNING &&
893			    ifp->if_flags & IFF_PROMISC &&
894			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
895				SK_XM_SETBIT_4(sc_if, XM_MODE,
896				    XM_MODE_RX_PROMISC);
897				sk_setmulti(sc_if);
898			} else if (ifp->if_flags & IFF_RUNNING &&
899			    !(ifp->if_flags & IFF_PROMISC) &&
900			    sc_if->sk_if_flags & IFF_PROMISC) {
901				SK_XM_CLRBIT_4(sc_if, XM_MODE,
902				    XM_MODE_RX_PROMISC);
903				sk_setmulti(sc_if);
904			} else
905				sk_init(sc_if);
906		} else {
907			if (ifp->if_flags & IFF_RUNNING)
908				sk_stop(sc_if);
909		}
910		sc_if->sk_if_flags = ifp->if_flags;
911		error = 0;
912		break;
913	case SIOCADDMULTI:
914	case SIOCDELMULTI:
915		sk_setmulti(sc_if);
916		error = 0;
917		break;
918	case SIOCGIFMEDIA:
919	case SIOCSIFMEDIA:
920		mii = device_get_softc(sc_if->sk_miibus);
921		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
922		break;
923	default:
924		error = EINVAL;
925		break;
926	}
927
928	SK_IF_UNLOCK(sc_if);
929
930	return(error);
931}
932
933/*
934 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
935 * IDs against our list and return a device name if we find a match.
936 */
937static int sk_probe(dev)
938	device_t		dev;
939{
940	struct sk_type		*t;
941
942	t = sk_devs;
943
944	while(t->sk_name != NULL) {
945		if ((pci_get_vendor(dev) == t->sk_vid) &&
946		    (pci_get_device(dev) == t->sk_did)) {
947			device_set_desc(dev, t->sk_name);
948			return(0);
949		}
950		t++;
951	}
952
953	return(ENXIO);
954}
955
956/*
957 * Force the GEnesis into reset, then bring it out of reset.
958 */
959static void sk_reset(sc)
960	struct sk_softc		*sc;
961{
962	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
963	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
964	DELAY(1000);
965	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
966	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
967
968	/* Configure packet arbiter */
969	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
970	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
971	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
972	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
973	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
974
975	/* Enable RAM interface */
976	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
977
978	/*
979	 * Configure interrupt moderation. The moderation timer
980	 * defers interrupts specified in the interrupt moderation
981	 * timer mask based on the timeout specified in the interrupt
982	 * moderation timer init register. Each bit in the timer
983	 * register represents 18.825ns, so to specify a timeout in
984	 * microseconds, we have to multiply by 54.
985	 */
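	/*
	 * Worked example for the value used below, assuming SK_IM_USECS()
	 * simply multiplies by the 54 ticks-per-microsecond figure noted
	 * above: SK_IM_USECS(200) = 200 * 54 = 10800 timer ticks, i.e.
	 * roughly 10800 * 18.825ns ~ 203us of coalescing for the RX/TX
	 * EOF interrupts named in the moderation mask.
	 */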
986	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
987	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
988	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
989	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
990
991	return;
992}
993
994static int sk_probe_xmac(dev)
995	device_t		dev;
996{
997	/*
998	 * Not much to do here. We always know there will be
999	 * at least one XMAC present, and if there are two,
1000	 * sk_attach() will create a second device instance
1001	 * for us.
1002	 */
1003	device_set_desc(dev, "XaQti Corp. XMAC II");
1004
1005	return(0);
1006}
1007
1008/*
1009 * Each XMAC chip is attached as a separate logical IP interface.
1010 * Single port cards will have only one logical interface of course.
1011 */
1012static int sk_attach_xmac(dev)
1013	device_t		dev;
1014{
1015	struct sk_softc		*sc;
1016	struct sk_if_softc	*sc_if;
1017	struct ifnet		*ifp;
1018	int			i, port;
1019
1020	if (dev == NULL)
1021		return(EINVAL);
1022
1023	sc_if = device_get_softc(dev);
1024	sc = device_get_softc(device_get_parent(dev));
1025	SK_LOCK(sc);
1026	port = *(int *)device_get_ivars(dev);
1027	free(device_get_ivars(dev), M_DEVBUF);
1028	device_set_ivars(dev, NULL);
1029	sc_if->sk_dev = dev;
1030
1031	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1032
1033	sc_if->sk_dev = dev;
1034	sc_if->sk_unit = device_get_unit(dev);
1035	sc_if->sk_port = port;
1036	sc_if->sk_softc = sc;
1037	sc->sk_if[port] = sc_if;
1038	if (port == SK_PORT_A)
1039		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1040	if (port == SK_PORT_B)
1041		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1042
1043	/*
1044	 * Get station address for this interface. Note that
1045	 * dual port cards actually come with three station
1046	 * addresses: one for each port, plus an extra. The
1047	 * extra one is used by the SysKonnect driver software
1048	 * as a 'virtual' station address for when both ports
1049	 * are operating in failover mode. Currently we don't
1050	 * use this extra address.
1051	 */
1052	for (i = 0; i < ETHER_ADDR_LEN; i++)
1053		sc_if->arpcom.ac_enaddr[i] =
1054		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1055
1056	printf("sk%d: Ethernet address: %6D\n",
1057	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1058
1059	/*
1060	 * Set up RAM buffer addresses. The NIC will have a certain
1061	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1062	 * need to divide this up a) between the transmitter and
1063 	 * receiver and b) between the two XMACs, if this is a
1064	 * dual port NIC. Our algorithm is to divide up the memory
1065	 * evenly so that everyone gets a fair share.
1066	 */
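	/*
	 * Worked example (sizes are illustrative): on a single-MAC board
	 * with 1MB of SRAM and sk_rboff == 0, chunk is 512K, so receive
	 * gets 64-bit-word addresses 0 through 65535 and transmit gets
	 * 65536 through 131071. Everything is expressed in 8-byte units
	 * (hence the division by sizeof(u_int64_t)) because that is the
	 * granularity the RAM buffer registers use.
	 */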
1067	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1068		u_int32_t		chunk, val;
1069
1070		chunk = sc->sk_ramsize / 2;
1071		val = sc->sk_rboff / sizeof(u_int64_t);
1072		sc_if->sk_rx_ramstart = val;
1073		val += (chunk / sizeof(u_int64_t));
1074		sc_if->sk_rx_ramend = val - 1;
1075		sc_if->sk_tx_ramstart = val;
1076		val += (chunk / sizeof(u_int64_t));
1077		sc_if->sk_tx_ramend = val - 1;
1078	} else {
1079		u_int32_t		chunk, val;
1080
1081		chunk = sc->sk_ramsize / 4;
1082		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1083		    sizeof(u_int64_t);
1084		sc_if->sk_rx_ramstart = val;
1085		val += (chunk / sizeof(u_int64_t));
1086		sc_if->sk_rx_ramend = val - 1;
1087		sc_if->sk_tx_ramstart = val;
1088		val += (chunk / sizeof(u_int64_t));
1089		sc_if->sk_tx_ramend = val - 1;
1090	}
1091
1092	/* Read and save PHY type and set PHY address */
1093	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1094	switch(sc_if->sk_phytype) {
1095	case SK_PHYTYPE_XMAC:
1096		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1097		break;
1098	case SK_PHYTYPE_BCOM:
1099		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1100		break;
1101	default:
1102		printf("skc%d: unsupported PHY type: %d\n",
1103		    sc->sk_unit, sc_if->sk_phytype);
1104		SK_UNLOCK(sc);
1105		return(ENODEV);
1106	}
1107
1108	/* Allocate the descriptor queues. */
1109	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1110	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1111
1112	if (sc_if->sk_rdata == NULL) {
1113		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1114		sc->sk_if[port] = NULL;
1115		SK_UNLOCK(sc);
1116		return(ENOMEM);
1117	}
1118
1119	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1120
1121	/* Try to allocate memory for jumbo buffers. */
1122	if (sk_alloc_jumbo_mem(sc_if)) {
1123		printf("sk%d: jumbo buffer allocation failed\n",
1124		    sc_if->sk_unit);
1125		contigfree(sc_if->sk_rdata,
1126		    sizeof(struct sk_ring_data), M_DEVBUF);
1127		sc->sk_if[port] = NULL;
1128		SK_UNLOCK(sc);
1129		return(ENOMEM);
1130	}
1131
1132	ifp = &sc_if->arpcom.ac_if;
1133	ifp->if_softc = sc_if;
1134	ifp->if_unit = sc_if->sk_unit;
1135	ifp->if_name = "sk";
1136	ifp->if_mtu = ETHERMTU;
1137	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1138	ifp->if_ioctl = sk_ioctl;
1139	ifp->if_output = ether_output;
1140	ifp->if_start = sk_start;
1141	ifp->if_watchdog = sk_watchdog;
1142	ifp->if_init = sk_init;
1143	ifp->if_baudrate = 1000000000;
1144	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1145
1146	/*
1147	 * Call MI attach routine.
1148	 */
1149	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1150	callout_handle_init(&sc_if->sk_tick_ch);
1151
1152	/*
1153	 * Do miibus setup.
1154	 */
1155	sk_init_xmac(sc_if);
1156	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1157	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1158		printf("sk%d: no PHY found!\n", sc_if->sk_unit);
		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1159		contigfree(sc_if->sk_rdata,
1160		    sizeof(struct sk_ring_data), M_DEVBUF);
1161		ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
1162		SK_UNLOCK(sc);
1163		return(ENXIO);
1164	}
1165
1166	SK_UNLOCK(sc);
1167
1168	return(0);
1169}
1170
1171/*
1172 * Attach the interface. Allocate softc structures, do ifmedia
1173 * setup and ethernet/BPF attach.
1174 */
1175static int sk_attach(dev)
1176	device_t		dev;
1177{
1178	u_int32_t		command;
1179	struct sk_softc		*sc;
1180	int			unit, error = 0, rid, *port;
1181
1182	sc = device_get_softc(dev);
1183	unit = device_get_unit(dev);
1184	bzero(sc, sizeof(struct sk_softc));
1185
1186	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1187	    MTX_DEF | MTX_RECURSE);
1188	SK_LOCK(sc);
1189
1190	/*
1191	 * Handle power management nonsense.
1192	 */
1193	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1194		u_int32_t		iobase, membase, irq;
1195
1196		/* Save important PCI config data. */
1197		iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1198		membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1199		irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1200
1201		/* Reset the power state. */
1202		printf("skc%d: chip is in D%d power mode "
1203		    "-- setting to D0\n", unit,
1204		    pci_get_powerstate(dev));
1205		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1206
1207		/* Restore PCI config data. */
1208		pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1209		pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1210		pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1211	}
1212
1213	/*
1214	 * Map control/status registers.
1215	 */
1216	pci_enable_busmaster(dev);
1217	pci_enable_io(dev, SYS_RES_IOPORT);
1218	pci_enable_io(dev, SYS_RES_MEMORY);
1219	command = pci_read_config(dev, PCIR_COMMAND, 4);
1220
1221#ifdef SK_USEIOSPACE
1222	if (!(command & PCIM_CMD_PORTEN)) {
1223		printf("skc%d: failed to enable I/O ports!\n", unit);
1224		error = ENXIO;
1225		goto fail;
1226	}
1227#else
1228	if (!(command & PCIM_CMD_MEMEN)) {
1229		printf("skc%d: failed to enable memory mapping!\n", unit);
1230		error = ENXIO;
1231		goto fail;
1232	}
1233#endif
1234
1235	rid = SK_RID;
1236	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1237	    0, ~0, 1, RF_ACTIVE);
1238
1239	if (sc->sk_res == NULL) {
1240		printf("skc%d: couldn't map ports/memory\n", unit);
1241		error = ENXIO;
1242		goto fail;
1243	}
1244
1245	sc->sk_btag = rman_get_bustag(sc->sk_res);
1246	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1247
1248	/* Allocate interrupt */
1249	rid = 0;
1250	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1251	    RF_SHAREABLE | RF_ACTIVE);
1252
1253	if (sc->sk_irq == NULL) {
1254		printf("skc%d: couldn't map interrupt\n", unit);
1255		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1256		error = ENXIO;
1257		goto fail;
1258	}
1259
1260	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1261	    sk_intr, sc, &sc->sk_intrhand);
1262
1263	if (error) {
1264		printf("skc%d: couldn't set up irq\n", unit);
1265		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1266		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1267		goto fail;
1268	}
1269
1270	/* Reset the adapter. */
1271	sk_reset(sc);
1272
1273	sc->sk_unit = unit;
1274
1275	/* Read and save vital product data from EEPROM. */
1276	sk_vpd_read(sc);
1277
1278	/* Read and save RAM size and RAMbuffer offset */
1279	switch(sk_win_read_1(sc, SK_EPROM0)) {
1280	case SK_RAMSIZE_512K_64:
1281		sc->sk_ramsize = 0x80000;
1282		sc->sk_rboff = SK_RBOFF_0;
1283		break;
1284	case SK_RAMSIZE_1024K_64:
1285		sc->sk_ramsize = 0x100000;
1286		sc->sk_rboff = SK_RBOFF_80000;
1287		break;
1288	case SK_RAMSIZE_1024K_128:
1289		sc->sk_ramsize = 0x100000;
1290		sc->sk_rboff = SK_RBOFF_0;
1291		break;
1292	case SK_RAMSIZE_2048K_128:
1293		sc->sk_ramsize = 0x200000;
1294		sc->sk_rboff = SK_RBOFF_0;
1295		break;
1296	default:
1297		printf("skc%d: unknown ram size: %d\n",
1298		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1299		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1300		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1301		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1302		error = ENXIO;
1303		goto fail;
1304		break;
1305	}
1306
1307	/* Read and save physical media type */
1308	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1309	case SK_PMD_1000BASESX:
1310		sc->sk_pmd = IFM_1000_SX;
1311		break;
1312	case SK_PMD_1000BASELX:
1313		sc->sk_pmd = IFM_1000_LX;
1314		break;
1315	case SK_PMD_1000BASECX:
1316		sc->sk_pmd = IFM_1000_CX;
1317		break;
1318	case SK_PMD_1000BASETX:
1319		sc->sk_pmd = IFM_1000_T;
1320		break;
1321	default:
1322		printf("skc%d: unknown media type: 0x%x\n",
1323		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1324		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1325		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1326		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1327		error = ENXIO;
1328		goto fail;
1329	}
1330
1331	/* Announce the product name. */
1332	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1333	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1334	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1335	*port = SK_PORT_A;
1336	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1337
1338	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1339		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1340		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1341		*port = SK_PORT_B;
1342		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1343	}
1344
1345	/* Turn on the 'driver is loaded' LED. */
1346	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1347
1348	bus_generic_attach(dev);
1349	SK_UNLOCK(sc);
1350	return(0);
1351
1352fail:
1353	SK_UNLOCK(sc);
1354	mtx_destroy(&sc->sk_mtx);
1355	return(error);
1356}
1357
1358static int sk_detach_xmac(dev)
1359	device_t		dev;
1360{
1361	struct sk_softc		*sc;
1362	struct sk_if_softc	*sc_if;
1363	struct ifnet		*ifp;
1364
1365	sc = device_get_softc(device_get_parent(dev));
1366	sc_if = device_get_softc(dev);
1367	SK_IF_LOCK(sc_if);
1368
1369	ifp = &sc_if->arpcom.ac_if;
1370	sk_stop(sc_if);
1371	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
1372	bus_generic_detach(dev);
1373	if (sc_if->sk_miibus != NULL)
1374		device_delete_child(dev, sc_if->sk_miibus);
1375	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1376	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
1377	SK_IF_UNLOCK(sc_if);
1378
1379	return(0);
1380}
1381
1382static int sk_detach(dev)
1383	device_t		dev;
1384{
1385	struct sk_softc		*sc;
1386
1387	sc = device_get_softc(dev);
1388	SK_LOCK(sc);
1389
1390	bus_generic_detach(dev);
1391	if (sc->sk_devs[SK_PORT_A] != NULL)
1392		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1393	if (sc->sk_devs[SK_PORT_B] != NULL)
1394		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1395
1396	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1397	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1398	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1399
1400	SK_UNLOCK(sc);
1401	mtx_destroy(&sc->sk_mtx);
1402
1403	return(0);
1404}
1405
1406static int sk_encap(sc_if, m_head, txidx)
1407        struct sk_if_softc	*sc_if;
1408        struct mbuf		*m_head;
1409        u_int32_t		*txidx;
1410{
1411	struct sk_tx_desc	*f = NULL;
1412	struct mbuf		*m;
1413	u_int32_t		frag, cur, cnt = 0;
1414
1415	m = m_head;
1416	cur = frag = *txidx;
1417
1418	/*
1419	 * Start packing the mbufs in this chain into
1420	 * the fragment pointers. Stop when we run out
1421	 * of fragments or hit the end of the mbuf chain.
1422	 */
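	/*
	 * Note the ownership handoff: every fragment after the first has
	 * SK_TXCTL_OWN set as it is filled in, but the first fragment only
	 * gets SK_TXCTL_OWN at the very end, once the whole chain has been
	 * built, so the NIC never starts on a partially constructed frame.
	 */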
1423	for (m = m_head; m != NULL; m = m->m_next) {
1424		if (m->m_len != 0) {
1425			if ((SK_TX_RING_CNT -
1426			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1427				return(ENOBUFS);
1428			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1429			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1430			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1431			if (cnt == 0)
1432				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1433			else
1434				f->sk_ctl |= SK_TXCTL_OWN;
1435			cur = frag;
1436			SK_INC(frag, SK_TX_RING_CNT);
1437			cnt++;
1438		}
1439	}
1440
1441	if (m != NULL)
1442		return(ENOBUFS);
1443
1444	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1445		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1446	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1447	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1448	sc_if->sk_cdata.sk_tx_cnt += cnt;
1449
1450	*txidx = frag;
1451
1452	return(0);
1453}
1454
1455static void sk_start(ifp)
1456	struct ifnet		*ifp;
1457{
1458        struct sk_softc		*sc;
1459        struct sk_if_softc	*sc_if;
1460        struct mbuf		*m_head = NULL;
1461        u_int32_t		idx;
1462
1463	sc_if = ifp->if_softc;
1464	sc = sc_if->sk_softc;
1465
1466	SK_IF_LOCK(sc_if);
1467
1468	idx = sc_if->sk_cdata.sk_tx_prod;
1469
1470	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1471		IF_DEQUEUE(&ifp->if_snd, m_head);
1472		if (m_head == NULL)
1473			break;
1474
1475		/*
1476		 * Pack the data into the transmit ring. If we
1477		 * don't have room, set the OACTIVE flag and wait
1478		 * for the NIC to drain the ring.
1479		 */
1480		if (sk_encap(sc_if, m_head, &idx)) {
1481			IF_PREPEND(&ifp->if_snd, m_head);
1482			ifp->if_flags |= IFF_OACTIVE;
1483			break;
1484		}
1485
1486		/*
1487		 * If there's a BPF listener, bounce a copy of this frame
1488		 * to him.
1489		 */
1490		if (ifp->if_bpf)
1491			bpf_mtap(ifp, m_head);
1492	}
1493
1494	/* Transmit */
1495	sc_if->sk_cdata.sk_tx_prod = idx;
1496	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1497
1498	/* Set a timeout in case the chip goes out to lunch. */
1499	ifp->if_timer = 5;
1500	SK_IF_UNLOCK(sc_if);
1501
1502	return;
1503}
1504
1505
1506static void sk_watchdog(ifp)
1507	struct ifnet		*ifp;
1508{
1509	struct sk_if_softc	*sc_if;
1510
1511	sc_if = ifp->if_softc;
1512
1513	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1514	sk_init(sc_if);
1515
1516	return;
1517}
1518
1519static void sk_shutdown(dev)
1520	device_t		dev;
1521{
1522	struct sk_softc		*sc;
1523
1524	sc = device_get_softc(dev);
1525	SK_LOCK(sc);
1526
1527	/* Turn off the 'driver is loaded' LED. */
1528	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1529
1530	/*
1531	 * Reset the GEnesis controller. Doing this should also
1532	 * assert the resets on the attached XMAC(s).
1533	 */
1534	sk_reset(sc);
1535	SK_UNLOCK(sc);
1536
1537	return;
1538}
1539
1540static void sk_rxeof(sc_if)
1541	struct sk_if_softc	*sc_if;
1542{
1543	struct ether_header	*eh;
1544	struct mbuf		*m;
1545	struct ifnet		*ifp;
1546	struct sk_chain		*cur_rx;
1547	int			total_len = 0;
1548	int			i;
1549	u_int32_t		rxstat;
1550
1551	ifp = &sc_if->arpcom.ac_if;
1552	i = sc_if->sk_cdata.sk_rx_prod;
1553	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1554
1555	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1556
1557		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1558		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1559		m = cur_rx->sk_mbuf;
1560		cur_rx->sk_mbuf = NULL;
1561		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1562		SK_INC(i, SK_RX_RING_CNT);
1563
1564		if (rxstat & XM_RXSTAT_ERRFRAME) {
1565			ifp->if_ierrors++;
1566			sk_newbuf(sc_if, cur_rx, m);
1567			continue;
1568		}
1569
1570		/*
1571		 * Try to allocate a new jumbo buffer. If that
1572		 * fails, copy the packet to mbufs and put the
1573		 * jumbo buffer back in the ring so it can be
1574		 * re-used. If allocating mbufs fails, then we
1575		 * have to drop the packet.
1576		 */
1577		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1578			struct mbuf		*m0;
1579			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
1580			    ifp, NULL);
1581			sk_newbuf(sc_if, cur_rx, m);
1582			if (m0 == NULL) {
1583				printf("sk%d: no receive buffers "
1584				    "available -- packet dropped!\n",
1585				    sc_if->sk_unit);
1586				ifp->if_ierrors++;
1587				continue;
1588			}
1589			m = m0;
1590		} else {
1591			m->m_pkthdr.rcvif = ifp;
1592			m->m_pkthdr.len = m->m_len = total_len;
1593		}
1594
1595		ifp->if_ipackets++;
1596		eh = mtod(m, struct ether_header *);
1597
1598		/* Remove header from mbuf and pass it on. */
1599		m_adj(m, sizeof(struct ether_header));
1600		ether_input(ifp, eh, m);
1601	}
1602
1603	sc_if->sk_cdata.sk_rx_prod = i;
1604
1605	return;
1606}
1607
1608static void sk_txeof(sc_if)
1609	struct sk_if_softc	*sc_if;
1610{
1611	struct sk_tx_desc	*cur_tx = NULL;
1612	struct ifnet		*ifp;
1613	u_int32_t		idx;
1614
1615	ifp = &sc_if->arpcom.ac_if;
1616
1617	/*
1618	 * Go through our tx ring and free mbufs for those
1619	 * frames that have been sent.
1620	 */
1621	idx = sc_if->sk_cdata.sk_tx_cons;
1622	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1623		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1624		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1625			break;
1626		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1627			ifp->if_opackets++;
1628		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1629			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1630			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1631		}
1632		sc_if->sk_cdata.sk_tx_cnt--;
1633		SK_INC(idx, SK_TX_RING_CNT);
1634		ifp->if_timer = 0;
1635	}
1636
1637	sc_if->sk_cdata.sk_tx_cons = idx;
1638
1639	if (cur_tx != NULL)
1640		ifp->if_flags &= ~IFF_OACTIVE;
1641
1642	return;
1643}
1644
1645static void sk_tick(xsc_if)
1646	void			*xsc_if;
1647{
1648	struct sk_if_softc	*sc_if;
1649	struct mii_data		*mii;
1650	struct ifnet		*ifp;
1651	int			i;
1652
1653	sc_if = xsc_if;
1654	SK_IF_LOCK(sc_if);
1655	ifp = &sc_if->arpcom.ac_if;
1656	mii = device_get_softc(sc_if->sk_miibus);
1657
1658	if (!(ifp->if_flags & IFF_UP)) {
1659		SK_IF_UNLOCK(sc_if);
1660		return;
1661	}
1662
1663	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1664		sk_intr_bcom(sc_if);
1665		SK_IF_UNLOCK(sc_if);
1666		return;
1667	}
1668
1669	/*
1670	 * According to SysKonnect, the correct way to verify that
1671	 * the link has come back up is to poll bit 0 of the GPIO
1672	 * register three times. This pin has the signal from the
1673	 * link_sync pin connected to it; if we read the same link
1674	 * state 3 times in a row, we know the link is up.
1675	 */
1676	for (i = 0; i < 3; i++) {
1677		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
1678			break;
1679	}
1680
1681	if (i != 3) {
1682		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1683		SK_IF_UNLOCK(sc_if);
1684		return;
1685	}
1686
1687	/* Turn the GP0 interrupt back on. */
1688	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1689	SK_XM_READ_2(sc_if, XM_ISR);
1690	mii_tick(mii);
1691	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
1692
1693	SK_IF_UNLOCK(sc_if);
1694	return;
1695}
1696
1697static void sk_intr_bcom(sc_if)
1698	struct sk_if_softc	*sc_if;
1699{
1700	struct sk_softc		*sc;
1701	struct mii_data		*mii;
1702	struct ifnet		*ifp;
1703	int			status;
1704
1705	sc = sc_if->sk_softc;
1706	mii = device_get_softc(sc_if->sk_miibus);
1707	ifp = &sc_if->arpcom.ac_if;
1708
1709	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1710
1711	/*
1712	 * Read the PHY interrupt register to make sure
1713	 * we clear any pending interrupts.
1714	 */
1715	status = sk_miibus_readreg(sc_if->sk_dev,
1716	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
1717
1718	if (!(ifp->if_flags & IFF_RUNNING)) {
1719		sk_init_xmac(sc_if);
1720		return;
1721	}
1722
1723	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
1724		int			lstat;
1725		lstat = sk_miibus_readreg(sc_if->sk_dev,
1726		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);
1727
1728		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
1729			mii_mediachg(mii);
1730			/* Turn off the link LED. */
1731			SK_IF_WRITE_1(sc_if, 0,
1732			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
1733			sc_if->sk_link = 0;
1734		} else if (status & BRGPHY_ISR_LNK_CHG) {
1735			sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1736	    		    BRGPHY_MII_IMR, 0xFF00);
1737			mii_tick(mii);
1738			sc_if->sk_link = 1;
1739			/* Turn on the link LED. */
1740			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
1741			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
1742			    SK_LINKLED_BLINK_OFF);
1743		} else {
1744			mii_tick(mii);
1745			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1746		}
1747	}
1748
1749	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1750
1751	return;
1752}
1753
1754static void sk_intr_xmac(sc_if)
1755	struct sk_if_softc	*sc_if;
1756{
1757	struct sk_softc		*sc;
1758	u_int16_t		status;
1759	struct mii_data		*mii;
1760
1761	sc = sc_if->sk_softc;
1762	mii = device_get_softc(sc_if->sk_miibus);
1763	status = SK_XM_READ_2(sc_if, XM_ISR);
1764
1765	/*
1766	 * Link has gone down. Start MII tick timeout to
1767	 * watch for link resync.
1768	 */
1769	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
1770		if (status & XM_ISR_GP0_SET) {
1771			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1772			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1773		}
1774
1775		if (status & XM_ISR_AUTONEG_DONE) {
1776			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
1777		}
1778	}
1779
1780	if (status & XM_IMR_TX_UNDERRUN)
1781		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1782
1783	if (status & XM_IMR_RX_OVERRUN)
1784		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1785
1786	status = SK_XM_READ_2(sc_if, XM_ISR);
1787
1788	return;
1789}
1790
1791static void sk_intr(xsc)
1792	void			*xsc;
1793{
1794	struct sk_softc		*sc = xsc;
1795	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1796	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1797	u_int32_t		status;
1798
1799	SK_LOCK(sc);
1800
1801	sc_if0 = sc->sk_if[SK_PORT_A];
1802	sc_if1 = sc->sk_if[SK_PORT_B];
1803
1804	if (sc_if0 != NULL)
1805		ifp0 = &sc_if0->arpcom.ac_if;
1806	if (sc_if1 != NULL)
1807		ifp1 = &sc_if1->arpcom.ac_if;
1808
1809	for (;;) {
1810		status = CSR_READ_4(sc, SK_ISSR);
1811		if (!(status & sc->sk_intrmask))
1812			break;
1813
1814		/* Handle receive interrupts first. */
1815		if (status & SK_ISR_RX1_EOF) {
1816			sk_rxeof(sc_if0);
1817			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1818			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1819		}
1820		if (status & SK_ISR_RX2_EOF) {
1821			sk_rxeof(sc_if1);
1822			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1823			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1824		}
1825
1826		/* Then transmit interrupts. */
1827		if (status & SK_ISR_TX1_S_EOF) {
1828			sk_txeof(sc_if0);
1829			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1830			    SK_TXBMU_CLR_IRQ_EOF);
1831		}
1832		if (status & SK_ISR_TX2_S_EOF) {
1833			sk_txeof(sc_if1);
1834			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1835			    SK_TXBMU_CLR_IRQ_EOF);
1836		}
1837
1838		/* Then MAC interrupts. */
1839		if (status & SK_ISR_MAC1 &&
1840		    ifp0->if_flags & IFF_RUNNING)
1841			sk_intr_xmac(sc_if0);
1842
1843		if (status & SK_ISR_MAC2 &&
1844		    ifp1->if_flags & IFF_RUNNING)
1845			sk_intr_xmac(sc_if1);
1846
1847		if (status & SK_ISR_EXTERNAL_REG) {
1848			if (ifp0 != NULL)
1849				sk_intr_bcom(sc_if0);
1850			if (ifp1 != NULL)
1851				sk_intr_bcom(sc_if1);
1852		}
1853	}
1854
1855	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1856
1857	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1858		sk_start(ifp0);
1859	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1860		sk_start(ifp1);
1861
1862	SK_UNLOCK(sc);
1863
1864	return;
1865}
1866
1867static void sk_init_xmac(sc_if)
1868	struct sk_if_softc	*sc_if;
1869{
1870	struct sk_softc		*sc;
1871	struct ifnet		*ifp;
1872	struct sk_bcom_hack	bhack[] = {
1873	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
1874	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
1875	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1876	{ 0, 0 } };
1877
1878	sc = sc_if->sk_softc;
1879	ifp = &sc_if->arpcom.ac_if;
1880
1881	/* Unreset the XMAC. */
1882	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
1883	DELAY(1000);
1884
1885	/* Reset the XMAC's internal state. */
1886	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
1887
1888	/* Save the XMAC II revision */
1889	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
1890
1891	/*
1892	 * Perform additional initialization for external PHYs,
1893	 * namely for the 1000baseTX cards that use the XMAC's
1894	 * GMII mode.
1895	 */
1896	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1897		int			i = 0;
1898		u_int32_t		val;
1899
1900		/* Take PHY out of reset. */
1901		val = sk_win_read_4(sc, SK_GPIO);
1902		if (sc_if->sk_port == SK_PORT_A)
1903			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
1904		else
1905			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
1906		sk_win_write_4(sc, SK_GPIO, val);
1907
1908		/* Enable GMII mode on the XMAC. */
1909		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
1910
1911		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1912		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
1913		DELAY(10000);
1914		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
1915		    BRGPHY_MII_IMR, 0xFFF0);
1916
1917		/*
1918		 * Early versions of the BCM5400 apparently have
1919		 * a bug that requires them to have their reserved
1920		 * registers initialized to some magic values. I don't
1921		 * know what the numbers do, I'm just the messenger.
1922		 */
1923		if (sk_miibus_readreg(sc_if->sk_dev,
1924		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
1925			while(bhack[i].reg) {
1926				sk_miibus_writereg(sc_if->sk_dev,
1927				    SK_PHYADDR_BCOM, bhack[i].reg,
1928				    bhack[i].val);
1929				i++;
1930			}
1931		}
1932	}
1933
1934	/* Set station address */
1935	SK_XM_WRITE_2(sc_if, XM_PAR0,
1936	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
1937	SK_XM_WRITE_2(sc_if, XM_PAR1,
1938	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
1939	SK_XM_WRITE_2(sc_if, XM_PAR2,
1940	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
1941	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
1942
1943	if (ifp->if_flags & IFF_PROMISC) {
1944		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1945	} else {
1946		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1947	}
1948
1949	if (ifp->if_flags & IFF_BROADCAST) {
1950		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1951	} else {
1952		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1953	}
1954
1955	/* We don't need the FCS appended to the packet. */
1956	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
1957
1958	/* We want short frames padded to 60 bytes. */
1959	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
1960
1961	/*
1962	 * Enable the reception of all error frames. This is
1963	 * a necessary evil due to the design of the XMAC. The
1964	 * XMAC's receive FIFO is only 8K in size, however jumbo
1965	 * frames can be up to 9000 bytes in length. When bad
1966	 * frame filtering is enabled, the XMAC's RX FIFO operates
1967	 * in 'store and forward' mode. For this to work, the
1968	 * entire frame has to fit into the FIFO, but that means
1969	 * that jumbo frames larger than 8192 bytes will be
1970	 * truncated. Disabling all bad frame filtering causes
1971	 * the RX FIFO to operate in streaming mode, in which
1972	 * case the XMAC will start transferring frames out of the
1973	 * RX FIFO as soon as the FIFO threshold is reached.
1974	 */
1975	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
1976	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
1977	    XM_MODE_RX_INRANGELEN);
1978
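	/*
	 * Accept oversized (jumbo) frames only if the configured MTU
	 * is larger than a standard ethernet frame.
	 */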
1979	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
1980		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1981	else
1982		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1983
1984	/*
1985	 * Bump up the transmit threshold. This helps hold off transmit
1986	 * underruns when we're blasting traffic from both ports at once.
1987	 */
1988	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
1989
1990	/* Set multicast filter */
1991	sk_setmulti(sc_if);
1992
1993	/* Clear and enable interrupts */
1994	SK_XM_READ_2(sc_if, XM_ISR);
1995	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
1996		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
1997	else
1998		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
1999
2000	/* Configure MAC arbiter */
2001	switch(sc_if->sk_xmac_rev) {
2002	case XM_XMAC_REV_B2:
2003		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2004		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2005		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2006		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2007		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2008		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2009		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2010		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2011		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2012		break;
2013	case XM_XMAC_REV_C1:
2014		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2015		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2016		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2017		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2018		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2019		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2020		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2021		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2022		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2023		break;
2024	default:
2025		break;
2026	}
2027	sk_win_write_2(sc, SK_MACARB_CTL,
2028	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2029
2030	sc_if->sk_link = 1;
2031
2032	return;
2033}
2034
2035/*
2036 * Note that to properly initialize any part of the GEnesis chip,
2037 * you first have to take it out of reset mode.
2038 */
2039static void sk_init(xsc)
2040	void			*xsc;
2041{
2042	struct sk_if_softc	*sc_if = xsc;
2043	struct sk_softc		*sc;
2044	struct ifnet		*ifp;
2045	struct mii_data		*mii;
2046
2047	SK_IF_LOCK(sc_if);
2048
2049	ifp = &sc_if->arpcom.ac_if;
2050	sc = sc_if->sk_softc;
2051	mii = device_get_softc(sc_if->sk_miibus);
2052
2053	/* Cancel pending I/O and free all RX/TX buffers. */
2054	sk_stop(sc_if);
2055
2056	/* Configure LINK_SYNC LED */
2057	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2058	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);
2059
2060	/* Configure RX LED */
2061	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);
2062
2063	/* Configure TX LED */
2064	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);
2065
2066	/* Configure I2C registers */
2067
2068	/* Configure XMAC(s) */
2069	sk_init_xmac(sc_if);
2070	mii_mediachg(mii);
2071
2072	/* Configure MAC FIFOs */
2073	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2074	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2075	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2076
2077	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2078	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2079	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2080
2081	/* Configure transmit arbiter(s) */
2082	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2083	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2084
2085	/* Configure RAMbuffers */
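	/*
	 * Each port gets its own slice of the on-board SRAM; the
	 * sk_rx/tx_ramstart and ramend values delimiting those slices
	 * are computed when the interface is attached.
	 */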
2086	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2087	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2088	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2089	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2090	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2091	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2092
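	/*
	 * The transmit RAM buffer runs in store-and-forward mode so a
	 * complete frame is staged in SRAM before it is handed to the
	 * XMAC, which helps avoid transmit underruns.
	 */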
2093	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2094	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2095	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2096	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2097	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2098	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2099	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2100
2101	/* Configure BMUs */
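	/*
	 * The BMUs fetch descriptors from host memory, so the ring
	 * base addresses are programmed as physical addresses via
	 * vtophys(); the upper 32 address bits are left at zero.
	 */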
2102	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2103	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2104	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2105	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2106
2107	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2108	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2109	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2110	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2111
2112	/* Init descriptors */
2113	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2114		printf("sk%d: initialization failed: no "
2115		    "memory for rx buffers\n", sc_if->sk_unit);
2116		sk_stop(sc_if);
2117		SK_IF_UNLOCK(sc_if);
2118		return;
2119	}
2120	sk_init_tx_ring(sc_if);
2121
2122	/* Configure interrupt handling */
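	/*
	 * Unmask the interrupt sources for this port (SK_INTRS1 for
	 * port A, SK_INTRS2 for port B) plus the external register
	 * interrupt, through which events from an external BCOM PHY
	 * are reported (see the dispatch in sk_intr()).
	 */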
2123	CSR_READ_4(sc, SK_ISSR);
2124	if (sc_if->sk_port == SK_PORT_A)
2125		sc->sk_intrmask |= SK_INTRS1;
2126	else
2127		sc->sk_intrmask |= SK_INTRS2;
2128
2129	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2130
2131	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2132
2133	/* Start BMUs. */
2134	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2135
2136	/* Enable the XMAC's TX and RX state machines */
2137	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2138	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2139
2140	ifp->if_flags |= IFF_RUNNING;
2141	ifp->if_flags &= ~IFF_OACTIVE;
2142
2143	SK_IF_UNLOCK(sc_if);
2144
2145	return;
2146}
2147
2148static void sk_stop(sc_if)
2149	struct sk_if_softc	*sc_if;
2150{
2151	int			i;
2152	struct sk_softc		*sc;
2153	struct ifnet		*ifp;
2154
2155	SK_IF_LOCK(sc_if);
2156	sc = sc_if->sk_softc;
2157	ifp = &sc_if->arpcom.ac_if;
2158
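	/* Cancel the periodic sk_tick callout before tearing the port down. */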
2159	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2160
2161	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2162		u_int32_t		val;
2163
2164		/* Put PHY back into reset. */
2165		val = sk_win_read_4(sc, SK_GPIO);
2166		if (sc_if->sk_port == SK_PORT_A) {
2167			val |= SK_GPIO_DIR0;
2168			val &= ~SK_GPIO_DAT0;
2169		} else {
2170			val |= SK_GPIO_DIR2;
2171			val &= ~SK_GPIO_DAT2;
2172		}
2173		sk_win_write_4(sc, SK_GPIO, val);
2174	}
2175
2176	/* Turn off various components of this interface. */
2177	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2178	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2179	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2180	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2181	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2182	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2183	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2184	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2185	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2186	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
2187	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2188	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2189
2190	/* Disable interrupts */
2191	if (sc_if->sk_port == SK_PORT_A)
2192		sc->sk_intrmask &= ~SK_INTRS1;
2193	else
2194		sc->sk_intrmask &= ~SK_INTRS2;
2195	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2196
2197	SK_XM_READ_2(sc_if, XM_ISR);
2198	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2199
2200	/* Free RX and TX mbufs still in the queues. */
2201	for (i = 0; i < SK_RX_RING_CNT; i++) {
2202		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2203			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2204			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2205		}
2206	}
2207
2208	for (i = 0; i < SK_TX_RING_CNT; i++) {
2209		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2210			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2211			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2212		}
2213	}
2214
2215	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2216	SK_IF_UNLOCK(sc_if);
2217	return;
2218}
2219