/*	if_sk.c revision 1.13	*/
1/*	$OpenBSD: if_sk.c,v 1.13 2001/06/25 02:18:47 fgsch Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998, 1999, 2000
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35 */
36
37/*
38 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
39 * the SK-984x series adapters, both single port and dual port.
40 * References:
41 * 	The XaQti XMAC II datasheet,
42 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
43 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
44 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
48 *
49 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
50 *
51 * Written by Bill Paul <wpaul@ee.columbia.edu>
52 * Department of Electrical Engineering
53 * Columbia University, New York City
54 */
55
56/*
57 * The SysKonnect gigabit ethernet adapters consist of two main
58 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
59 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
60 * components and a PHY while the GEnesis controller provides a PCI
61 * interface with DMA support. Each card may have between 512K and
62 * 2MB of SRAM on board depending on the configuration.
63 *
64 * The SysKonnect GEnesis controller can have either one or two XMAC
65 * chips connected to it, allowing single or dual port NIC configurations.
66 * SysKonnect has the distinction of being the only vendor on the market
67 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
68 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
69 * XMAC registers. This driver takes advantage of these features to allow
70 * both XMACs to operate as independent interfaces.
71 */
72
73#include "bpfilter.h"
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/sockio.h>
78#include <sys/mbuf.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/socket.h>
82#include <sys/device.h>
83#include <sys/queue.h>
84
85#include <net/if.h>
86#include <net/if_dl.h>
87#include <net/if_types.h>
88
89#ifdef INET
90#include <netinet/in.h>
91#include <netinet/in_systm.h>
92#include <netinet/in_var.h>
93#include <netinet/ip.h>
94#include <netinet/if_ether.h>
95#endif
96
97#include <net/if_media.h>
98
99#if NBPFILTER > 0
100#include <net/bpf.h>
101#endif
102
103#include <vm/vm.h>              /* for vtophys */
104#include <vm/pmap.h>            /* for vtophys */
105#include <vm/vm_kern.h>
106#include <vm/vm_extern.h>
107#include <machine/bus.h>
108
109#include <dev/mii/mii.h>
110#include <dev/mii/miivar.h>
111#include <dev/mii/brgphyreg.h>
112
113#include <dev/pci/pcireg.h>
114#include <dev/pci/pcivar.h>
115#include <dev/pci/pcidevs.h>
116
117#define SK_USEIOSPACE
118#define	SK_VERBOSE
119
120#include <dev/pci/if_skreg.h>
121#include <dev/pci/xmaciireg.h>
122
/* Autoconf probe/attach glue. */
int skc_probe		__P((struct device *, void *, void *));
void skc_attach		__P((struct device *, struct device *self, void *aux));
int sk_probe		__P((struct device *, void *, void *));
void sk_attach		__P((struct device *, struct device *self, void *aux));
int skcprint		__P((void *, const char *));
int sk_attach_xmac	__P((struct sk_softc *, int));
/* Interrupt handlers and the RX/TX datapath. */
int sk_intr		__P((void *));
void sk_intr_bcom	__P((struct sk_if_softc *));
void sk_intr_xmac	__P((struct sk_if_softc *));
void sk_rxeof		__P((struct sk_if_softc *));
void sk_txeof		__P((struct sk_if_softc *));
int sk_encap		__P((struct sk_if_softc *, struct mbuf *, u_int32_t *));
void sk_start		__P((struct ifnet *));
int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
void sk_init		__P((void *));
void sk_init_xmac	__P((struct sk_if_softc *));
void sk_stop		__P((struct sk_if_softc *));
void sk_watchdog	__P((struct ifnet *));
void sk_shutdown	__P((void *));
int sk_ifmedia_upd	__P((struct ifnet *));
void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
void sk_reset		__P((struct sk_softc *));
/* Descriptor ring setup and RX buffer allocation. */
int sk_newbuf		__P((struct sk_if_softc *, struct sk_chain *,
    struct mbuf *));
int sk_init_rx_ring	__P((struct sk_if_softc *));
void sk_init_tx_ring	__P((struct sk_if_softc *));
/* Indirect (windowed) register accessors. */
u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
/* VPD EEPROM access. */
u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
void sk_vpd_read_res	__P((struct sk_softc *,
					struct vpd_res *, int));
void sk_vpd_read	__P((struct sk_softc *));

/* MII bus glue used by the miibus layer. */
int sk_miibus_readreg	__P((struct device *, int, int));
void sk_miibus_writereg	__P((struct device *, int, int, int));
void sk_miibus_statchg	__P((struct device *));

/* Multicast filter programming and periodic timer. */
u_int32_t sk_calchash	__P((caddr_t));
void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
void sk_setmulti	__P((struct sk_if_softc *));
void sk_tick		__P((void *));
168
/*
 * Read-modify-write helpers for direct CSR and windowed registers.
 * The mask argument is parenthesized so the expansion stays correct
 * when callers pass compound expressions (e.g. a | b).
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | (x))

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | (x))

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~(x))
186
187u_int32_t sk_win_read_4(sc, reg)
188	struct sk_softc		*sc;
189	int			reg;
190{
191	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
192	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
193}
194
195u_int16_t sk_win_read_2(sc, reg)
196	struct sk_softc		*sc;
197	int			reg;
198{
199	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
200	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
201}
202
203u_int8_t sk_win_read_1(sc, reg)
204	struct sk_softc		*sc;
205	int			reg;
206{
207	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
208	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
209}
210
211void sk_win_write_4(sc, reg, val)
212	struct sk_softc		*sc;
213	int			reg;
214	u_int32_t		val;
215{
216	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
217	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
218	return;
219}
220
221void sk_win_write_2(sc, reg, val)
222	struct sk_softc		*sc;
223	int			reg;
224	u_int32_t		val;
225{
226	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
227	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
228	return;
229}
230
231void sk_win_write_1(sc, reg, val)
232	struct sk_softc		*sc;
233	int			reg;
234	u_int32_t		val;
235{
236	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
237	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
238	return;
239}
240
241/*
242 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
244 * denoted by resource IDs. The SysKonnect VPD contains an ID string
245 * resource (the name of the adapter), a read-only area resource
246 * containing various key/data fields and a read/write area which
247 * can be used to store asset management information or log messages.
248 * We read the ID string and read-only into buffers attached to
249 * the controller softc structure for later use. At the moment,
250 * we only use the ID string during sk_attach().
251 */
252u_int8_t sk_vpd_readbyte(sc, addr)
253	struct sk_softc		*sc;
254	int			addr;
255{
256	int			i;
257
258	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
259	for (i = 0; i < SK_TIMEOUT; i++) {
260		DELAY(1);
261		if (sk_win_read_2(sc,
262		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
263			break;
264	}
265
266	if (i == SK_TIMEOUT)
267		return(0);
268
269	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
270}
271
272void sk_vpd_read_res(sc, res, addr)
273	struct sk_softc		*sc;
274	struct vpd_res		*res;
275	int			addr;
276{
277	int			i;
278	u_int8_t		*ptr;
279
280	ptr = (u_int8_t *)res;
281	for (i = 0; i < sizeof(struct vpd_res); i++)
282		ptr[i] = sk_vpd_readbyte(sc, i + addr);
283
284	return;
285}
286
287void sk_vpd_read(sc)
288	struct sk_softc		*sc;
289{
290	int			pos = 0, i;
291	struct vpd_res		res;
292
293	if (sc->sk_vpd_prodname != NULL)
294		free(sc->sk_vpd_prodname, M_DEVBUF);
295	if (sc->sk_vpd_readonly != NULL)
296		free(sc->sk_vpd_readonly, M_DEVBUF);
297	sc->sk_vpd_prodname = NULL;
298	sc->sk_vpd_readonly = NULL;
299
300	sk_vpd_read_res(sc, &res, pos);
301
302	if (res.vr_id != VPD_RES_ID) {
303		printf("%s: bad VPD resource id: expected %x got %x\n",
304		    sc->sk_dev.dv_xname, VPD_RES_ID, res.vr_id);
305		return;
306	}
307
308	pos += sizeof(res);
309	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
310	for (i = 0; i < res.vr_len; i++)
311		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
312	sc->sk_vpd_prodname[i] = '\0';
313	pos += i;
314
315	sk_vpd_read_res(sc, &res, pos);
316
317	if (res.vr_id != VPD_RES_READ) {
318		printf("%s: bad VPD resource id: expected %x got %x\n",
319		    sc->sk_dev.dv_xname, VPD_RES_READ, res.vr_id);
320		return;
321	}
322
323	pos += sizeof(res);
324	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
325	for (i = 0; i < res.vr_len + 1; i++)
326		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
327
328	return;
329}
330
/*
 * Read a PHY register via the XMAC's MII interface.  For external
 * (non-XMAC) PHYs the read is posted and we must poll MMUCMD for
 * PHYDATARDY before the data register contains valid data.  Returns
 * 0 on timeout or when addressing a nonexistent internal PHY.
 */
int
sk_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	/* The internal XMAC PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	/* Latch the target register/PHY, then issue a dummy read. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
361
/*
 * Write a PHY register via the XMAC's MII interface.  PHYBUSY in the
 * MMU command register is polled both before loading the data and
 * afterwards to wait for the shift-out to complete.
 */
void
sk_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/*
	 * NOTE(review): unlike the poll loop below, this one spins
	 * without a DELAY(1) between reads -- confirm intentional.
	 */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);

	return;
}
394
395void
396sk_miibus_statchg(dev)
397	struct device *dev;
398{
399	struct sk_if_softc *sc_if;
400	struct mii_data *mii;
401
402	sc_if = (struct sk_if_softc *)dev;
403	mii = &sc_if->sk_mii;
404
405	/*
406	 * If this is a GMII PHY, manually set the XMAC's
407	 * duplex mode accordingly.
408	 */
409	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
410		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
411			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
412		} else {
413			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
414		}
415	}
416
417	return;
418}
419
#define SK_POLY		0xEDB88320
#define SK_BITS		6

/*
 * Hash an ethernet address for the XMAC's multicast hash table:
 * a reflected CRC-32 (polynomial 0xEDB88320) over the six address
 * bytes, of which only the low SK_BITS bits are used.
 */
u_int32_t sk_calchash(addr)
	caddr_t			addr;
{
	u_int32_t		crc = 0xFFFFFFFF;	/* CRC-32 seed */
	u_int32_t		byte, bit, data;

	for (byte = 0; byte < 6; byte++) {
		data = (u_int8_t)addr[byte];
		for (bit = 0; bit < 8; bit++) {
			if ((crc ^ data) & 1)
				crc = (crc >> 1) ^ SK_POLY;
			else
				crc >>= 1;
			data >>= 1;
		}
	}

	return (~crc & ((1 << SK_BITS) - 1));
}
438
/*
 * Program one perfect-filter entry: write the six address bytes into
 * the given RX filter slot as three consecutive 16-bit registers.
 * NOTE(review): the casts assume addr is at least 16-bit aligned and
 * that host byte order matches what the XMAC expects -- confirm
 * against the XMAC II datasheet before changing.
 */
void sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	caddr_t			addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}
454
/*
 * Rebuild the multicast receive filter.  The first XM_RXFILT_MAX - 1
 * groups go into the XMAC's perfect filter; the remainder fall back
 * to the 64-bit hash table.  A multicast address *range* in the list
 * cannot be expressed in either filter, so it forces ALLMULTI (via
 * the goto back to the label below).
 */
void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* An address range can't be filtered exactly. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, enm->enm_addrlo, i);
				i++;
			}
			else {
				h = sk_calchash(enm->enm_addrlo);
				if (h < 32)
					hashes[0] |= (1 << h);
				else
					hashes[1] |= (1 << (h - 32));
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
517
518int sk_init_rx_ring(sc_if)
519	struct sk_if_softc	*sc_if;
520{
521	struct sk_chain_data	*cd;
522	struct sk_ring_data	*rd;
523	int			i;
524
525	cd = &sc_if->sk_cdata;
526	rd = sc_if->sk_rdata;
527
528	bzero((char *)rd->sk_rx_ring,
529	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
530
531	for (i = 0; i < SK_RX_RING_CNT; i++) {
532		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
533		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) {
534			printf("%s: failed alloc of %dth mbuf\n",
535			    sc_if->sk_dev.dv_xname, i);
536			return(ENOBUFS);
537		}
538		if (i == (SK_RX_RING_CNT - 1)) {
539			cd->sk_rx_chain[i].sk_next =
540			    &cd->sk_rx_chain[0];
541			rd->sk_rx_ring[i].sk_next =
542			    vtophys(&rd->sk_rx_ring[0]);
543		} else {
544			cd->sk_rx_chain[i].sk_next =
545			    &cd->sk_rx_chain[i + 1];
546			rd->sk_rx_ring[i].sk_next =
547			    vtophys(&rd->sk_rx_ring[i + 1]);
548		}
549	}
550
551	sc_if->sk_cdata.sk_rx_prod = 0;
552	sc_if->sk_cdata.sk_rx_cons = 0;
553
554	return(0);
555}
556
557void sk_init_tx_ring(sc_if)
558	struct sk_if_softc	*sc_if;
559{
560	struct sk_chain_data	*cd;
561	struct sk_ring_data	*rd;
562	int			i;
563
564	cd = &sc_if->sk_cdata;
565	rd = sc_if->sk_rdata;
566
567	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
568	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
569
570	for (i = 0; i < SK_TX_RING_CNT; i++) {
571		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
572		if (i == (SK_TX_RING_CNT - 1)) {
573			cd->sk_tx_chain[i].sk_next =
574			    &cd->sk_tx_chain[0];
575			rd->sk_tx_ring[i].sk_next =
576			    vtophys(&rd->sk_tx_ring[0]);
577		} else {
578			cd->sk_tx_chain[i].sk_next =
579			    &cd->sk_tx_chain[i + 1];
580			rd->sk_tx_ring[i].sk_next =
581			    vtophys(&rd->sk_tx_ring[i + 1]);
582		}
583	}
584
585	sc_if->sk_cdata.sk_tx_prod = 0;
586	sc_if->sk_cdata.sk_tx_cons = 0;
587	sc_if->sk_cdata.sk_tx_cnt = 0;
588
589	return;
590}
591
/*
 * Attach an mbuf cluster to an RX chain slot.  If m is NULL a fresh
 * mbuf + cluster is allocated; otherwise the caller's mbuf is
 * recycled in place.  The descriptor is pointed at the buffer's
 * physical address and handed back to the hardware.  Returns ENOBUFS
 * if allocation fails (the ring slot is left untouched in that case).
 */
int sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("%s: no memory for rx list -- "
			    "packet dropped!\n", sc_if->sk_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach a cluster; without one the mbuf is useless here. */
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Point the hardware descriptor at the buffer and re-arm it. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
639
640/*
641 * Set media options.
642 */
643int
644sk_ifmedia_upd(ifp)
645	struct ifnet *ifp;
646{
647	struct sk_if_softc *sc_if = ifp->if_softc;
648
649	sk_init(sc_if);
650	mii_mediachg(&sc_if->sk_mii);
651	return(0);
652}
653
654/*
655 * Report current media status.
656 */
657void
658sk_ifmedia_sts(ifp, ifmr)
659	struct ifnet *ifp;
660	struct ifmediareq *ifmr;
661{
662	struct sk_if_softc *sc_if = ifp->if_softc;
663
664	mii_pollstat(&sc_if->sk_mii);
665	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
666	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
667}
668
/*
 * Interface ioctl handler.  Runs at splimp(); ether_ioctl() gets
 * first crack (a positive return means it fully handled or rejected
 * the request), then we handle address assignment, flag changes,
 * multicast list updates and media requests ourselves.
 */
int
sk_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			sk_init(sc_if);
			arp_ifinit(&sc_if->arpcom, ifa);
			break;
#endif /* INET */
		default:
			sk_init(sc_if);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Toggle promiscuous mode in place when only the
			 * PROMISC bit changed; otherwise do a full init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next toggle can be diffed. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			sk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
755
756/*
757 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
758 * IDs against our list and return a device name if we find a match.
759 */
760int
761skc_probe(parent, match, aux)
762	struct device *parent;
763	void *match, *aux;
764{
765	struct pci_attach_args *pa = aux;
766
767	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SCHNEIDERKOCH)
768		return (0);
769
770	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SCHNEIDERKOCH_GE)
771		return (0);
772
773	return (1);
774}
775
776/*
777 * Force the GEnesis into reset, then bring it out of reset.
778 */
/*
 * Force the GEnesis into reset, then bring it out of reset and
 * reinitialize the packet arbiter, RAM interface and interrupt
 * moderation.  The ordering of these writes follows the hardware's
 * required bring-up sequence.
 */
void sk_reset(sc)
	struct sk_softc		*sc;
{
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
         * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
813
814int
815sk_probe(parent, match, aux)
816	struct device *parent;
817	void *match, *aux;
818{
819	struct skc_attach_args *sa = aux;
820
821	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
822		return(0);
823
824	return (1);
825}
826
827/*
828 * Each XMAC chip is attached as a separate logical IP interface.
829 * Single port cards will have only one logical interface of course.
830 */
/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 * This routine reads the station address, carves up the on-board
 * SRAM between RX and TX (and between ports), identifies the PHY,
 * allocates the DMA descriptor area, and finally hooks the interface
 * into the network stack and the miibus layer.
 */
void
sk_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int i, rseg;

	/* Cross-link the port softc with the controller softc. */
	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	/* Each port drives its own TX BMU CSR. */
	if (sa->skc_port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (sa->skc_port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);


	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
 	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split the whole RAM between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, split again RX/TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	switch (sc_if->sk_phytype) {
	case SK_PHYTYPE_XMAC:
		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
		break;
	case SK_PHYTYPE_BCOM:
		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
		break;
	default:
		printf("%s: unsupported PHY type: %d\n",
		    sc->sk_dev.dv_xname, sc_if->sk_phytype);
		return;
	}

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sk_dev.dv_xname);
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		/* NOTE(review): sizeof() is not an int; %d is dubious on LP64. */
		printf("%s: can't map dma buffers (%d bytes)\n",
		       sc_if->sk_dev.dv_xname, sizeof(struct sk_ring_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc_if->sk_dev.dv_xname);
		bus_dmamem_unmap(sc->sc_dmatag, kva,
		    sizeof(struct sk_ring_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc_if->sk_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmatag, dmamap);
		bus_dmamem_unmap(sc->sc_dmatag, kva,
		    sizeof(struct sk_ring_data));
		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
		goto fail;
	}
        sc_if->sk_rdata = (struct sk_ring_data *)kva;
	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/* Fill in the generic interface structure. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do miibus setup.
	 */
	sk_init_xmac(sc_if);
	sc_if->sk_mii.mii_ifp = ifp;
	sc_if->sk_mii.mii_readreg = sk_miibus_readreg;
	sc_if->sk_mii.mii_writereg = sk_miibus_writereg;
	sc_if->sk_mii.mii_statchg = sk_miibus_statchg;
	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    sk_ifmedia_upd, sk_ifmedia_sts);
	mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* No PHY: fall back to a manual (fixed) media entry. */
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	}
	else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Start the one-second link tick. */
	timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
	timeout_add(&sc_if->sk_tick_ch, hz);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail:
	/* Detach this port from the controller on any setup failure. */
	sc->sk_if[sa->skc_port] = NULL;
}
999
1000int
1001skcprint(aux, pnp)
1002	void *aux;
1003	const char *pnp;
1004{
1005	struct skc_attach_args *sa = aux;
1006
1007	if (pnp)
1008		printf("sk port %c at %s",
1009		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1010	else
1011		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1012	return (UNCONF);
1013}
1014
1015/*
1016 * Attach the interface. Allocate softc structures, do ifmedia
1017 * setup and ethernet/BPF attach.
1018 */
void
skc_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_addr_t iobase;
	bus_size_t iosize;
	int s;
	u_int32_t command;

	s = splimp();

	/*
	 * Handle power management nonsense.
	 */
	/*
	 * If the capability at SK_PCI_CAPID is power management (ID 0x01)
	 * and the chip was left in a low-power D-state, put it back in D0.
	 * NOTE(review): this reads a fixed config offset rather than
	 * walking the PCI capability list -- assumes PM is first.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			/* (The BARs and intline are lost across the
			 * D-state transition, so stash them first.) */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			/* Clear the two low power-state bits -> D0. */
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	/* Enable I/O, memory and bus-master access, then read the
	 * command register back to see what actually stuck. */
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

#ifdef SK_USEIOSPACE
	if (!(command & PCI_COMMAND_IO_ENABLE)) {
		printf(": failed to enable I/O ports!\n");
		goto fail;
	}
	/*
	 * Map control/status registers.
	 */
	if (pci_io_find(pc, pa->pa_tag, SK_PCI_LOIO, &iobase, &iosize)) {
		printf(": can't find i/o space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->sk_bhandle)) {
		printf(": can't map i/o space\n");
		goto fail;
	}
	sc->sk_btag = pa->pa_iot;
#else
	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping!\n");
		goto fail;
	}
	if (pci_mem_find(pc, pa->pa_tag, SK_PCI_LOMEM, &iobase, &iosize, NULL)){
		printf(": can't find mem space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->sk_bhandle)) {
		printf(": can't map mem space\n");
		goto fail;
	}
	sc->sk_btag = pa->pa_memt;
#endif
	sc->sc_dmatag = pa->pa_dmat;

	/* Allocate interrupt */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}

	/* sk_intr handles both ports; the softc is the handler arg. */
	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		goto fail;
	}
	printf(": %s\n", intrstr);

	/* Reset the adapter. */
	sk_reset(sc);

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	/* Read and save RAM size and RAMbuffer offset */
	switch(sk_win_read_1(sc, SK_EPROM0)) {
	case SK_RAMSIZE_512K_64:
		sc->sk_ramsize = 0x80000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	case SK_RAMSIZE_1024K_64:
		sc->sk_ramsize = 0x100000;
		sc->sk_rboff = SK_RBOFF_80000;
		break;
	case SK_RAMSIZE_1024K_128:
		sc->sk_ramsize = 0x100000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	case SK_RAMSIZE_2048K_128:
		sc->sk_ramsize = 0x200000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	default:
		printf("%s: unknown ram size: %d\n",
		    sc->sk_dev.dv_xname, sk_win_read_1(sc, SK_EPROM0));
		goto fail;
		/* NOTREACHED: break after goto is dead code. */
		break;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_TX;
		break;
	default:
		printf("%s: unknown media type: 0x%x\n",
		    sc->sk_dev.dv_xname, sk_win_read_1(sc, SK_PMDTYPE));
		goto fail;
	}

	/* Announce the product name. */
	printf("%s: %s\n", sc->sk_dev.dv_xname, sc->sk_vpd_prodname);

	/* Attach port A, and port B too unless the chip reports a
	 * single-MAC configuration. */
	skca.skc_port = SK_PORT_A;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		skca.skc_port = SK_PORT_B;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	/* NB: the success path falls through here too; only the
	 * spl is restored. */
fail:
	splx(s);
}
1197
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * *txidx.  On success *txidx is advanced past the chain and 0 is
 * returned; if the ring is too full, ENOBUFS is returned and the ring
 * is left with only not-yet-owned descriptors written.
 */
int sk_encap(sc_if, m_head, txidx)
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head;
        u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			/* DMA address of this fragment's data.
			 * NOTE(review): assumes the mbuf data is
			 * physically contiguous (vtophys of the start). */
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * Every fragment except the first gets OWN set
			 * immediately; the first descriptor's OWN bit is
			 * set only after the whole chain is written, so
			 * the NIC never starts on a half-built packet.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/* Defensive: the loop above only terminates with m == NULL. */
	if (m != NULL)
		return(ENOBUFS);

	/* Close off the chain, record the mbuf for later freeing in
	 * sk_txeof(), then hand the first descriptor to the NIC. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1246
/*
 * Main transmit routine: drain the interface send queue into the TX
 * descriptor ring, then kick the transmit BMU and arm the watchdog.
 */
void sk_start(ifp)
	struct ifnet		*ifp;
{
        struct sk_softc		*sc;
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head = NULL;
        u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop as soon as the producer slot still holds an
	 * unreclaimed packet (ring full). */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			/* Put the packet back for a later attempt. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	return;
}
1295
1296
1297void sk_watchdog(ifp)
1298	struct ifnet		*ifp;
1299{
1300	struct sk_if_softc	*sc_if;
1301
1302	sc_if = ifp->if_softc;
1303
1304	printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1305	sk_init(sc_if);
1306
1307	return;
1308}
1309
1310void sk_shutdown(v)
1311	void *v;
1312{
1313	struct sk_softc		*sc = v;
1314
1315	/* Turn off the 'driver is loaded' LED. */
1316	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1317
1318	/*
1319	 * Reset the GEnesis controller. Doing this should also
1320	 * assert the resets on the attached XMAC(s).
1321	 */
1322	sk_reset(sc);
1323
1324	return;
1325}
1326
/*
 * Receive completion handler: walk the RX ring from the current index,
 * passing completed frames up the stack and replenishing buffers.
 */
void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Process descriptors until we find one still owned by the NIC. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		/* Detach the mbuf from the ring slot before deciding
		 * whether to recycle it or pass it up. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: count the error and recycle the buffer. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			/* Copy including ETHER_ALIGN slop so the
			 * payload stays properly aligned after m_adj. */
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("%s: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_dev.dv_xname);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			/* Got a fresh buffer; hand the old one up as-is. */
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1396
1397void sk_txeof(sc_if)
1398	struct sk_if_softc	*sc_if;
1399{
1400	struct sk_tx_desc	*cur_tx = NULL;
1401	struct ifnet		*ifp;
1402	u_int32_t		idx;
1403
1404	ifp = &sc_if->arpcom.ac_if;
1405
1406	/*
1407	 * Go through our tx ring and free mbufs for those
1408	 * frames that have been sent.
1409	 */
1410	idx = sc_if->sk_cdata.sk_tx_cons;
1411	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1412		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1413		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1414			break;
1415		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1416			ifp->if_opackets++;
1417		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1418			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1419			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1420		}
1421		sc_if->sk_cdata.sk_tx_cnt--;
1422		SK_INC(idx, SK_TX_RING_CNT);
1423		ifp->if_timer = 0;
1424	}
1425
1426	sc_if->sk_cdata.sk_tx_cons = idx;
1427
1428	if (cur_tx != NULL)
1429		ifp->if_flags &= ~IFF_OACTIVE;
1430
1431	return;
1432}
1433
/*
 * Periodic timeout used while the link is down: poll the XMAC's GPIO
 * link-sync pin until the link comes back, then re-enable the GP0
 * interrupt and refresh MII state.
 */
void
sk_tick(xsc_if)
	void *xsc_if;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	int i;

	sc_if = xsc_if;
	ifp = &sc_if->arpcom.ac_if;
	mii = &sc_if->sk_mii;

	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs have their own link handling. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet (loop broke early): poll again in 1s. */
	if (i != 3) {
		timeout_add(&sc_if->sk_tick_ch, hz);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	/* Reading ISR clears any pending interrupt condition. */
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	mii_pollstat(mii);
	timeout_del(&sc_if->sk_tick_ch);
}
1479
/*
 * Handle an interrupt from a Broadcom PHY: ack the PHY interrupt,
 * track link up/down transitions, and drive the link LED.  The XMAC
 * TX/RX state machines are paused around the PHY access.
 */
void
sk_intr_bcom(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int status;

	sc = sc_if->sk_softc;
	mii = &sc_if->sk_mii;
	ifp = &sc_if->arpcom.ac_if;

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface is going down: just reinit the MAC. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link went down: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask most PHY interrupts. */
			sk_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* Negotiation still in progress: poll again. */
			mii_tick(mii);
			timeout_add(&sc_if->sk_tick_ch, hz);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
1538
1539void sk_intr_xmac(sc_if)
1540	struct sk_if_softc	*sc_if;
1541{
1542	struct sk_softc		*sc;
1543	u_int16_t		status;
1544
1545	sc = sc_if->sk_softc;
1546	status = SK_XM_READ_2(sc_if, XM_ISR);
1547
1548	if (status & XM_ISR_LINKEVENT) {
1549		SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
1550		if (sc_if->sk_link == 1)
1551			sc_if->sk_link = 0;
1552	}
1553
1554	if (status & XM_ISR_AUTONEG_DONE)
1555		timeout_add(&sc_if->sk_tick_ch, hz);
1556
1557	if (status & XM_IMR_TX_UNDERRUN)
1558		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1559
1560	if (status & XM_IMR_RX_OVERRUN)
1561		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1562
1563	return;
1564}
1565
1566int sk_intr(xsc)
1567	void			*xsc;
1568{
1569	struct sk_softc		*sc = xsc;
1570	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1571	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1572	u_int32_t		status;
1573	int			claimed = 0;
1574
1575	sc_if0 = sc->sk_if[SK_PORT_A];
1576	sc_if1 = sc->sk_if[SK_PORT_B];
1577
1578	if (sc_if0 != NULL)
1579		ifp0 = &sc_if0->arpcom.ac_if;
1580	if (sc_if1 != NULL)
1581		ifp1 = &sc_if1->arpcom.ac_if;
1582
1583	for (;;) {
1584		status = CSR_READ_4(sc, SK_ISSR);
1585		if (!(status & sc->sk_intrmask))
1586			break;
1587
1588		claimed = 1;
1589
1590		/* Handle receive interrupts first. */
1591		if (status & SK_ISR_RX1_EOF) {
1592			sk_rxeof(sc_if0);
1593			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1594			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1595		}
1596		if (status & SK_ISR_RX2_EOF) {
1597			sk_rxeof(sc_if1);
1598			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1599			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1600		}
1601
1602		/* Then transmit interrupts. */
1603		if (status & SK_ISR_TX1_S_EOF) {
1604			sk_txeof(sc_if0);
1605			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1606			    SK_TXBMU_CLR_IRQ_EOF);
1607		}
1608		if (status & SK_ISR_TX2_S_EOF) {
1609			sk_txeof(sc_if1);
1610			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1611			    SK_TXBMU_CLR_IRQ_EOF);
1612		}
1613
1614		/* Then MAC interrupts. */
1615		if (status & SK_ISR_MAC1 &&
1616		    ifp0->if_flags & IFF_RUNNING)
1617			sk_intr_xmac(sc_if0);
1618
1619		if (status & SK_ISR_MAC2 &&
1620		    ifp1->if_flags & IFF_RUNNING)
1621			sk_intr_xmac(sc_if1);
1622
1623		if (status & SK_ISR_EXTERNAL_REG) {
1624			if (ifp0 != NULL)
1625				sk_intr_bcom(sc_if0);
1626			if (ifp1 != NULL)
1627				sk_intr_bcom(sc_if1);
1628		}
1629	}
1630
1631	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1632
1633	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1634		sk_start(ifp0);
1635	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1636		sk_start(ifp1);
1637
1638	return (claimed);
1639}
1640
/*
 * Bring the XMAC out of reset and program it: station address,
 * promiscuous/broadcast filtering, frame padding, error-frame
 * acceptance, interrupt mask, and the MAC arbiter.  Also performs
 * Broadcom PHY bring-up for 1000baseTX boards.
 */
void sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct sk_bcom_hack     bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	/* (Used below to pick MAC arbiter timing values.) */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		/* Each port's PHY reset hangs off a different GPIO pin. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then mask most of its interrupts. */
		sk_miibus_writereg((struct device *)sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_miibus_writereg((struct device *)sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	/* Three 16-bit halves of the 48-bit ethernet address. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Accept oversized frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	/* Reading ISR acknowledges anything pending. */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	/* Timing values depend on the XMAC revision read earlier. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
1808
1809/*
1810 * Note that to properly initialize any part of the GEnesis chip,
1811 * you first have to take it out of reset mode.
1812 */
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
/*
 * (Re)initialize one port: stop pending I/O, program the LEDs, XMAC,
 * FIFOs, RAM buffers and BMUs, set up the descriptor rings, enable
 * interrupts, and start the receiver.  Runs at splimp.
 */
void sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	int			s;

	s = splimp();

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = &sc_if->sk_mii;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);
	mii_mediachg(mii);

	/* Configure MAC FIFOs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	/* Window into the chip's RAM computed at attach time. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	/* Point the bus-master units at the descriptor rings. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		(void)splx(s);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling */
	/* Reading ISSR acknowledges stale status before unmasking. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return;
}
1922
/*
 * Stop one port: cancel the link-poll timeout, put the PHY back in
 * reset, shut down the MAC/FIFO/BMU/RAM-buffer machinery, mask this
 * port's interrupts, and free all mbufs still held by the rings.
 */
void sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	timeout_del(&sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		/* Drive the port's PHY-reset GPIO pin low. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Ack any pending XMAC interrupt and mask them all. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	return;
}
1993
/*
 * Autoconfiguration glue.  "skc" is the GEnesis controller itself;
 * "sk" is one network interface (port) attached beneath it.
 */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach,
};

/* The controller is not itself a network interface (DV_DULL). */
struct cfdriver skc_cd = {
	0, "skc", DV_DULL
};

struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach,
};

/* Each sk instance is a network interface (DV_IFNET). */
struct cfdriver sk_cd = {
	0, "sk", DV_IFNET
};
2009