/* if_sk.c revision 1.94 */
1/*	$OpenBSD: if_sk.c,v 1.94 2006/02/09 11:19:32 brad Exp $	*/
2
3/*
4 * Copyright (c) 1997, 1998, 1999, 2000
5 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35 */
36
37/*
38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39 *
40 * Permission to use, copy, modify, and distribute this software for any
41 * purpose with or without fee is hereby granted, provided that the above
42 * copyright notice and this permission notice appear in all copies.
43 *
44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51 */
52
53/*
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
56 * References:
57 * 	The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60 *
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
64 *
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66 *
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
70 */
71
72/*
73 * The SysKonnect gigabit ethernet adapters consist of two main
74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76 * components and a PHY while the GEnesis controller provides a PCI
77 * interface with DMA support. Each card may have between 512K and
78 * 2MB of SRAM on board depending on the configuration.
79 *
80 * The SysKonnect GEnesis controller can have either one or two XMAC
81 * chips connected to it, allowing single or dual port NIC configurations.
82 * SysKonnect has the distinction of being the only vendor on the market
83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85 * XMAC registers. This driver takes advantage of these features to allow
86 * both XMACs to operate as independent interfaces.
87 */
88
89#include "bpfilter.h"
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/sockio.h>
94#include <sys/mbuf.h>
95#include <sys/malloc.h>
96#include <sys/kernel.h>
97#include <sys/socket.h>
98#include <sys/device.h>
99#include <sys/queue.h>
100
101#include <net/if.h>
102#include <net/if_dl.h>
103#include <net/if_types.h>
104
105#ifdef INET
106#include <netinet/in.h>
107#include <netinet/in_systm.h>
108#include <netinet/in_var.h>
109#include <netinet/ip.h>
110#include <netinet/udp.h>
111#include <netinet/tcp.h>
112#include <netinet/if_ether.h>
113#endif
114
115#include <net/if_media.h>
116#include <net/if_vlan_var.h>
117
118#if NBPFILTER > 0
119#include <net/bpf.h>
120#endif
121
122#include <dev/mii/mii.h>
123#include <dev/mii/miivar.h>
124#include <dev/mii/brgphyreg.h>
125
126#include <dev/pci/pcireg.h>
127#include <dev/pci/pcivar.h>
128#include <dev/pci/pcidevs.h>
129
130#include <dev/pci/if_skreg.h>
131#include <dev/pci/if_skvar.h>
132
133int skc_probe(struct device *, void *, void *);
134void skc_attach(struct device *, struct device *self, void *aux);
135void skc_shutdown(void *);
136int sk_probe(struct device *, void *, void *);
137void sk_attach(struct device *, struct device *self, void *aux);
138int skcprint(void *, const char *);
139int sk_intr(void *);
140void sk_intr_bcom(struct sk_if_softc *);
141void sk_intr_xmac(struct sk_if_softc *);
142void sk_intr_yukon(struct sk_if_softc *);
143void sk_rxeof(struct sk_if_softc *);
144void sk_txeof(struct sk_if_softc *);
145int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
146void sk_start(struct ifnet *);
147int sk_ioctl(struct ifnet *, u_long, caddr_t);
148void sk_init(void *);
149void sk_init_xmac(struct sk_if_softc *);
150void sk_init_yukon(struct sk_if_softc *);
151void sk_stop(struct sk_if_softc *);
152void sk_watchdog(struct ifnet *);
153int sk_ifmedia_upd(struct ifnet *);
154void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
155void sk_reset(struct sk_softc *);
156int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
157int sk_alloc_jumbo_mem(struct sk_if_softc *);
158void *sk_jalloc(struct sk_if_softc *);
159void sk_jfree(caddr_t, u_int, void *);
160int sk_init_rx_ring(struct sk_if_softc *);
161int sk_init_tx_ring(struct sk_if_softc *);
162
163int sk_xmac_miibus_readreg(struct device *, int, int);
164void sk_xmac_miibus_writereg(struct device *, int, int, int);
165void sk_xmac_miibus_statchg(struct device *);
166
167int sk_marv_miibus_readreg(struct device *, int, int);
168void sk_marv_miibus_writereg(struct device *, int, int, int);
169void sk_marv_miibus_statchg(struct device *);
170
171u_int32_t sk_xmac_hash(caddr_t);
172u_int32_t sk_yukon_hash(caddr_t);
173void sk_setfilt(struct sk_if_softc *, caddr_t, int);
174void sk_setmulti(struct sk_if_softc *);
175void sk_tick(void *);
176void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t);
177
178#ifdef SK_DEBUG
179#define DPRINTF(x)	if (skdebug) printf x
180#define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
181int	skdebug = 0;
182
183void sk_dump_txdesc(struct sk_tx_desc *, int);
184void sk_dump_mbuf(struct mbuf *);
185void sk_dump_bytes(const char *, int);
186#else
187#define DPRINTF(x)
188#define DPRINTFN(n,x)
189#endif
190
191#define SK_SETBIT(sc, reg, x)		\
192	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
193
194#define SK_CLRBIT(sc, reg, x)		\
195	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
196
197#define SK_WIN_SETBIT_4(sc, reg, x)	\
198	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
199
200#define SK_WIN_CLRBIT_4(sc, reg, x)	\
201	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
202
203#define SK_WIN_SETBIT_2(sc, reg, x)	\
204	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
205
206#define SK_WIN_CLRBIT_2(sc, reg, x)	\
207	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
208
/*
 * PCI vendor/product IDs matched by skc_probe().  Covers XMAC-based
 * GEnesis boards and Marvell Yukon variants, including OEM rebrands
 * (3Com, CNET, D-Link, Linksys, SysKonnect).
 */
const struct pci_matchid skc_devices[] = {
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940 },
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940B },
	{ PCI_VENDOR_CNET,		PCI_PRODUCT_CNET_GIGACARD },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560T },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560T_2 },
	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1064 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_BELKIN },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_3 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8021CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8022CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8021X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8022X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8061CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8062CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8061X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKONII_8062X },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9Sxx },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9Exx }
};
240
241#define SK_LINKSYS_EG1032_SUBID 0x00151737
242
/*
 * Register window accessors.  These are thin wrappers around the
 * CSR_READ_*/CSR_WRITE_* macros; the "window" naming is historical
 * (earlier revisions banked register access through a window).
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
278
/*
 * Read a PHY register through the XMAC's MII management interface.
 *
 * The internal XMAC PHY only answers at address 0; other addresses
 * read back as 0.  External (non-XMAC) PHYs require polling
 * XM_MMUCMD_PHYDATARDY before the data register is valid.
 * Returns the register value, or 0 on bad address / timeout.
 */
int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	/* Latch the address; the first data read starts the cycle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
309
/*
 * Write a PHY register through the XMAC's MII management interface.
 *
 * Waits for any in-flight MII cycle to finish (PHYBUSY clear), writes
 * the value, then waits for the write cycle itself to complete.
 * Timeouts are reported on the console but otherwise ignored.
 */
void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait for the interface to go idle (no DELAY: spin fast). */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	/* Now wait for the write cycle to complete. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
340
/*
 * MII status-change callback for XMAC-attached PHYs.
 *
 * The XMAC does not track an external GMII PHY's duplex setting by
 * itself, so mirror the negotiated duplex into the MMU command
 * register whenever the PHY is not the internal XMAC one.
 */
void
sk_xmac_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;

	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
}
361
362int
363sk_marv_miibus_readreg(dev, phy, reg)
364	struct device *dev;
365	int phy, reg;
366{
367	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
368	u_int16_t val;
369	int i;
370
371	if (phy != 0 ||
372	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
373	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
374		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
375			     phy, reg));
376		return(0);
377	}
378
379        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
380		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
381
382	for (i = 0; i < SK_TIMEOUT; i++) {
383		DELAY(1);
384		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
385		if (val & YU_SMICR_READ_VALID)
386			break;
387	}
388
389	if (i == SK_TIMEOUT) {
390		printf("%s: phy failed to come ready\n",
391		       sc_if->sk_dev.dv_xname);
392		return 0;
393	}
394
395 	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
396		     SK_TIMEOUT));
397
398        val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
399
400	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
401		     phy, reg, val));
402
403	return val;
404}
405
406void
407sk_marv_miibus_writereg(dev, phy, reg, val)
408	struct device *dev;
409	int phy, reg, val;
410{
411	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
412	int i;
413
414	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
415		     phy, reg, val));
416
417	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
418	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
419		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
420
421	for (i = 0; i < SK_TIMEOUT; i++) {
422		DELAY(1);
423		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
424			break;
425	}
426}
427
428void
429sk_marv_miibus_statchg(dev)
430	struct device *dev;
431{
432	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
433		     SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
434}
435
436#define HASH_BITS	6
437
438u_int32_t
439sk_xmac_hash(caddr_t addr)
440{
441	u_int32_t crc;
442
443	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
444	return (~crc & ((1 << HASH_BITS) - 1));
445}
446
447u_int32_t
448sk_yukon_hash(caddr_t addr)
449{
450	u_int32_t crc;
451
452	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
453	return (crc & ((1 << HASH_BITS) - 1));
454}
455
/*
 * Load a 6-byte station address into XMAC perfect-filter slot `slot'
 * as three consecutive 16-bit register writes.
 *
 * NOTE(review): the u_int16_t casts assume `addr' is at least 2-byte
 * aligned; callers pass arpcom/ether_multi addresses — confirm those
 * are always suitably aligned on strict-alignment platforms.
 */
void
sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
}
465
466void
467sk_setmulti(struct sk_if_softc *sc_if)
468{
469	struct sk_softc *sc = sc_if->sk_softc;
470	struct ifnet *ifp= &sc_if->arpcom.ac_if;
471	u_int32_t hashes[2] = { 0, 0 };
472	int h, i;
473	struct arpcom *ac = &sc_if->arpcom;
474	struct ether_multi *enm;
475	struct ether_multistep step;
476	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
477
478	/* First, zot all the existing filters. */
479	switch(sc->sk_type) {
480	case SK_GENESIS:
481		for (i = 1; i < XM_RXFILT_MAX; i++)
482			sk_setfilt(sc_if, (caddr_t)&dummy, i);
483
484		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
485		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
486		break;
487	case SK_YUKON:
488	case SK_YUKON_LITE:
489	case SK_YUKON_LP:
490		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
491		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
492		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
493		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
494		break;
495	}
496
497	/* Now program new ones. */
498allmulti:
499	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
500		hashes[0] = 0xFFFFFFFF;
501		hashes[1] = 0xFFFFFFFF;
502	} else {
503		i = 1;
504		/* First find the tail of the list. */
505		ETHER_FIRST_MULTI(step, ac, enm);
506		while (enm != NULL) {
507			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
508				 ETHER_ADDR_LEN)) {
509				ifp->if_flags |= IFF_ALLMULTI;
510				goto allmulti;
511			}
512			/*
513			 * Program the first XM_RXFILT_MAX multicast groups
514			 * into the perfect filter. For all others,
515			 * use the hash table.
516			 */
517			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
518				sk_setfilt(sc_if, enm->enm_addrlo, i);
519				i++;
520			}
521			else {
522				switch(sc->sk_type) {
523				case SK_GENESIS:
524					h = sk_xmac_hash(enm->enm_addrlo);
525					break;
526
527				case SK_YUKON:
528				case SK_YUKON_LITE:
529				case SK_YUKON_LP:
530					h = sk_yukon_hash(enm->enm_addrlo);
531					break;
532				}
533				if (h < 32)
534					hashes[0] |= (1 << h);
535				else
536					hashes[1] |= (1 << (h - 32));
537			}
538
539			ETHER_NEXT_MULTI(step, enm);
540		}
541	}
542
543	switch(sc->sk_type) {
544	case SK_GENESIS:
545		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
546			       XM_MODE_RX_USE_PERFECT);
547		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
548		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
549		break;
550	case SK_YUKON:
551	case SK_YUKON_LITE:
552	case SK_YUKON_LP:
553		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
554		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
555		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
556		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
557		break;
558	}
559}
560
/*
 * Initialize the RX descriptor ring: link the software chain and the
 * hardware descriptors into a circle, preset the checksum start
 * offsets, and attach a jumbo mbuf to every slot.
 *
 * Returns 0 on success or ENOBUFS if any mbuf allocation fails.
 */
int
sk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		/* Last descriptor wraps back to slot 0. */
		if (i == (SK_RX_RING_CNT - 1)) {
			cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next = SK_RX_RING_ADDR(sc_if, 0);
		} else {
			cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[i + 1];
			rd->sk_rx_ring[i].sk_next = SK_RX_RING_ADDR(sc_if,i+1);
		}
		/* Hardware csum offsets: start of IP and of L4 payload. */
		rd->sk_rx_ring[i].sk_csum1_start = ETHER_HDR_LEN;
		rd->sk_rx_ring[i].sk_csum2_start = ETHER_HDR_LEN +
		    sizeof(struct ip);
	}

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i, NULL,
		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
			printf("%s: failed alloc of %dth mbuf\n",
			    sc_if->sk_dev.dv_xname, i);
			return(ENOBUFS);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}
599
/*
 * Initialize the TX descriptor ring: link software chain and hardware
 * descriptors into a circle and pre-create one DMA map per slot,
 * parked on sk_txmap_head for sk_encap() to draw from.
 *
 * Returns 0 on success, ENOBUFS on map-create or malloc failure.
 *
 * NOTE(review): SIMPLEQ_INIT resets the map list on every call; if
 * this runs again on re-init without the previous maps having been
 * destroyed, those bus_dmamaps and entries look leaked — verify
 * against sk_stop()'s cleanup.
 */
int
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	bus_dmamap_t		dmamap;
	struct sk_txmap_entry	*entry;
	int			i;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		/* Last descriptor wraps back to slot 0. */
		if (i == (SK_TX_RING_CNT - 1)) {
			cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[0];
			rd->sk_tx_ring[i].sk_next = SK_TX_RING_ADDR(sc_if, 0);
		} else {
			cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[i + 1];
			rd->sk_tx_ring[i].sk_next = SK_TX_RING_ADDR(sc_if,i+1);
		}

		/* One map per slot, sized for a jumbo frame. */
		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	/* Push the zeroed ring out to the device before it starts. */
	SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}
646
/*
 * Attach an mbuf with a jumbo backing buffer to RX ring slot `i'.
 *
 * If `m' is NULL a fresh mbuf header is allocated and a buffer from
 * the driver's jumbo pool is attached (freed back via sk_jfree);
 * otherwise the caller's mbuf is recycled with its pointers reset.
 * `dmamap' is the single pre-loaded map covering the whole jumbo
 * region; the descriptor address is computed as an offset into it.
 *
 * Returns 0 on success, ENOBUFS if mbuf or jumbo allocation fails.
 */
int
sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	  bus_dmamap_t dmamap)
{
	struct mbuf		*m_new = NULL;
	struct sk_chain		*c;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);
	} else {
		/*
	 	 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Align the IP header on a word boundary. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	/* DMA address = jumbo region base + offset of this buffer. */
	r->sk_data_lo = dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
             - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf));
	r->sk_ctl = SK_JLEN | SK_RXSTAT;

	SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}
698
699/*
700 * Memory management for jumbo frames.
701 */
702
/*
 * Allocate and map the shared jumbo-buffer region, then carve it into
 * SK_JSLOTS fixed-size slots tracked by the free/in-use pool lists.
 *
 * `state' records how far setup got so the error path can unwind in
 * reverse order (the switch falls through deliberately).
 *
 * NOTE(review): slot 0's entry goes onto the in-use list rather than
 * the free list, so that slot is never handed out by sk_jalloc() —
 * confirm this is intentional.  Also, pool entries already allocated
 * are not freed on the error path.
 */
int
sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	caddr_t			ptr, kva;
	bus_dma_segment_t	seg;
	int		i, rseg, state, error;
	struct sk_jpool_entry   *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers");
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, &kva,
			   BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%d bytes)", SK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		printf(": can't create dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
			    kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("sk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf(": no memory for jumbo buffer queue!");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
				 entry, jpool_entries);
		else
			LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead,
				 entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of setup; cases fall through. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}
796
797/*
798 * Allocate a jumbo buffer.
799 */
800void *
801sk_jalloc(struct sk_if_softc *sc_if)
802{
803	struct sk_jpool_entry   *entry;
804
805	entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
806
807	if (entry == NULL)
808		return (NULL);
809
810	LIST_REMOVE(entry, jpool_entries);
811	LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
812	return (sc_if->sk_cdata.sk_jslots[entry->slot]);
813}
814
815/*
816 * Release a jumbo buffer.
817 */
/*
 * Release a jumbo buffer (mbuf external-storage free callback;
 * `arg' is the sk_if_softc registered via MEXTADD in sk_newbuf).
 *
 * The slot index is recovered from the buffer's offset within the
 * jumbo region.  An arbitrary entry is pulled off the in-use list
 * and retagged with this slot — entries are interchangeable, only
 * the list lengths and slot numbers matter.
 */
void
sk_jfree(caddr_t buf, u_int size, void	*arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("sk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((vaddr_t)buf
	     - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
}
846
847/*
848 * Set media options.
849 */
850int
851sk_ifmedia_upd(struct ifnet *ifp)
852{
853	struct sk_if_softc *sc_if = ifp->if_softc;
854
855	sk_init(sc_if);
856	mii_mediachg(&sc_if->sk_mii);
857	return(0);
858}
859
860/*
861 * Report current media status.
862 */
863void
864sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
865{
866	struct sk_if_softc *sc_if = ifp->if_softc;
867
868	mii_pollstat(&sc_if->sk_mii);
869	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
870	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
871}
872
/*
 * Interface ioctl handler.  Runs at splnet; defers the generic cases
 * to ether_ioctl() first and only handles what remains.
 *
 * SIOCSIFFLAGS toggles promiscuous mode in hardware without a full
 * reinit when only IFF_PROMISC changed; any other flags change while
 * up triggers sk_init().
 */
int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	/* Let the common ethernet code have first crack. */
	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			sk_init(sc_if);
			arp_ifinit(&sc_if->arpcom, ifa);
			break;
#endif /* INET */
		default:
			sk_init(sc_if);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Promisc turned on while running: no reinit. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_SETBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
				case SK_YUKON_LITE:
				case SK_YUKON_LP:
					/* Clearing the unicast/multicast
					 * filter enables promisc on Yukon. */
					SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				/* Promisc turned off while running. */
				switch(sc->sk_type) {
				case SK_GENESIS:
					SK_XM_CLRBIT_4(sc_if, XM_MODE,
					    XM_MODE_RX_PROMISC);
					break;
				case SK_YUKON:
				case SK_YUKON_LITE:
				case SK_YUKON_LP:
					SK_YU_SETBIT_2(sc_if, YUKON_RCR,
					    YU_RCR_UFLEN | YU_RCR_MUFLEN);
					break;
				}

				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags so the next call can detect changes. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				sk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}
985
986/*
987 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
988 * IDs against our list and return a device name if we find a match.
989 */
990int
991skc_probe(struct device *parent, void *match, void *aux)
992{
993	struct pci_attach_args *pa = aux;
994	pci_chipset_tag_t pc = pa->pa_pc;
995	pcireg_t subid;
996
997	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
998
999	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
1000	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
1001	    subid == SK_LINKSYS_EG1032_SUBID)
1002		return (1);
1003
1004	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
1005	    sizeof(skc_devices)/sizeof(skc_devices[0])));
1006}
1007
1008/*
1009 * Force the GEnesis into reset, then bring it out of reset.
1010 */
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Afterwards set up the packet arbiter (GEnesis only), the RAM
 * interface, and interrupt moderation.
 */
void sk_reset(struct sk_softc *sc)
{
	/*
	 * imtimer_ticks looks unused but is consumed by the
	 * SK_IM_USECS() macro below — presumably it expands to a
	 * reference to this local; do not remove (TODO confirm in
	 * if_skreg.h).
	 */
	u_int32_t imtimer_ticks;

	DPRINTFN(2, ("sk_reset\n"));

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
		     CSR_READ_2(sc, SK_LINK_CTRL)));

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	case SK_YUKON_EC:
	case SK_YUKON_XL:
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/* 100us moderation; only TX/RX EOF interrupts are moderated. */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
1071
1072int
1073sk_probe(struct device *parent, void *match, void *aux)
1074{
1075	struct skc_attach_args *sa = aux;
1076
1077	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
1078		return(0);
1079
1080	switch (sa->skc_type) {
1081	case SK_GENESIS:
1082	case SK_YUKON:
1083	case SK_YUKON_LITE:
1084	case SK_YUKON_LP:
1085#ifdef not_quite_yet
1086	case SK_YUKON_XL:
1087	case SK_YUKON_EC_U:
1088	case SK_YUKON_EC:
1089	case SK_YUKON_FE:
1090#endif
1091		return (1);
1092	}
1093
1094	return (0);
1095}
1096
1097/*
1098 * Each XMAC chip is attached as a separate logical IP interface.
1099 * Single port cards will have only one logical interface of course.
1100 */
/*
 * Per-port attach: link the port softc to the controller softc,
 * read the station address, carve up the on-board SRAM, identify
 * the PHY, allocate the descriptor rings and jumbo buffers, and
 * hook the interface into the network stack.
 */
void
sk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	bus_dma_segment_t seg;
	int i, rseg;

	/* Cross-link the port softc and the controller softc. */
	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	/* Each port transmits through its own synchronous TX BMU CSR. */
	if (sa->skc_port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (sa->skc_port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);


	printf(", address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
 	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	if (SK_IS_YUKON2(sc) ||
	    sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split all of RAM between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, split again RX/TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
		     "           tx_ramstart=%#x tx_ramend=%#x\n",
		     sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
		     sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

	/* Read and save PHY type */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;

	/* Set PHY address */
	if (SK_IS_GENESIS(sc)) {
		switch (sc_if->sk_phytype) {
			case SK_PHYTYPE_XMAC:
				sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
				break;
			case SK_PHYTYPE_BCOM:
				sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
				break;
			default:
				printf("%s: unsupported PHY type: %d\n",
				    sc->sk_dev.dv_xname, sc_if->sk_phytype);
				return;
		}
	}

	if (SK_IS_YUKON(sc)) {
		/*
		 * If the EEPROM-reported PHY type/media look
		 * uninitialized, assume a Marvell copper PHY.
		 */
		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;

			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%d bytes)\n",
		       sizeof(struct sk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
            &sc_if->sk_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_3;
	}
	/* Rings live in the freshly mapped DMA-safe memory. */
        sc_if->sk_rdata = (struct sk_ring_data *)kva;
	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf(": jumbo buffer allocation failed\n");
		goto fail_3;
	}

	/* Initialize the ifnet and hand it our entry points. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_XL:
	case SK_YUKON_EC_U:
	case SK_YUKON_EC:
	case SK_YUKON_FE:
		sk_init_yukon(sc_if);
		break;
	default:
		printf(": unknown device type %d\n", sc->sk_type);
		/* dealloc jumbo on error */
		goto fail_3;
	}

 	DPRINTFN(2, ("sk_attach: 1\n"));

	/* MII accessors differ between the XMAC and Marvell PHYs. */
	sc_if->sk_mii.mii_ifp = ifp;
	if (sc->sk_type == SK_GENESIS) {
		sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
	} else {
		/* Yukon/Yukon-2 */
		sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
	}

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    sk_ifmedia_upd, sk_ifmedia_sts);
	mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* No PHY: fall back to a fixed manual media entry. */
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	}
	else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Start the periodic link-state tick. */
	timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
	timeout_add(&sc_if->sk_tick_ch, hz);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(skc_shutdown, sc);

	DPRINTFN(2, ("sk_attach: end\n"));
	return;

	/* Unwind the DMA allocations in reverse order on failure. */
fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}
1332
1333int
1334skcprint(void *aux, const char *pnp)
1335{
1336	struct skc_attach_args *sa = aux;
1337
1338	if (pnp)
1339		printf("sk port %c at %s",
1340		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1341	else
1342		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1343	return (UNCONF);
1344}
1345
1346/*
1347 * Attach the interface. Allocate softc structures, do ifmedia
1348 * setup and ethernet/BPF attach.
1349 */
void
skc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t command, memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t size;
	u_int8_t skrs;
	char *revstr = NULL;

	DPRINTFN(2, ("begin skc_attach\n"));

	/*
	 * Handle power management nonsense.
	 */
	/* Capability ID 0x01 is PCI power management. */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;

	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */

	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, SK_PCI_LOMEM,
				   memtype, 0, &sc->sk_btag, &sc->sk_bhandle,
				   NULL, &size, 0) == 0)
			break;
		/* FALLTHROUGH: mapping failed, treat as unusable BAR */
	default:
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	/* Identify the chip generation and revision. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

	/* bail out here if chip is not recognized */
	if (sc->sk_type != SK_GENESIS && ! SK_YUKON_FAMILY(sc->sk_type)) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	/*
	 * Decode the amount of on-board SRAM and its offset; the
	 * GEnesis encodes it in EPROM0, Yukon stores it in 4K units.
	 */
	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf(": unknown ram size: %d\n", skrs);
			goto fail_2;
			break;
		}
	} else {
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
		     sc->sk_ramsize, sc->sk_ramsize / 1024,
		     sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	/* 'T'/'1' are copper; Yukon-2 is copper unless 'L'/'S' fiber. */
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1' ||
	    (SK_IS_YUKON2(sc) && !(sc->sk_pmd == 'L' ||
	    sc->sk_pmd == 'S')))
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	/* Pick a human-readable product name for the banner. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_name = "SysKonnect GEnesis";
		break;
	case SK_YUKON:
		sc->sk_name = "Marvell Yukon";
		break;
	case SK_YUKON_LITE:
		sc->sk_name = "Marvell Yukon Lite";
		break;
	case SK_YUKON_LP:
		sc->sk_name = "Marvell Yukon LP";
		break;
	case SK_YUKON_XL:
		sc->sk_name = "Marvell Yukon-2 XL";
		break;
	case SK_YUKON_EC_U:
		sc->sk_name = "Marvell Yukon-2 EC Ultra";
		break;
	case SK_YUKON_EC:
		sc->sk_name = "Marvell Yukon-2 EC";
		break;
	case SK_YUKON_FE:
		sc->sk_name = "Marvell Yukon-2 FE";
		break;
	default:
		sc->sk_name = "Marvell Yukon (Unknown)";
	}

	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
		u_int32_t flashaddr;
		u_int8_t testbyte;

		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

		/* test Flash-Address Register */
		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

		if (testbyte != 0) {
			/* This is a Yukon Lite Rev A0 */
			sc->sk_type = SK_YUKON_LITE;
			sc->sk_rev = SK_YUKON_LITE_REV_A0;
			/* restore Flash-Address Register */
			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
		}
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	printf(", %s", sc->sk_name);
	if (revstr != NULL)
		printf(" rev. %s", revstr);
	printf(" (0x%x): %s\n", sc->sk_rev, intrstr);

	/* Count the MACs; Yukon-2 and older chips report it differently. */
	sc->sk_macs = 1;

	if (SK_IS_YUKON2(sc)) {
		u_int8_t hw;

		hw = sk_win_read_1(sc, SK_Y2_HWRES);
		if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
			if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
			    SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
				sc->sk_macs++;
		}
	} else {
		if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
			sc->sk_macs++;
	}

	/* Attach one child device per MAC (sk_attach handles each port). */
	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return;

fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
}
1601
1602int
1603sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
1604{
1605	struct sk_softc		*sc = sc_if->sk_softc;
1606	struct sk_tx_desc	*f = NULL;
1607	u_int32_t		frag, cur, cnt = 0;
1608	int			i;
1609	struct sk_txmap_entry	*entry;
1610	bus_dmamap_t		txmap;
1611
1612	DPRINTFN(2, ("sk_encap\n"));
1613
1614	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
1615	if (entry == NULL) {
1616		DPRINTFN(2, ("sk_encap: no txmap available\n"));
1617		return ENOBUFS;
1618	}
1619	txmap = entry->dmamap;
1620
1621	cur = frag = *txidx;
1622
1623#ifdef SK_DEBUG
1624	if (skdebug >= 2)
1625		sk_dump_mbuf(m_head);
1626#endif
1627
1628	/*
1629	 * Start packing the mbufs in this chain into
1630	 * the fragment pointers. Stop when we run out
1631	 * of fragments or hit the end of the mbuf chain.
1632	 */
1633	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
1634	    BUS_DMA_NOWAIT)) {
1635		DPRINTFN(2, ("sk_encap: dmamap failed\n"));
1636		return(ENOBUFS);
1637	}
1638
1639	DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
1640
1641	/* Sync the DMA map. */
1642	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
1643	    BUS_DMASYNC_PREWRITE);
1644
1645	for (i = 0; i < txmap->dm_nsegs; i++) {
1646		if ((SK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) {
1647			DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
1648			return(ENOBUFS);
1649		}
1650		f = &sc_if->sk_rdata->sk_tx_ring[frag];
1651		f->sk_data_lo = txmap->dm_segs[i].ds_addr;
1652		f->sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
1653		if (cnt == 0)
1654			f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1655		else
1656			f->sk_ctl |= SK_TXCTL_OWN;
1657
1658		cur = frag;
1659		SK_INC(frag, SK_TX_RING_CNT);
1660		cnt++;
1661	}
1662
1663	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1664	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
1665
1666	sc_if->sk_cdata.sk_tx_map[cur] = entry;
1667	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1668		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1669
1670	/* Sync descriptors before handing to chip */
1671	SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
1672	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1673
1674	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1675
1676	/* Sync first descriptor to hand it off */
1677	SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1678
1679	sc_if->sk_cdata.sk_tx_cnt += cnt;
1680
1681#ifdef SK_DEBUG
1682	if (skdebug >= 2) {
1683		struct sk_tx_desc *desc;
1684		u_int32_t idx;
1685		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
1686			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
1687			sk_dump_txdesc(desc, idx);
1688		}
1689	}
1690#endif
1691
1692	*txidx = frag;
1693
1694	DPRINTFN(2, ("sk_encap: completed successfully\n"));
1695
1696	return(0);
1697}
1698
/*
 * Transmit start routine: drain the interface send queue into the
 * TX ring and kick the transmit BMU if anything was queued.
 */
void
sk_start(struct ifnet *ifp)
{
        struct sk_if_softc	*sc_if = ifp->if_softc;
        struct sk_softc		*sc = sc_if->sk_softc;
        struct mbuf		*m_head = NULL;
        u_int32_t		idx = sc_if->sk_cdata.sk_tx_prod;
	int			pkts = 0;

	DPRINTFN(2, ("sk_start\n"));

	/* Stop when the next ring slot still holds an unsent mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		/* Peek only: the packet stays queued if encap fails. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1750
1751
1752void
1753sk_watchdog(struct ifnet *ifp)
1754{
1755	struct sk_if_softc *sc_if = ifp->if_softc;
1756
1757	printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1758	ifp->if_flags &= ~IFF_RUNNING;
1759	sk_init(sc_if);
1760}
1761
1762void
1763skc_shutdown(void *v)
1764{
1765	struct sk_softc		*sc = v;
1766
1767	DPRINTFN(2, ("sk_shutdown\n"));
1768
1769	/* Turn off the 'driver is loaded' LED. */
1770	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1771
1772	/*
1773	 * Reset the GEnesis controller. Doing this should also
1774	 * assert the resets on the attached XMAC(s).
1775	 */
1776	sk_reset(sc);
1777}
1778
/*
 * Receive completion handler: walk the RX ring from the producer
 * index, handing completed frames to the stack and recycling (or
 * replacing) their jumbo buffers.
 */
void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mbuf		*m;
	struct sk_chain		*cur_rx;
	struct sk_rx_desc	*cur_desc;
	int			i, cur, total_len = 0;
	u_int32_t		rxstat;
	bus_dmamap_t		dmamap;
	u_int16_t		csum1, csum2;

	DPRINTFN(2, ("sk_rxeof\n"));

	i = sc_if->sk_cdata.sk_rx_prod;

	for (;;) {
		cur = i;

		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* OWN set means the chip still holds this descriptor. */
		if (sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN) {
			/* Invalidate the descriptor -- it's not ready yet */
			SK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_PREREAD);
			sc_if->sk_cdata.sk_rx_prod = i;
			break;
		}

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Detach the mbuf from the slot before recycling it. */
		rxstat = cur_desc->sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(cur_desc->sk_ctl);

		/* Hardware checksum snapshots for sk_rxcsum() below. */
		csum1 = sc_if->sk_rdata->sk_rx_ring[i].sk_csum1;
		csum2 = sc_if->sk_rdata->sk_rx_ring[i].sk_csum2;

		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: count the error and recycle the buffer. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur, m, dmamap);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur, m, dmamap);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			/* Re-align the payload after the m_devget() copy. */
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/* Validate the hardware checksums and set csum flags. */
		sk_rxcsum(ifp, m, csum1, csum2);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}
}
1868
1869void
1870sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1, const u_int16_t csum2)
1871{
1872	struct ether_header *eh;
1873	struct ip *ip;
1874	u_int8_t *pp;
1875	int hlen, len, plen;
1876	u_int16_t iph_csum, ipo_csum, ipd_csum, csum;
1877
1878	pp = mtod(m, u_int8_t *);
1879	plen = m->m_pkthdr.len;
1880	if (plen < sizeof(*eh))
1881		return;
1882	eh = (struct ether_header *)pp;
1883	iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff));
1884
1885	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1886		u_int16_t *xp = (u_int16_t *)pp;
1887
1888		xp = (u_int16_t *)pp;
1889		if (xp[1] != htons(ETHERTYPE_IP))
1890			return;
1891		iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff));
1892		iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff));
1893		xp = (u_int16_t *)(pp + sizeof(struct ip));
1894		iph_csum = in_cksum_addword(iph_csum, xp[0]);
1895		iph_csum = in_cksum_addword(iph_csum, xp[1]);
1896		pp += EVL_ENCAPLEN;
1897	} else if (eh->ether_type != htons(ETHERTYPE_IP))
1898		return;
1899
1900	pp += sizeof(*eh);
1901	plen -= sizeof(*eh);
1902
1903	ip = (struct ip *)pp;
1904
1905	if (ip->ip_v != IPVERSION)
1906		return;
1907
1908	hlen = ip->ip_hl << 2;
1909	if (hlen < sizeof(struct ip))
1910		return;
1911	if (hlen > ntohs(ip->ip_len))
1912		return;
1913
1914	/* Don't deal with truncated or padded packets. */
1915	if (plen != ntohs(ip->ip_len))
1916		return;
1917
1918	len = hlen - sizeof(struct ip);
1919	if (len > 0) {
1920		u_int16_t *p;
1921
1922		p = (u_int16_t *)(ip + 1);
1923		ipo_csum = 0;
1924		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1925			ipo_csum = in_cksum_addword(ipo_csum, *p);
1926		iph_csum = in_cksum_addword(iph_csum, ipo_csum);
1927		ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff));
1928	} else
1929		ipd_csum = csum2;
1930
1931	if (iph_csum != 0xffff)
1932		return;
1933	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1934
1935	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1936		return;                 /* ip frag, we're done for now */
1937
1938	pp += hlen;
1939
1940	/* Only know checksum protocol for udp/tcp */
1941	if (ip->ip_p == IPPROTO_UDP) {
1942		struct udphdr *uh = (struct udphdr *)pp;
1943
1944		if (uh->uh_sum == 0)    /* udp with no checksum */
1945			return;
1946	} else if (ip->ip_p != IPPROTO_TCP)
1947		return;
1948
1949	csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1950	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1951	if (csum == 0xffff) {
1952		m->m_pkthdr.csum_flags |= (ip->ip_p == IPPROTO_TCP) ?
1953		    M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK;
1954	}
1955}
1956
/*
 * Transmit completion handler: reclaim descriptors the chip has
 * finished with, free their mbufs, and return their DMA maps to
 * the free list.
 */
void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		idx;
	struct sk_txmap_entry	*entry;

	DPRINTFN(2, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
#ifdef SK_DEBUG
		if (skdebug >= 2)
			sk_dump_txdesc(cur_tx, idx);
#endif
		/* OWN still set: chip hasn't finished this one yet. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN) {
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		/* Count one packet per completed chain, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			/* Return the DMA map to the free list. */
			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	if (sc_if->sk_cdata.sk_tx_cnt == 0)
		ifp->if_timer = 0;
	else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Ring has room again: let sk_start() queue more packets. */
	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
2015
/*
 * Periodic tick (scheduled via sk_tick_ch): poll for link
 * restoration on XMAC-based boards; BCOM PHYs are handled through
 * their interrupt path instead.
 */
void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	if (!(ifp->if_flags & IFF_UP))
		return;

	/* BCOM PHYs report link changes through the interrupt handler. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: poll again in one second. */
	if (i != 3) {
		timeout_add(&sc_if->sk_tick_ch, hz);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	/* Link is back: stop polling until the next GP0 event. */
	timeout_del(&sc_if->sk_tick_ch);
}
2057
/*
 * Service a Broadcom PHY interrupt: read and clear the PHY interrupt
 * status, track link transitions, and drive the link LED.  RX/TX are
 * disabled on the MAC for the duration of the PHY access.
 */
void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int status;

	DPRINTFN(2, ("sk_intr_bcom\n"));

	/* Quiesce the MAC while we talk to the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface is down: reinitialize the MAC and bail. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link went down: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask further PHY interrupts. */
			sk_xmac_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			mii_tick(mii);
			timeout_add(&sc_if->sk_tick_ch, hz);
		}
	}

	/* Re-enable the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
2109
2110void
2111sk_intr_xmac(struct sk_if_softc	*sc_if)
2112{
2113	u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR);
2114
2115	DPRINTFN(2, ("sk_intr_xmac\n"));
2116
2117	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2118		if (status & XM_ISR_GP0_SET) {
2119			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2120			timeout_add(&sc_if->sk_tick_ch, hz);
2121		}
2122
2123		if (status & XM_ISR_AUTONEG_DONE) {
2124			timeout_add(&sc_if->sk_tick_ch, hz);
2125		}
2126	}
2127
2128	if (status & XM_IMR_TX_UNDERRUN)
2129		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2130
2131	if (status & XM_IMR_RX_OVERRUN)
2132		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2133}
2134
2135void
2136sk_intr_yukon(sc_if)
2137	struct sk_if_softc *sc_if;
2138{
2139	int status;
2140
2141	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2142
2143	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
2144}
2145
2146int
2147sk_intr(void *xsc)
2148{
2149	struct sk_softc		*sc = xsc;
2150	struct sk_if_softc	*sc_if0 = sc->sk_if[SK_PORT_A];
2151	struct sk_if_softc	*sc_if1 = sc->sk_if[SK_PORT_B];
2152	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2153	u_int32_t		status;
2154	int			claimed = 0;
2155
2156	if (sc_if0 != NULL)
2157		ifp0 = &sc_if0->arpcom.ac_if;
2158	if (sc_if1 != NULL)
2159		ifp1 = &sc_if1->arpcom.ac_if;
2160
2161	for (;;) {
2162		status = CSR_READ_4(sc, SK_ISSR);
2163		DPRINTFN(2, ("sk_intr: status=%#x\n", status));
2164
2165		if (!(status & sc->sk_intrmask))
2166			break;
2167
2168		claimed = 1;
2169
2170		/* Handle receive interrupts first. */
2171		if (status & SK_ISR_RX1_EOF) {
2172			sk_rxeof(sc_if0);
2173			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2174			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2175		}
2176		if (status & SK_ISR_RX2_EOF) {
2177			sk_rxeof(sc_if1);
2178			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2179			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2180		}
2181
2182		/* Then transmit interrupts. */
2183		if (status & SK_ISR_TX1_S_EOF) {
2184			sk_txeof(sc_if0);
2185			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2186			    SK_TXBMU_CLR_IRQ_EOF);
2187		}
2188		if (status & SK_ISR_TX2_S_EOF) {
2189			sk_txeof(sc_if1);
2190			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2191			    SK_TXBMU_CLR_IRQ_EOF);
2192		}
2193
2194		/* Then MAC interrupts. */
2195		if (status & SK_ISR_MAC1 && (ifp0->if_flags & IFF_RUNNING)) {
2196			if (sc->sk_type == SK_GENESIS)
2197				sk_intr_xmac(sc_if0);
2198			else
2199				sk_intr_yukon(sc_if0);
2200		}
2201
2202		if (status & SK_ISR_MAC2 && (ifp1->if_flags & IFF_RUNNING)) {
2203			if (sc->sk_type == SK_GENESIS)
2204				sk_intr_xmac(sc_if1);
2205			else
2206				sk_intr_yukon(sc_if1);
2207
2208		}
2209
2210		if (status & SK_ISR_EXTERNAL_REG) {
2211			if (ifp0 != NULL &&
2212			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2213				sk_intr_bcom(sc_if0);
2214
2215			if (ifp1 != NULL &&
2216			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2217				sk_intr_bcom(sc_if1);
2218		}
2219	}
2220
2221	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2222
2223	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
2224		sk_start(ifp0);
2225	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
2226		sk_start(ifp1);
2227
2228	return (claimed);
2229}
2230
/*
 * Initialize the XMAC II MAC on a GEnesis board: unreset the MAC,
 * bring up an external Broadcom PHY if one is present, program the
 * station address, RX/TX modes and thresholds, and configure the
 * MAC arbiter for the detected XMAC revision.
 */
void
sk_init_xmac(struct sk_if_softc	*sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	/* Magic register/value pairs for buggy early BCM5400 PHYs. */
	struct sk_bcom_hack     bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(2, ("sk_init_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision (used below to pick arbiter timings). */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then program its interrupt mask. */
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	/* NOTE(review): assumes ac_enaddr is 16-bit aligned — TODO confirm. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Mirror the interface's promiscuous flag into the MAC. */
	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	/* Accept or reject broadcasts per the interface flags. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversized (jumbo) frames through. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		/* External PHY: mask all XMAC interrupts. */
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter (timings depend on the XMAC revision). */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Assume link up until the tick/interrupt path says otherwise. */
	sc_if->sk_link = 1;
}
2392
2393void sk_init_yukon(sc_if)
2394	struct sk_if_softc	*sc_if;
2395{
2396	u_int32_t		phy;
2397	u_int16_t		reg;
2398	struct sk_softc		*sc;
2399	int			i;
2400
2401	sc = sc_if->sk_softc;
2402
2403	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
2404		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));
2405
2406	if (sc->sk_type == SK_YUKON_LITE &&
2407	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2408		/* Take PHY out of reset. */
2409		sk_win_write_4(sc, SK_GPIO,
2410			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
2411	}
2412
2413	/* GMAC and GPHY Reset */
2414	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2415
2416	DPRINTFN(6, ("sk_init_yukon: 1\n"));
2417
2418	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2419	DELAY(1000);
2420	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2421	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2422	DELAY(1000);
2423
2424	DPRINTFN(6, ("sk_init_yukon: 2\n"));
2425
2426	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2427		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2428
2429	if (sc->sk_coppertype)
2430		phy |= SK_GPHY_COPPER;
2431	else
2432		phy |= SK_GPHY_FIBER;
2433
2434	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));
2435
2436	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2437	DELAY(1000);
2438	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2439	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2440		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2441
2442	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
2443		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
2444
2445	DPRINTFN(6, ("sk_init_yukon: 3\n"));
2446
2447	/* unused read of the interrupt source register */
2448	DPRINTFN(6, ("sk_init_yukon: 4\n"));
2449	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2450
2451	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
2452	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2453	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
2454
2455	/* MIB Counter Clear Mode set */
2456        reg |= YU_PAR_MIB_CLR;
2457	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
2458	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
2459	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2460
2461	/* MIB Counter Clear Mode clear */
2462	DPRINTFN(6, ("sk_init_yukon: 5\n"));
2463        reg &= ~YU_PAR_MIB_CLR;
2464	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2465
2466	/* receive control reg */
2467	DPRINTFN(6, ("sk_init_yukon: 7\n"));
2468	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN |
2469		      YU_RCR_CRCR);
2470
2471	/* transmit parameter register */
2472	DPRINTFN(6, ("sk_init_yukon: 8\n"));
2473	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2474		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2475
2476	/* serial mode register */
2477	DPRINTFN(6, ("sk_init_yukon: 9\n"));
2478	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2479		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
2480		      YU_SMR_IPG_DATA(0x1e));
2481
2482	DPRINTFN(6, ("sk_init_yukon: 10\n"));
2483	/* Setup Yukon's address */
2484	for (i = 0; i < 3; i++) {
2485		/* Write Source Address 1 (unicast filter) */
2486		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2487			      sc_if->arpcom.ac_enaddr[i * 2] |
2488			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2489	}
2490
2491	for (i = 0; i < 3; i++) {
2492		reg = sk_win_read_2(sc_if->sk_softc,
2493				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2494		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2495	}
2496
2497	/* Set multicast filter */
2498	DPRINTFN(6, ("sk_init_yukon: 11\n"));
2499	sk_setmulti(sc_if);
2500
2501	/* enable interrupt mask for counter overflows */
2502	DPRINTFN(6, ("sk_init_yukon: 12\n"));
2503	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2504	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2505	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2506
2507	/* Configure RX MAC FIFO */
2508	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2509	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2510
2511	/* Configure TX MAC FIFO */
2512	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2513	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2514
2515	DPRINTFN(6, ("sk_init_yukon: end\n"));
2516}
2517
2518/*
2519 * Note that to properly initialize any part of the GEnesis chip,
2520 * you first have to take it out of reset mode.
2521 */
/*
 * Bring the interface up: stop it, initialize the MAC for the chip
 * family, configure FIFOs, transmit arbiter, RAM buffers and BMUs,
 * allocate the RX/TX descriptor rings, enable interrupts and start
 * the receive BMU.  Runs at splnet.
 */
void
sk_init(void *xsc_if)
{
	struct sk_if_softc	*sc_if = xsc_if;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mii_data		*mii = &sc_if->sk_mii;
	int			s;

	DPRINTFN(2, ("sk_init\n"));

	s = splnet();

	/* Already up: nothing to do. */
	if (ifp->if_flags & IFF_RUNNING) {
		splx(s);
		return;
	}

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			      SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			      SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			      SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s): per-chip-family MAC initialization. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: window into the adapter's packet RAM. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the descriptor rings. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
            SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; on failure unwind via sk_stop(). */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	/* Configure interrupt handling: clear pending, unmask this port. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (sc->sk_type == SK_GENESIS) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
			       XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	/* Yukon: enable GMAC TX/RX, let autoneg drive speed/duplex. */
	if (SK_YUKON_FAMILY(sc->sk_type)) {
		u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}


	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);
}
2666
/*
 * Stop the interface: cancel the tick timeout, put the PHY and MAC
 * back into reset, take the BMUs and RAM buffers offline, mask this
 * port's interrupts, and free all mbufs and DMA maps still held by
 * the RX/TX rings.
 */
void
sk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry	*dma;
	int			i;

	DPRINTFN(2, ("sk_stop\n"));

	/* Stop the periodic link tick. */
	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	/*
	 * NOTE(review): this XMAC access (and the XM_ISR/XM_IMR pair
	 * below) is issued unconditionally, i.e. also on Yukon parts
	 * which use a GMAC — presumably harmless, but verify.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			      SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	/* Take BMUs and RAM buffers offline, stop the LED counters. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear pending XMAC interrupts and mask them all. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	/* TX side also returns the DMA map to the free list. */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
		}
	}

	/* Destroy every DMA map left on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
}
2755
/*
 * Autoconf glue: "skc" is the controller device, "sk" is the
 * per-port network interface attached beneath it.
 */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach,
};

struct cfdriver skc_cd = {
	0, "skc", DV_DULL
};

struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach,
};

/* DV_IFNET: each "sk" instance is a network interface. */
struct cfdriver sk_cd = {
	0, "sk", DV_IFNET
};
2771
2772#ifdef SK_DEBUG
2773void
2774sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2775{
2776#define DESC_PRINT(X)					\
2777	if (desc->X)					\
2778		printf("txdesc[%d]." #X "=%#x\n",	\
2779		       idx, desc->X);
2780
2781	DESC_PRINT(sk_ctl);
2782	DESC_PRINT(sk_next);
2783	DESC_PRINT(sk_data_lo);
2784	DESC_PRINT(sk_data_hi);
2785	DESC_PRINT(sk_xmac_txstat);
2786	DESC_PRINT(sk_rsvd0);
2787	DESC_PRINT(sk_csum_startval);
2788	DESC_PRINT(sk_csum_startpos);
2789	DESC_PRINT(sk_csum_writepos);
2790	DESC_PRINT(sk_rsvd1);
2791#undef PRINT
2792}
2793
/*
 * Debug helper: hex/ASCII dump of a buffer, 16 bytes per row.
 * Each row shows the offset, the hex bytes (with an extra gap after
 * the eighth byte), then the printable-ASCII rendering.
 */
void
sk_dump_bytes(const char *data, int len)
{
	int off, col, count;

	for (off = 0; off < len; off += 16) {
		/* Number of bytes in this row (final row may be short). */
		count = len - off;
		if (count > 16)
			count = 16;

		printf("%08x  ", off);

		/* Hex column. */
		for (col = 0; col < count; col++) {
			printf("%02x ", data[off + col] & 0xff);
			if (col == 7)
				printf(" ");
		}

		/* Pad short rows so the ASCII column lines up. */
		for (; col < 16; col++)
			printf("   ");
		printf("  ");

		/* ASCII column: unprintable bytes become spaces. */
		for (col = 0; col < count; col++) {
			int ch = data[off + col] & 0xff;
			printf("%c", (ch >= ' ' && ch <= '~') ? ch : ' ');
		}

		printf("\n");

		if (count < 16)
			break;
	}
}
2825
2826void
2827sk_dump_mbuf(struct mbuf *m)
2828{
2829	int count = m->m_pkthdr.len;
2830
2831	printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);
2832
2833	while (count > 0 && m) {
2834		printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2835		       m, m->m_data, m->m_len);
2836		sk_dump_bytes(mtod(m, char *), m->m_len);
2837
2838		count -= m->m_len;
2839		m = m->m_next;
2840	}
2841}
2842#endif
2843