if_sis.c revision 147256
1/*-
2 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/pci/if_sis.c 147256 2005-06-10 16:49:24Z brooks $");
36
37/*
38 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
39 * available from http://www.sis.com.tw.
40 *
41 * This driver also supports the NatSemi DP83815. Datasheets are
42 * available from http://www.national.com.
43 *
44 * Written by Bill Paul <wpaul@ee.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/sockio.h>
64#include <sys/mbuf.h>
65#include <sys/malloc.h>
66#include <sys/kernel.h>
67#include <sys/module.h>
68#include <sys/socket.h>
69#include <sys/sysctl.h>
70
71#include <net/if.h>
72#include <net/if_arp.h>
73#include <net/ethernet.h>
74#include <net/if_dl.h>
75#include <net/if_media.h>
76#include <net/if_types.h>
77#include <net/if_vlan_var.h>
78
79#include <net/bpf.h>
80
81#include <machine/bus.h>
82#include <machine/resource.h>
83#include <sys/bus.h>
84#include <sys/rman.h>
85
86#include <dev/mii/mii.h>
87#include <dev/mii/miivar.h>
88
89#include <dev/pci/pcireg.h>
90#include <dev/pci/pcivar.h>
91
92#define SIS_USEIOSPACE
93
94#include <pci/if_sisreg.h>
95
96MODULE_DEPEND(sis, pci, 1, 1, 1);
97MODULE_DEPEND(sis, ether, 1, 1, 1);
98MODULE_DEPEND(sis, miibus, 1, 1, 1);
99
100/* "controller miibus0" required.  See GENERIC if you get errors here. */
101#include "miibus_if.h"
102
103/*
104 * Various supported device vendors/types and their names.
105 */
106static struct sis_type sis_devs[] = {
107	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
108	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
109	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
110	{ 0, 0, NULL }
111};
112
113static int sis_detach(device_t);
114static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
115static int sis_ifmedia_upd(struct ifnet *);
116static void sis_init(void *);
117static void sis_initl(struct sis_softc *);
118static void sis_intr(void *);
119static int sis_ioctl(struct ifnet *, u_long, caddr_t);
120static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
121static void sis_start(struct ifnet *);
122static void sis_startl(struct ifnet *);
123static void sis_stop(struct sis_softc *);
124static void sis_watchdog(struct ifnet *);
125
126#ifdef SIS_USEIOSPACE
127#define SIS_RES			SYS_RES_IOPORT
128#define SIS_RID			SIS_PCI_LOIO
129#else
130#define SIS_RES			SYS_RES_MEMORY
131#define SIS_RID			SIS_PCI_LOMEM
132#endif
133
134#define SIS_SETBIT(sc, reg, x)				\
135	CSR_WRITE_4(sc, reg,				\
136		CSR_READ_4(sc, reg) | (x))
137
138#define SIS_CLRBIT(sc, reg, x)				\
139	CSR_WRITE_4(sc, reg,				\
140		CSR_READ_4(sc, reg) & ~(x))
141
142#define SIO_SET(x)					\
143	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
144
145#define SIO_CLR(x)					\
146	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
147
148static void
149sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
150{
151	struct sis_desc	*r;
152
153	r = arg;
154	r->sis_next = segs->ds_addr;
155}
156
157static void
158sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
159{
160	struct sis_desc	*r;
161
162	r = arg;
163	r->sis_ptr = segs->ds_addr;
164}
165
166static void
167sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error)
168{
169	u_int32_t *p;
170
171	p = arg;
172	*p = segs->ds_addr;
173}
174
175/*
176 * Routine to reverse the bits in a word. Stolen almost
177 * verbatim from /usr/games/fortune.
178 */
179static uint16_t
180sis_reverse(uint16_t n)
181{
182	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
183	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
184	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
185	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
186
187	return(n);
188}
189
190static void
191sis_delay(struct sis_softc *sc)
192{
193	int			idx;
194
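	/*
	 * Burn roughly 300 ns with dummy CSR reads: each read costs at
	 * least one 33 MHz PCI cycle, hence the (300 / 33) + 1 iterations.
	 */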
195	for (idx = (300 / 33) + 1; idx > 0; idx--)
196		CSR_READ_4(sc, SIS_CSR);
197}
198
199static void
200sis_eeprom_idle(struct sis_softc *sc)
201{
202	int		i;
203
204	SIO_SET(SIS_EECTL_CSEL);
205	sis_delay(sc);
206	SIO_SET(SIS_EECTL_CLK);
207	sis_delay(sc);
208
209	for (i = 0; i < 25; i++) {
210		SIO_CLR(SIS_EECTL_CLK);
211		sis_delay(sc);
212		SIO_SET(SIS_EECTL_CLK);
213		sis_delay(sc);
214	}
215
216	SIO_CLR(SIS_EECTL_CLK);
217	sis_delay(sc);
218	SIO_CLR(SIS_EECTL_CSEL);
219	sis_delay(sc);
220	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
221}
222
223/*
224 * Send a read command and address to the EEPROM.
225 */
226static void
227sis_eeprom_putbyte(struct sis_softc *sc, int addr)
228{
229	int		d, i;
230
231	d = addr | SIS_EECMD_READ;
232
233	/*
234	 * Feed in each bit, MSB first, and strobe the clock.
235	 */
236	for (i = 0x400; i; i >>= 1) {
237		if (d & i) {
238			SIO_SET(SIS_EECTL_DIN);
239		} else {
240			SIO_CLR(SIS_EECTL_DIN);
241		}
242		sis_delay(sc);
243		SIO_SET(SIS_EECTL_CLK);
244		sis_delay(sc);
245		SIO_CLR(SIS_EECTL_CLK);
246		sis_delay(sc);
247	}
248}
249
250/*
251 * Read a word of data stored in the EEPROM at address 'addr.'
252 */
253static void
254sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
255{
256	int		i;
257	u_int16_t		word = 0;
258
259	/* Force EEPROM to idle state. */
260	sis_eeprom_idle(sc);
261
262	/* Enter EEPROM access mode. */
263	sis_delay(sc);
264	SIO_CLR(SIS_EECTL_CLK);
265	sis_delay(sc);
266	SIO_SET(SIS_EECTL_CSEL);
267	sis_delay(sc);
268
269	/*
270	 * Send address of word we want to read.
271	 */
272	sis_eeprom_putbyte(sc, addr);
273
274	/*
275	 * Shift in the 16 data bits from the EEPROM, MSB first.
276	 */
277	for (i = 0x8000; i; i >>= 1) {
278		SIO_SET(SIS_EECTL_CLK);
279		sis_delay(sc);
280		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
281			word |= i;
282		sis_delay(sc);
283		SIO_CLR(SIS_EECTL_CLK);
284		sis_delay(sc);
285	}
286
287	/* Turn off EEPROM access mode. */
288	sis_eeprom_idle(sc);
289
290	*dest = word;
291}
292
293/*
294 * Read a sequence of words from the EEPROM.
295 */
296static void
297sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
298{
299	int			i;
300	u_int16_t		word = 0, *ptr;
301
302	for (i = 0; i < cnt; i++) {
303		sis_eeprom_getword(sc, off + i, &word);
304		ptr = (u_int16_t *)(dest + (i * 2));
305		if (swap)
306			*ptr = ntohs(word);
307		else
308			*ptr = word;
309	}
310}
311
312#if defined(__i386__) || defined(__amd64__)
313static device_t
314sis_find_bridge(device_t dev)
315{
316	devclass_t		pci_devclass;
317	device_t		*pci_devices;
318	int			pci_count = 0;
319	device_t		*pci_children;
320	int			pci_childcount = 0;
321	device_t		*busp, *childp;
322	device_t		child = NULL;
323	int			i, j;
324
325	if ((pci_devclass = devclass_find("pci")) == NULL)
326		return(NULL);
327
328	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
329
330	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
331		pci_childcount = 0;
332		device_get_children(*busp, &pci_children, &pci_childcount);
333		for (j = 0, childp = pci_children;
334		    j < pci_childcount; j++, childp++) {
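			/*
			 * Device ID 0x0008 is the SiS ISA bridge through
			 * which the APC CMOS RAM is reached.
			 */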
335			if (pci_get_vendor(*childp) == SIS_VENDORID &&
336			    pci_get_device(*childp) == 0x0008) {
337				child = *childp;
338				goto done;
339			}
340		}
341	}
342
343done:
344	free(pci_devices, M_TEMP);
345	free(pci_children, M_TEMP);
346	return(child);
347}
348
349static void
350sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
351{
352	device_t		bridge;
353	u_int8_t		reg;
354	int			i;
355	bus_space_tag_t		btag;
356
357	bridge = sis_find_bridge(dev);
358	if (bridge == NULL)
359		return;
360	reg = pci_read_config(bridge, 0x48, 1);
361	pci_write_config(bridge, 0x48, reg|0x40, 1);
362
363	/* XXX */
364#if defined(__i386__)
365	btag = I386_BUS_SPACE_IO;
366#elif defined(__amd64__)
367	btag = AMD64_BUS_SPACE_IO;
368#endif
369
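	/*
	 * The APC CMOS RAM is read through the standard RTC
	 * index/data I/O ports (0x70 and 0x71).
	 */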
370	for (i = 0; i < cnt; i++) {
371		bus_space_write_1(btag, 0x0, 0x70, i + off);
372		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
373	}
374
375	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
376	return;
377}
378
379static void
380sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
381{
382	u_int32_t		filtsave, csrsave;
383
384	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
385	csrsave = CSR_READ_4(sc, SIS_CSR);
386
387	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
388	CSR_WRITE_4(sc, SIS_CSR, 0);
389
390	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
391
392	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
393	((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
394	CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
395	((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
396	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
397	((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
398
399	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
400	CSR_WRITE_4(sc, SIS_CSR, csrsave);
401	return;
402}
403#endif
404
405/*
406 * Sync the PHYs by setting data bit and strobing the clock 32 times.
407 */
408static void
409sis_mii_sync(struct sis_softc *sc)
410{
411	int		i;
412
413 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
414
415 	for (i = 0; i < 32; i++) {
416 		SIO_SET(SIS_MII_CLK);
417 		DELAY(1);
418 		SIO_CLR(SIS_MII_CLK);
419 		DELAY(1);
420 	}
421}
422
423/*
424 * Clock a series of bits through the MII.
425 */
426static void
427sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
428{
429	int			i;
430
431	SIO_CLR(SIS_MII_CLK);
432
433	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
434		if (bits & i) {
435			SIO_SET(SIS_MII_DATA);
436		} else {
437			SIO_CLR(SIS_MII_DATA);
438		}
439		DELAY(1);
440		SIO_CLR(SIS_MII_CLK);
441		DELAY(1);
442		SIO_SET(SIS_MII_CLK);
443	}
444}
445
446/*
447 * Read a PHY register through the MII.
448 */
449static int
450sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
451{
452	int			i, ack, s;
453
454	s = splimp();
455
456	/*
457	 * Set up frame for RX.
458	 */
459	frame->mii_stdelim = SIS_MII_STARTDELIM;
460	frame->mii_opcode = SIS_MII_READOP;
461	frame->mii_turnaround = 0;
462	frame->mii_data = 0;
463
464	/*
465 	 * Turn on data xmit.
466	 */
467	SIO_SET(SIS_MII_DIR);
468
469	sis_mii_sync(sc);
470
471	/*
472	 * Send command/address info.
473	 */
474	sis_mii_send(sc, frame->mii_stdelim, 2);
475	sis_mii_send(sc, frame->mii_opcode, 2);
476	sis_mii_send(sc, frame->mii_phyaddr, 5);
477	sis_mii_send(sc, frame->mii_regaddr, 5);
478
479	/* Idle bit */
480	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
481	DELAY(1);
482	SIO_SET(SIS_MII_CLK);
483	DELAY(1);
484
485	/* Turn off xmit. */
486	SIO_CLR(SIS_MII_DIR);
487
488	/* Check for ack */
489	SIO_CLR(SIS_MII_CLK);
490	DELAY(1);
491	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
492	SIO_SET(SIS_MII_CLK);
493	DELAY(1);
494
495	/*
496	 * Now try reading data bits. If the ack failed, we still
497	 * need to clock through 16 cycles to keep the PHY(s) in sync.
498	 */
499	if (ack) {
500		for(i = 0; i < 16; i++) {
501			SIO_CLR(SIS_MII_CLK);
502			DELAY(1);
503			SIO_SET(SIS_MII_CLK);
504			DELAY(1);
505		}
506		goto fail;
507	}
508
509	for (i = 0x8000; i; i >>= 1) {
510		SIO_CLR(SIS_MII_CLK);
511		DELAY(1);
512		if (!ack) {
513			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
514				frame->mii_data |= i;
515			DELAY(1);
516		}
517		SIO_SET(SIS_MII_CLK);
518		DELAY(1);
519	}
520
521fail:
522
523	SIO_CLR(SIS_MII_CLK);
524	DELAY(1);
525	SIO_SET(SIS_MII_CLK);
526	DELAY(1);
527
528	splx(s);
529
530	if (ack)
531		return(1);
532	return(0);
533}
534
535/*
536 * Write to a PHY register through the MII.
537 */
538static int
539sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
540{
541	int			s;
542
543	 s = splimp();
544 	/*
545 	 * Set up frame for TX.
546 	 */
547
548 	frame->mii_stdelim = SIS_MII_STARTDELIM;
549 	frame->mii_opcode = SIS_MII_WRITEOP;
550 	frame->mii_turnaround = SIS_MII_TURNAROUND;
551
552 	/*
553  	 * Turn on data output.
554 	 */
555 	SIO_SET(SIS_MII_DIR);
556
557 	sis_mii_sync(sc);
558
559 	sis_mii_send(sc, frame->mii_stdelim, 2);
560 	sis_mii_send(sc, frame->mii_opcode, 2);
561 	sis_mii_send(sc, frame->mii_phyaddr, 5);
562 	sis_mii_send(sc, frame->mii_regaddr, 5);
563 	sis_mii_send(sc, frame->mii_turnaround, 2);
564 	sis_mii_send(sc, frame->mii_data, 16);
565
566 	/* Idle bit. */
567 	SIO_SET(SIS_MII_CLK);
568 	DELAY(1);
569 	SIO_CLR(SIS_MII_CLK);
570 	DELAY(1);
571
572 	/*
573 	 * Turn off xmit.
574 	 */
575 	SIO_CLR(SIS_MII_DIR);
576
577 	splx(s);
578
579 	return(0);
580}
581
582static int
583sis_miibus_readreg(device_t dev, int phy, int reg)
584{
585	struct sis_softc	*sc;
586	struct sis_mii_frame    frame;
587
588	sc = device_get_softc(dev);
589
590	if (sc->sis_type == SIS_TYPE_83815) {
591		if (phy != 0)
592			return(0);
593		/*
594		 * The NatSemi chip can take a while after
595		 * a reset to come ready, during which the BMSR
596		 * returns a value of 0. This is *never* supposed
597		 * to happen: some of the BMSR bits are meant to
598		 * be hardwired in the on position, and this can
599		 * confuse the miibus code a bit during the probe
600		 * and attach phase. So we make an effort to check
601		 * for this condition and wait for it to clear.
602		 */
603		if (!CSR_READ_4(sc, NS_BMSR))
604			DELAY(1000);
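		/*
		 * The DP83815 maps the PHY registers directly into CSR
		 * space, 4 bytes apart starting at NS_BMCR.
		 */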
605		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
606	}
607
608	/*
609	 * Chipsets older than SIS_635 seem unable to read/write
610	 * through MDIO; use the enhanced PHY access register
611	 * for them instead.
612	 */
613	if (sc->sis_type == SIS_TYPE_900 &&
614	    sc->sis_rev < SIS_REV_635) {
615		int i, val = 0;
616
617		if (phy != 0)
618			return(0);
619
620		CSR_WRITE_4(sc, SIS_PHYCTL,
621		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
622		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
623
624		for (i = 0; i < SIS_TIMEOUT; i++) {
625			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
626				break;
627		}
628
629		if (i == SIS_TIMEOUT) {
630			printf("sis%d: PHY failed to come ready\n",
631			    sc->sis_unit);
632			return(0);
633		}
634
635		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
636
637		if (val == 0xFFFF)
638			return(0);
639
640		return(val);
641	} else {
642		bzero((char *)&frame, sizeof(frame));
643
644		frame.mii_phyaddr = phy;
645		frame.mii_regaddr = reg;
646		sis_mii_readreg(sc, &frame);
647
648		return(frame.mii_data);
649	}
650}
651
652static int
653sis_miibus_writereg(device_t dev, int phy, int reg, int data)
654{
655	struct sis_softc	*sc;
656	struct sis_mii_frame	frame;
657
658	sc = device_get_softc(dev);
659
660	if (sc->sis_type == SIS_TYPE_83815) {
661		if (phy != 0)
662			return(0);
663		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
664		return(0);
665	}
666
667	/*
668	 * Chipsets older than SIS_635 seem unable to read/write
669	 * through MDIO; use the enhanced PHY access register
670	 * for them instead.
671	 */
672	if (sc->sis_type == SIS_TYPE_900 &&
673	    sc->sis_rev < SIS_REV_635) {
674		int i;
675
676		if (phy != 0)
677			return(0);
678
679		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
680		    (reg << 6) | SIS_PHYOP_WRITE);
681		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
682
683		for (i = 0; i < SIS_TIMEOUT; i++) {
684			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
685				break;
686		}
687
688		if (i == SIS_TIMEOUT)
689			printf("sis%d: PHY failed to come ready\n",
690			    sc->sis_unit);
691	} else {
692		bzero((char *)&frame, sizeof(frame));
693
694		frame.mii_phyaddr = phy;
695		frame.mii_regaddr = reg;
696		frame.mii_data = data;
697		sis_mii_writereg(sc, &frame);
698	}
699	return(0);
700}
701
702static void
703sis_miibus_statchg(device_t dev)
704{
705	struct sis_softc	*sc;
706
707	sc = device_get_softc(dev);
708	SIS_LOCK_ASSERT(sc);
709	sis_initl(sc);
710}
711
712static uint32_t
713sis_mchash(struct sis_softc *sc, const uint8_t *addr)
714{
715	uint32_t		crc;
716
717	/* Compute CRC for the address value. */
718	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
719
720	/*
721	 * Return the filter bit position.  The NatSemi chip has a
722	 * 512-bit filter (9-bit index), newer SiS revisions a 256-bit
723	 * filter (8-bit index) and older SiS parts a 128-bit filter
724	 * (7-bit index), so we special-case them below.
725	 */
726	if (sc->sis_type == SIS_TYPE_83815)
727		return (crc >> 23);
728	else if (sc->sis_rev >= SIS_REV_635 ||
729	    sc->sis_rev == SIS_REV_900B)
730		return (crc >> 24);
731	else
732		return (crc >> 25);
733}
734
735static void
736sis_setmulti_ns(struct sis_softc *sc)
737{
738	struct ifnet		*ifp;
739	struct ifmultiaddr	*ifma;
740	u_int32_t		h = 0, i, filtsave;
741	int			bit, index;
742
743	ifp = sc->sis_ifp;
744
745	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
746		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
747		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
748		return;
749	}
750
751	/*
752	 * We have to explicitly enable the multicast hash table
753	 * on the NatSemi chip if we want to use it, which we do.
754	 */
755	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
756	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
757
758	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
759
760	/* first, zot all the existing hash bits */
761	for (i = 0; i < 32; i++) {
762		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
763		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
764	}
765
766	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
767		if (ifma->ifma_addr->sa_family != AF_LINK)
768			continue;
769		h = sis_mchash(sc,
770		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
771		index = h >> 3;
772		bit = h & 0x1F;
773		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
774		if (bit > 0xF)
775			bit -= 0x10;
776		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
777	}
778
779	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
780
781	return;
782}
783
784static void
785sis_setmulti_sis(struct sis_softc *sc)
786{
787	struct ifnet		*ifp;
788	struct ifmultiaddr	*ifma;
789	u_int32_t		h, i, n, ctl;
790	u_int16_t		hashes[16];
791
792	ifp = sc->sis_ifp;
793
794	/* hash table size */
795	if (sc->sis_rev >= SIS_REV_635 ||
796	    sc->sis_rev == SIS_REV_900B)
797		n = 16;
798	else
799		n = 8;
800
801	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
802
803	if (ifp->if_flags & IFF_BROADCAST)
804		ctl |= SIS_RXFILTCTL_BROAD;
805
806	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
807		ctl |= SIS_RXFILTCTL_ALLMULTI;
808		if (ifp->if_flags & IFF_PROMISC)
809			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
810		for (i = 0; i < n; i++)
811			hashes[i] = ~0;
812	} else {
813		for (i = 0; i < n; i++)
814			hashes[i] = 0;
815		i = 0;
816		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
817			if (ifma->ifma_addr->sa_family != AF_LINK)
818			continue;
819			h = sis_mchash(sc,
820			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
821			hashes[h >> 4] |= 1 << (h & 0xf);
822			i++;
823		}
824		if (i > n) {
825			ctl |= SIS_RXFILTCTL_ALLMULTI;
826			for (i = 0; i < n; i++)
827				hashes[i] = ~0;
828		}
829	}
830
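	/*
	 * Program the n 16-bit hash table words; writing (4 + i) << 16
	 * to the filter control register selects word i.
	 */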
831	for (i = 0; i < n; i++) {
832		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
833		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
834	}
835
836	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
837}
838
839static void
840sis_reset(struct sis_softc *sc)
841{
842	int		i;
843
844	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
845
846	for (i = 0; i < SIS_TIMEOUT; i++) {
847		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
848			break;
849	}
850
851	if (i == SIS_TIMEOUT)
852		printf("sis%d: reset never completed\n", sc->sis_unit);
853
854	/* Wait a little while for the chip to get its brains in order. */
855	DELAY(1000);
856
857	/*
858	 * If this is a NatSemi chip, make sure to clear
859	 * PME mode.
860	 */
861	if (sc->sis_type == SIS_TYPE_83815) {
862		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
863		CSR_WRITE_4(sc, NS_CLKRUN, 0);
864	}
865
866	return;
867}
868
869/*
870 * Probe for an SiS chip. Check the PCI vendor and device
871 * IDs against our list and return a device name if we find a match.
872 */
873static int
874sis_probe(device_t dev)
875{
876	struct sis_type		*t;
877
878	t = sis_devs;
879
880	while(t->sis_name != NULL) {
881		if ((pci_get_vendor(dev) == t->sis_vid) &&
882		    (pci_get_device(dev) == t->sis_did)) {
883			device_set_desc(dev, t->sis_name);
884			return (BUS_PROBE_DEFAULT);
885		}
886		t++;
887	}
888
889	return(ENXIO);
890}
891
892/*
893 * Attach the interface. Allocate softc structures, do ifmedia
894 * setup and ethernet/BPF attach.
895 */
896static int
897sis_attach(device_t dev)
898{
899	u_char			eaddr[ETHER_ADDR_LEN];
900	struct sis_softc	*sc;
901	struct ifnet		*ifp;
902	int			unit, error = 0, rid, waittime = 0;
903
904	waittime = 0;
905	sc = device_get_softc(dev);
906	unit = device_get_unit(dev);
907
908	sc->sis_self = dev;
909
910	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
911	    MTX_DEF);
912
913	if (pci_get_device(dev) == SIS_DEVICEID_900)
914		sc->sis_type = SIS_TYPE_900;
915	if (pci_get_device(dev) == SIS_DEVICEID_7016)
916		sc->sis_type = SIS_TYPE_7016;
917	if (pci_get_vendor(dev) == NS_VENDORID)
918		sc->sis_type = SIS_TYPE_83815;
919
920	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
921	/*
922	 * Map control/status registers.
923	 */
924	pci_enable_busmaster(dev);
925
926	rid = SIS_RID;
927	sc->sis_res = bus_alloc_resource_any(dev, SIS_RES, &rid, RF_ACTIVE);
928
929	if (sc->sis_res == NULL) {
930		printf("sis%d: couldn't map ports/memory\n", unit);
931		error = ENXIO;
932		goto fail;
933	}
934
935	sc->sis_btag = rman_get_bustag(sc->sis_res);
936	sc->sis_bhandle = rman_get_bushandle(sc->sis_res);
937
938	/* Allocate interrupt */
939	rid = 0;
940	sc->sis_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
941	    RF_SHAREABLE | RF_ACTIVE);
942
943	if (sc->sis_irq == NULL) {
944		printf("sis%d: couldn't map interrupt\n", unit);
945		error = ENXIO;
946		goto fail;
947	}
948
949	/* Reset the adapter. */
950	sis_reset(sc);
951
952	if (sc->sis_type == SIS_TYPE_900 &&
953            (sc->sis_rev == SIS_REV_635 ||
954            sc->sis_rev == SIS_REV_900B)) {
955		SIO_SET(SIS_CFG_RND_CNT);
956		SIO_SET(SIS_CFG_PERR_DETECT);
957	}
958
959	/*
960	 * Get station address from the EEPROM.
961	 */
962	switch (pci_get_vendor(dev)) {
963	case NS_VENDORID:
964		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
965
966		/* We can't update the device description, so spew */
967		if (sc->sis_srr == NS_SRR_15C)
968			device_printf(dev, "Silicon Revision: DP83815C\n");
969		else if (sc->sis_srr == NS_SRR_15D)
970			device_printf(dev, "Silicon Revision: DP83815D\n");
971		else if (sc->sis_srr == NS_SRR_16A)
972			device_printf(dev, "Silicon Revision: DP83816A\n");
973		else
974			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
975
976		/*
977		 * Reading the MAC address out of the EEPROM on
978		 * the NatSemi chip takes a bit more work than
979		 * you'd expect. The address spans 4 16-bit words,
980		 * with the first word containing only a single bit.
981		 * You have to shift everything over one bit to
982		 * get it aligned properly. Also, the bits are
983		 * stored backwards (the LSB is really the MSB,
984		 * and so on) so you have to reverse them in order
985		 * to get the MAC address into the form we want.
986		 * Why? Who the hell knows.
987		 */
988		{
989			u_int16_t		tmp[4];
990
991			sis_read_eeprom(sc, (caddr_t)&tmp,
992			    NS_EE_NODEADDR, 4, 0);
993
994			/* Shift everything over one bit. */
995			tmp[3] = tmp[3] >> 1;
996			tmp[3] |= tmp[2] << 15;
997			tmp[2] = tmp[2] >> 1;
998			tmp[2] |= tmp[1] << 15;
999			tmp[1] = tmp[1] >> 1;
1000			tmp[1] |= tmp[0] << 15;
1001
1002			/* Now reverse all the bits. */
1003			tmp[3] = sis_reverse(tmp[3]);
1004			tmp[2] = sis_reverse(tmp[2]);
1005			tmp[1] = sis_reverse(tmp[1]);
1006
1007			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
1008		}
1009		break;
1010	case SIS_VENDORID:
1011	default:
1012#if defined(__i386__) || defined(__amd64__)
1013		/*
1014		 * If this is a SiS 630E chipset with an embedded
1015		 * SiS 900 controller, we have to read the MAC address
1016		 * from the APC CMOS RAM. Our method for doing this
1017		 * is very ugly since we have to reach out and grab
1018		 * ahold of hardware for which we cannot properly
1019		 * allocate resources. This code is only compiled on
1020		 * i386 and amd64, since the SiS 630E chipset is found
1021		 * only on x86 motherboards. Note that there are
1022		 * a lot of magic numbers in this hack. These are
1023		 * taken from SiS's Linux driver. I'd like to replace
1024		 * them with proper symbolic definitions, but that
1025		 * requires some datasheets that I don't have access
1026		 * to at the moment.
1027		 */
1028		if (sc->sis_rev == SIS_REV_630S ||
1029		    sc->sis_rev == SIS_REV_630E ||
1030		    sc->sis_rev == SIS_REV_630EA1)
1031			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1032
1033		else if (sc->sis_rev == SIS_REV_635 ||
1034			 sc->sis_rev == SIS_REV_630ET)
1035			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1036		else if (sc->sis_rev == SIS_REV_96x) {
1037			/* Allow the LAN part to read the EEPROM. The EEPROM
1038			 * is shared with a 1394 controller, so each time we
1039			 * access it we must request it by setting SIS_EECMD_REQ.
1040			 */
1041			SIO_SET(SIS_EECMD_REQ);
1042			for (waittime = 0; waittime < SIS_TIMEOUT;
1043			    waittime++) {
1044				/* Force EEPROM to idle state. */
1045				sis_eeprom_idle(sc);
1046				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1047					sis_read_eeprom(sc, (caddr_t)&eaddr,
1048					    SIS_EE_NODEADDR, 3, 0);
1049					break;
1050				}
1051				DELAY(1);
1052			}
1053			/*
1054			 * Drive SIS_EECTL_CLK high so that another master
1055			 * can operate on the i2c bus.
1056			 */
1057			SIO_SET(SIS_EECTL_CLK);
1058			/* Give up the LAN's EEPROM access */
1059			SIO_SET(SIS_EECMD_DONE);
1060		} else
1061#endif
1062			sis_read_eeprom(sc, (caddr_t)&eaddr,
1063			    SIS_EE_NODEADDR, 3, 0);
1064		break;
1065	}
1066
1067	sc->sis_unit = unit;
1068	if (debug_mpsafenet)
1069		callout_init(&sc->sis_stat_ch, CALLOUT_MPSAFE);
1070	else
1071		callout_init(&sc->sis_stat_ch, 0);
1072
1073	/*
1074	 * Allocate the parent bus DMA tag appropriate for PCI.
1075	 */
1076#define SIS_NSEG_NEW 32
1077	 error = bus_dma_tag_create(NULL,	/* parent */
1078			1, 0,			/* alignment, boundary */
1079			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1080			BUS_SPACE_MAXADDR,	/* highaddr */
1081			NULL, NULL,		/* filter, filterarg */
1082			MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
1083			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1084			BUS_DMA_ALLOCNOW,	/* flags */
1085			NULL, NULL,		/* lockfunc, lockarg */
1086			&sc->sis_parent_tag);
1087	if (error)
1088		goto fail;
1089
1090	/*
1091	 * Now allocate a tag for the DMA descriptor lists and a chunk
1092	 * of DMA-able memory based on the tag.  Also obtain the physical
1093	 * addresses of the RX and TX ring, which we'll need later.
1094	 * All of our lists are allocated as a contiguous block
1095	 * of memory.
1096	 */
1097	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1098			1, 0,			/* alignment, boundary */
1099			BUS_SPACE_MAXADDR,	/* lowaddr */
1100			BUS_SPACE_MAXADDR,	/* highaddr */
1101			NULL, NULL,		/* filter, filterarg */
1102			SIS_RX_LIST_SZ, 1,	/* maxsize,nsegments */
1103			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1104			0,			/* flags */
1105			busdma_lock_mutex,	/* lockfunc */
1106			&Giant,			/* lockarg */
1107			&sc->sis_rx_tag);
1108	if (error)
1109		goto fail;
1110
1111	error = bus_dmamem_alloc(sc->sis_rx_tag,
1112	    (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1113	    &sc->sis_rx_dmamap);
1114
1115	if (error) {
1116		printf("sis%d: no memory for rx list buffers!\n", unit);
1117		bus_dma_tag_destroy(sc->sis_rx_tag);
1118		sc->sis_rx_tag = NULL;
1119		goto fail;
1120	}
1121
1122	error = bus_dmamap_load(sc->sis_rx_tag,
1123	    sc->sis_rx_dmamap, &(sc->sis_rx_list[0]),
1124	    sizeof(struct sis_desc), sis_dma_map_ring,
1125	    &sc->sis_rx_paddr, 0);
1126
1127	if (error) {
1128		printf("sis%d: cannot get address of the rx ring!\n", unit);
1129		bus_dmamem_free(sc->sis_rx_tag,
1130		    sc->sis_rx_list, sc->sis_rx_dmamap);
1131		bus_dma_tag_destroy(sc->sis_rx_tag);
1132		sc->sis_rx_tag = NULL;
1133		goto fail;
1134	}
1135
1136	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1137			1, 0,			/* alignment, boundary */
1138			BUS_SPACE_MAXADDR,	/* lowaddr */
1139			BUS_SPACE_MAXADDR,	/* highaddr */
1140			NULL, NULL,		/* filter, filterarg */
1141			SIS_TX_LIST_SZ, 1,	/* maxsize,nsegments */
1142			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1143			0,			/* flags */
1144			busdma_lock_mutex,	/* lockfunc */
1145			&Giant,			/* lockarg */
1146			&sc->sis_tx_tag);
1147	if (error)
1148		goto fail;
1149
1150	error = bus_dmamem_alloc(sc->sis_tx_tag,
1151	    (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1152	    &sc->sis_tx_dmamap);
1153
1154	if (error) {
1155		printf("sis%d: no memory for tx list buffers!\n", unit);
1156		bus_dma_tag_destroy(sc->sis_tx_tag);
1157		sc->sis_tx_tag = NULL;
1158		goto fail;
1159	}
1160
1161	error = bus_dmamap_load(sc->sis_tx_tag,
1162	    sc->sis_tx_dmamap, &(sc->sis_tx_list[0]),
1163	    sizeof(struct sis_desc), sis_dma_map_ring,
1164	    &sc->sis_tx_paddr, 0);
1165
1166	if (error) {
1167		printf("sis%d: cannot get address of the tx ring!\n", unit);
1168		bus_dmamem_free(sc->sis_tx_tag,
1169		    sc->sis_tx_list, sc->sis_tx_dmamap);
1170		bus_dma_tag_destroy(sc->sis_tx_tag);
1171		sc->sis_tx_tag = NULL;
1172		goto fail;
1173	}
1174
1175	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
1176			1, 0,			/* alignment, boundary */
1177			BUS_SPACE_MAXADDR,	/* lowaddr */
1178			BUS_SPACE_MAXADDR,	/* highaddr */
1179			NULL, NULL,		/* filter, filterarg */
1180			MCLBYTES, 1,		/* maxsize,nsegments */
1181			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1182			0,			/* flags */
1183			busdma_lock_mutex,	/* lockfunc */
1184			&Giant,			/* lockarg */
1185			&sc->sis_tag);
1186	if (error)
1187		goto fail;
1188
1189	/*
1190	 * The physical addresses of the RX and TX rings were obtained by
1191	 * the bus_dmamap_load() calls above; sis_initl() programs them later.
1192	 */
1193
1194	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
1195	if (ifp == NULL) {
1196		printf("sis%d: can not if_alloc()\n", sc->sis_unit);
1197		error = ENOSPC;
1198		goto fail;
1199	}
1200	ifp->if_softc = sc;
1201	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1202	ifp->if_mtu = ETHERMTU;
1203	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1204	ifp->if_ioctl = sis_ioctl;
1205	ifp->if_start = sis_start;
1206	ifp->if_watchdog = sis_watchdog;
1207	ifp->if_init = sis_init;
1208	ifp->if_baudrate = 10000000;
1209	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1210	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
1211	IFQ_SET_READY(&ifp->if_snd);
1212
1213	/*
1214	 * Do MII setup.
1215	 */
1216	if (mii_phy_probe(dev, &sc->sis_miibus,
1217	    sis_ifmedia_upd, sis_ifmedia_sts)) {
1218		printf("sis%d: MII without any PHY!\n", sc->sis_unit);
1219		if_free(ifp);
1220		error = ENXIO;
1221		goto fail;
1222	}
1223
1224	/*
1225	 * Call MI attach routine.
1226	 */
1227	ether_ifattach(ifp, eaddr);
1228
1229	/*
1230	 * Tell the upper layer(s) we support long frames.
1231	 */
1232	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1233	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1234
1235#ifdef DEVICE_POLLING
1236	ifp->if_capabilities |= IFCAP_POLLING;
1237#endif
1238	ifp->if_capenable = ifp->if_capabilities;
1239
1240	/* Hook interrupt last to avoid having to lock softc */
1241	error = bus_setup_intr(dev, sc->sis_irq, INTR_TYPE_NET | INTR_MPSAFE,
1242	    sis_intr, sc, &sc->sis_intrhand);
1243
1244	if (error) {
1245		printf("sis%d: couldn't set up irq\n", unit);
1246		ether_ifdetach(ifp);
1247		if_free(ifp);
1248		goto fail;
1249	}
1250
1251fail:
1252	if (error)
1253		sis_detach(dev);
1254
1255	return(error);
1256}
1257
1258/*
1259 * Shutdown hardware and free up resources. This can be called any
1260 * time after the mutex has been initialized. It is called in both
1261 * the error case in attach and the normal detach case so it needs
1262 * to be careful about only freeing resources that have actually been
1263 * allocated.
1264 */
1265static int
1266sis_detach(device_t dev)
1267{
1268	struct sis_softc	*sc;
1269	struct ifnet		*ifp;
1270
1271	sc = device_get_softc(dev);
1272	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
1273	SIS_LOCK(sc);
1274	ifp = sc->sis_ifp;
1275
1276	/* These should only be active if attach succeeded. */
1277	if (device_is_attached(dev)) {
1278		sis_reset(sc);
1279		sis_stop(sc);
1280		ether_ifdetach(ifp);
1281		if_free(ifp);
1282	}
1283	if (sc->sis_miibus)
1284		device_delete_child(dev, sc->sis_miibus);
1285	bus_generic_detach(dev);
1286
1287	if (sc->sis_intrhand)
1288		bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand);
1289	if (sc->sis_irq)
1290		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq);
1291	if (sc->sis_res)
1292		bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res);
1293
1294	if (sc->sis_rx_tag) {
1295		bus_dmamap_unload(sc->sis_rx_tag,
1296		    sc->sis_rx_dmamap);
1297		bus_dmamem_free(sc->sis_rx_tag,
1298		    sc->sis_rx_list, sc->sis_rx_dmamap);
1299		bus_dma_tag_destroy(sc->sis_rx_tag);
1300	}
1301	if (sc->sis_tx_tag) {
1302		bus_dmamap_unload(sc->sis_tx_tag,
1303		    sc->sis_tx_dmamap);
1304		bus_dmamem_free(sc->sis_tx_tag,
1305		    sc->sis_tx_list, sc->sis_tx_dmamap);
1306		bus_dma_tag_destroy(sc->sis_tx_tag);
1307	}
1308	if (sc->sis_parent_tag)
1309		bus_dma_tag_destroy(sc->sis_parent_tag);
1310	if (sc->sis_tag)
1311		bus_dma_tag_destroy(sc->sis_tag);
1312
1313	SIS_UNLOCK(sc);
1314	mtx_destroy(&sc->sis_mtx);
1315
1316	return(0);
1317}
1318
1319/*
1320 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1321 * we arrange the descriptors in a closed ring, so that the last descriptor
1322 * points back to the first.
1323 */
1324static int
1325sis_ring_init(struct sis_softc *sc)
1326{
1327	int i, error;
1328	struct sis_desc *dp;
1329
1330	dp = &sc->sis_tx_list[0];
1331	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
1332		if (i == (SIS_TX_LIST_CNT - 1))
1333			dp->sis_nextdesc = &sc->sis_tx_list[0];
1334		else
1335			dp->sis_nextdesc = dp + 1;
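		/*
		 * Record the bus address of the next descriptor in this
		 * descriptor's link (sis_next) field.
		 */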
1336		bus_dmamap_load(sc->sis_tx_tag,
1337		    sc->sis_tx_dmamap,
1338		    dp->sis_nextdesc, sizeof(struct sis_desc),
1339		    sis_dma_map_desc_next, dp, 0);
1340		dp->sis_mbuf = NULL;
1341		dp->sis_ptr = 0;
1342		dp->sis_ctl = 0;
1343	}
1344
1345	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
1346
1347	bus_dmamap_sync(sc->sis_tx_tag,
1348	    sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE);
1349
1350	dp = &sc->sis_rx_list[0];
1351	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
1352		error = sis_newbuf(sc, dp, NULL);
1353		if (error)
1354			return(error);
1355		if (i == (SIS_RX_LIST_CNT - 1))
1356			dp->sis_nextdesc = &sc->sis_rx_list[0];
1357		else
1358			dp->sis_nextdesc = dp + 1;
1359		bus_dmamap_load(sc->sis_rx_tag,
1360		    sc->sis_rx_dmamap,
1361		    dp->sis_nextdesc, sizeof(struct sis_desc),
1362		    sis_dma_map_desc_next, dp, 0);
1363	}
1364
1365	bus_dmamap_sync(sc->sis_rx_tag,
1366	    sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE);
1367
1368	sc->sis_rx_pdsc = &sc->sis_rx_list[0];
1369
1370	return(0);
1371}
1372
1373/*
1374 * Initialize an RX descriptor and attach an MBUF cluster.
1375 */
1376static int
1377sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
1378{
1379
1380	if (c == NULL)
1381		return(EINVAL);
1382
1383	if (m == NULL) {
1384		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1385		if (m == NULL)
1386			return(ENOBUFS);
1387	} else
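		/*
		 * Recycle the caller's mbuf: reset its data pointer to
		 * the start of the cluster.
		 */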
1388		m->m_data = m->m_ext.ext_buf;
1389
1390	c->sis_mbuf = m;
1391	c->sis_ctl = SIS_RXLEN;
1392
1393	bus_dmamap_create(sc->sis_tag, 0, &c->sis_map);
1394	bus_dmamap_load(sc->sis_tag, c->sis_map,
1395	    mtod(m, void *), MCLBYTES,
1396	    sis_dma_map_desc_ptr, c, 0);
1397	bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD);
1398
1399	return(0);
1400}
1401
1402/*
1403 * A frame has been uploaded: pass the resulting mbuf chain up to
1404 * the higher level protocols.
1405 */
1406static void
1407sis_rxeof(struct sis_softc *sc)
1408{
1409        struct mbuf		*m;
1410        struct ifnet		*ifp;
1411	struct sis_desc		*cur_rx;
1412	int			total_len = 0;
1413	u_int32_t		rxstat;
1414
1415	SIS_LOCK_ASSERT(sc);
1416
1417	ifp = sc->sis_ifp;
1418
1419	for(cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx);
1420	    cur_rx = cur_rx->sis_nextdesc) {
1421
1422#ifdef DEVICE_POLLING
1423		if (ifp->if_flags & IFF_POLLING) {
1424			if (sc->rxcycles <= 0)
1425				break;
1426			sc->rxcycles--;
1427		}
1428#endif /* DEVICE_POLLING */
1429		rxstat = cur_rx->sis_rxstat;
1430		bus_dmamap_sync(sc->sis_tag,
1431		    cur_rx->sis_map, BUS_DMASYNC_POSTWRITE);
1432		bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map);
1433		bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map);
1434		m = cur_rx->sis_mbuf;
1435		cur_rx->sis_mbuf = NULL;
1436		total_len = SIS_RXBYTES(cur_rx);
1437
1438		/*
1439		 * If an error occurs, update stats, clear the
1440		 * status word and leave the mbuf cluster in place:
1441		 * it should simply get re-used next time this descriptor
1442	 	 * comes up in the ring.
1443		 */
1444		if (!(rxstat & SIS_CMDSTS_PKT_OK)) {
1445			ifp->if_ierrors++;
1446			if (rxstat & SIS_RXSTAT_COLL)
1447				ifp->if_collisions++;
1448			sis_newbuf(sc, cur_rx, m);
1449			continue;
1450		}
1451
1452		/* No errors; receive the packet. */
1453#if defined(__i386__) || defined(__amd64__)
1454		/*
1455		 * On the x86 we do not have alignment problems, so try to
1456		 * allocate a new buffer for the receive ring, and pass up
1457		 * the one where the packet is already, saving the expensive
1458		 * copy done in m_devget().
1459		 * If we are on an architecture with alignment problems, or
1460		 * if the allocation fails, then use m_devget and leave the
1461		 * existing buffer in the receive ring.
1462		 */
1463		if (sis_newbuf(sc, cur_rx, NULL) == 0)
1464			m->m_pkthdr.len = m->m_len = total_len;
1465		else
1466#endif
1467		{
1468			struct mbuf		*m0;
1469			m0 = m_devget(mtod(m, char *), total_len,
1470				ETHER_ALIGN, ifp, NULL);
1471			sis_newbuf(sc, cur_rx, m);
1472			if (m0 == NULL) {
1473				ifp->if_ierrors++;
1474				continue;
1475			}
1476			m = m0;
1477		}
1478
1479		ifp->if_ipackets++;
1480		m->m_pkthdr.rcvif = ifp;
1481
1482		SIS_UNLOCK(sc);
1483		(*ifp->if_input)(ifp, m);
1484		SIS_LOCK(sc);
1485	}
1486
1487	sc->sis_rx_pdsc = cur_rx;
1488}
1489
1490static void
1491sis_rxeoc(struct sis_softc *sc)
1492{
1493
1494	SIS_LOCK_ASSERT(sc);
1495	sis_rxeof(sc);
1496	sis_initl(sc);
1497}
1498
1499/*
1500 * A frame was downloaded to the chip. It's safe for us to clean up
1501 * the list buffers.
1502 */
1503
1504static void
1505sis_txeof(struct sis_softc *sc)
1506{
1507	struct ifnet		*ifp;
1508	u_int32_t		idx;
1509
1510	SIS_LOCK_ASSERT(sc);
1511	ifp = sc->sis_ifp;
1512
1513	/*
1514	 * Go through our tx list and free mbufs for those
1515	 * frames that have been transmitted.
1516	 */
1517	for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0;
1518	    sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT) ) {
1519		struct sis_desc *cur_tx = &sc->sis_tx_list[idx];
1520
1521		if (SIS_OWNDESC(cur_tx))
1522			break;
1523
1524		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
1525			continue;
1526
1527		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
1528			ifp->if_oerrors++;
1529			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
1530				ifp->if_collisions++;
1531			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
1532				ifp->if_collisions++;
1533		}
1534
1535		ifp->if_collisions +=
1536		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;
1537
1538		ifp->if_opackets++;
1539		if (cur_tx->sis_mbuf != NULL) {
1540			m_freem(cur_tx->sis_mbuf);
1541			cur_tx->sis_mbuf = NULL;
1542			bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map);
1543			bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map);
1544		}
1545	}
1546
1547	if (idx != sc->sis_tx_cons) {
1548		/* we freed up some buffers */
1549		sc->sis_tx_cons = idx;
1550		ifp->if_flags &= ~IFF_OACTIVE;
1551	}
1552
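	/* Cancel the watchdog when the TX ring drains, else keep it armed. */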
1553	ifp->if_timer = (sc->sis_tx_cnt == 0) ? 0 : 5;
1554
1555	return;
1556}
1557
1558static void
1559sis_tick(void *xsc)
1560{
1561	struct sis_softc	*sc;
1562	struct mii_data		*mii;
1563	struct ifnet		*ifp;
1564
1565	sc = xsc;
1566	SIS_LOCK(sc);
1567	sc->in_tick = 1;
1568	ifp = sc->sis_ifp;
1569
1570	mii = device_get_softc(sc->sis_miibus);
1571	mii_tick(mii);
1572
1573	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
1574	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1575		sc->sis_link++;
1576		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1577			sis_startl(ifp);
1578	}
1579
1580	callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
1581	sc->in_tick = 0;
1582	SIS_UNLOCK(sc);
1583}
1584
1585#ifdef DEVICE_POLLING
1586static poll_handler_t sis_poll;
1587
1588static void
1589sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1590{
1591	struct	sis_softc *sc = ifp->if_softc;
1592
1593	SIS_LOCK(sc);
1594	if (!(ifp->if_capenable & IFCAP_POLLING)) {
1595		ether_poll_deregister(ifp);
1596		cmd = POLL_DEREGISTER;
1597	}
1598	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
1599		CSR_WRITE_4(sc, SIS_IER, 1);
1600		goto done;
1601	}
1602
1603	/*
1604	 * On the sis, reading the status register also clears it.
1605	 * So before returning to intr mode we must make sure that all
1606	 * possible pending sources of interrupts have been served.
1607	 * In practice this means running the *eof routines to completion
1608	 * and then handling any interrupt sources the ISR still indicates.
1609	 */
1610	sc->rxcycles = count;
1611	sis_rxeof(sc);
1612	sis_txeof(sc);
1613	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1614		sis_startl(ifp);
1615
1616	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1617		u_int32_t	status;
1618
1619		/* Reading the ISR register clears all interrupts. */
1620		status = CSR_READ_4(sc, SIS_ISR);
1621
1622		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1623			sis_rxeoc(sc);
1624
1625		if (status & (SIS_ISR_RX_IDLE))
1626			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1627
1628		if (status & SIS_ISR_SYSERR) {
1629			sis_reset(sc);
1630			sis_initl(sc);
1631		}
1632	}
1633done:
1634	SIS_UNLOCK(sc);
1635}
1636#endif /* DEVICE_POLLING */
1637
1638static void
1639sis_intr(void *arg)
1640{
1641	struct sis_softc	*sc;
1642	struct ifnet		*ifp;
1643	u_int32_t		status;
1644
1645	sc = arg;
1646	ifp = sc->sis_ifp;
1647
1648	if (sc->sis_stopped)	/* Most likely shared interrupt */
1649		return;
1650
1651	SIS_LOCK(sc);
1652#ifdef DEVICE_POLLING
1653	if (ifp->if_flags & IFF_POLLING)
1654		goto done;
1655	if ((ifp->if_capenable & IFCAP_POLLING) &&
1656	    ether_poll_register(sis_poll, ifp)) { /* ok, disable interrupts */
1657		CSR_WRITE_4(sc, SIS_IER, 0);
1658		goto done;
1659	}
1660#endif /* DEVICE_POLLING */
1661
1662	/* Disable interrupts. */
1663	CSR_WRITE_4(sc, SIS_IER, 0);
1664
1665	for (;;) {
1666		SIS_LOCK_ASSERT(sc);
1667		/* Reading the ISR register clears all interrupts. */
1668		status = CSR_READ_4(sc, SIS_ISR);
1669
1670		if ((status & SIS_INTRS) == 0)
1671			break;
1672
1673		if (status &
1674		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1675		     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
1676			sis_txeof(sc);
1677
1678		if (status & (SIS_ISR_RX_DESC_OK|SIS_ISR_RX_OK|SIS_ISR_RX_IDLE))
1679			sis_rxeof(sc);
1680
1681		if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW))
1682			sis_rxeoc(sc);
1683
1684		if (status & (SIS_ISR_RX_IDLE))
1685			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1686
1687		if (status & SIS_ISR_SYSERR) {
1688			sis_reset(sc);
1689			sis_initl(sc);
1690		}
1691	}
1692
1693	/* Re-enable interrupts. */
1694	CSR_WRITE_4(sc, SIS_IER, 1);
1695
1696	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1697		sis_startl(ifp);
1698
1699#ifdef DEVICE_POLLING
1700done:
1701#endif /* DEVICE_POLLING */
1702	SIS_UNLOCK(sc);
1703}
1704
1705/*
1706 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1707 * pointers to the fragment pointers.
1708 */
1709static int
1710sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx)
1711{
1712	struct sis_desc		*f = NULL;
1713	struct mbuf		*m;
1714	int			frag, cur, cnt = 0, chainlen = 0;
1715
1716	/*
1717	 * If there's no way we can send any packets, return now.
1718	 */
1719	if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2)
1720		return (ENOBUFS);
1721
1722	/*
1723	 * Count the number of frags in this chain to see if
1724	 * we need to m_defrag.  Since the descriptor list is shared
1725	 * by all packets, we'll m_defrag long chains so that they
1726	 * do not use up the entire list, even if they would fit.
1727	 */
1728
1729	for (m = *m_head; m != NULL; m = m->m_next)
1730		chainlen++;
1731
1732	if ((chainlen > SIS_TX_LIST_CNT / 4) ||
1733	    ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) {
1734		m = m_defrag(*m_head, M_DONTWAIT);
1735		if (m == NULL)
1736			return (ENOBUFS);
1737		*m_head = m;
1738	}
1739
1740	/*
1741 	 * Start packing the mbufs in this chain into
1742	 * the fragment pointers. Stop when we run out
1743 	 * of fragments or hit the end of the mbuf chain.
1744	 */
1745	cur = frag = *txidx;
1746
1747	for (m = *m_head; m != NULL; m = m->m_next) {
1748		if (m->m_len != 0) {
1749			if ((SIS_TX_LIST_CNT -
1750			    (sc->sis_tx_cnt + cnt)) < 2)
1751				return(ENOBUFS);
1752			f = &sc->sis_tx_list[frag];
1753			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
1754			bus_dmamap_create(sc->sis_tag, 0, &f->sis_map);
1755			bus_dmamap_load(sc->sis_tag, f->sis_map,
1756			    mtod(m, void *), m->m_len,
1757			    sis_dma_map_desc_ptr, f, 0);
1758			bus_dmamap_sync(sc->sis_tag,
1759			    f->sis_map, BUS_DMASYNC_PREREAD);
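			/*
			 * Hand every descriptor except the first one to the
			 * chip now; the first gets SIS_CMDSTS_OWN only after
			 * the whole chain has been built, below.
			 */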
1760			if (cnt != 0)
1761				f->sis_ctl |= SIS_CMDSTS_OWN;
1762			cur = frag;
1763			SIS_INC(frag, SIS_TX_LIST_CNT);
1764			cnt++;
1765		}
1766	}
1767
1768	if (m != NULL)
1769		return(ENOBUFS);
1770
1771	sc->sis_tx_list[cur].sis_mbuf = *m_head;
1772	sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
1773	sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
1774	sc->sis_tx_cnt += cnt;
1775	*txidx = frag;
1776
1777	return(0);
1778}
1779
1780/*
1781 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1782 * to the mbuf data regions directly in the transmit lists. We also save a
1783 * copy of the pointers since the transmit list fragment pointers are
1784 * physical addresses.
1785 */
1786
1787static void
1788sis_start(struct ifnet *ifp)
1789{
1790	struct sis_softc	*sc;
1791
1792	sc = ifp->if_softc;
1793	SIS_LOCK(sc);
1794	sis_startl(ifp);
1795	SIS_UNLOCK(sc);
1796}
1797
1798static void
1799sis_startl(struct ifnet *ifp)
1800{
1801	struct sis_softc	*sc;
1802	struct mbuf		*m_head = NULL;
1803	u_int32_t		idx, queued = 0;
1804
1805	sc = ifp->if_softc;
1806
1807	SIS_LOCK_ASSERT(sc);
1808
1809	if (!sc->sis_link)
1810		return;
1811
1812	idx = sc->sis_tx_prod;
1813
1814	if (ifp->if_flags & IFF_OACTIVE)
1815		return;
1816
1817	while(sc->sis_tx_list[idx].sis_mbuf == NULL) {
1818		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1819		if (m_head == NULL)
1820			break;
1821
1822		if (sis_encap(sc, &m_head, &idx)) {
1823			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1824			ifp->if_flags |= IFF_OACTIVE;
1825			break;
1826		}
1827
1828		queued++;
1829
1830		/*
1831		 * If there's a BPF listener, bounce a copy of this frame
1832		 * to him.
1833		 */
1834		BPF_MTAP(ifp, m_head);
1835
1836	}
1837
1838	if (queued) {
1839		/* Transmit */
1840		sc->sis_tx_prod = idx;
1841		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1842
1843		/*
1844		 * Set a timeout in case the chip goes out to lunch.
1845		 */
1846		ifp->if_timer = 5;
1847	}
1848}
1849
1850static void
1851sis_init(void *xsc)
1852{
1853	struct sis_softc	*sc = xsc;
1854
1855	SIS_LOCK(sc);
1856	sis_initl(sc);
1857	SIS_UNLOCK(sc);
1858}
1859
1860static void
1861sis_initl(struct sis_softc *sc)
1862{
1863	struct ifnet		*ifp = sc->sis_ifp;
1864	struct mii_data		*mii;
1865
1866	SIS_LOCK_ASSERT(sc);
1867
1868	/*
1869	 * Cancel pending I/O and free all RX/TX buffers.
1870	 */
1871	sis_stop(sc);
1872	sc->sis_stopped = 0;
1873
1874#ifdef notyet
1875	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
1876		/*
1877		 * Configure 400usec of interrupt holdoff.  This is based
1878		 * on emperical tests on a Soekris 4801.
1879 		 */
1880		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
1881	}
1882#endif
1883
1884	mii = device_get_softc(sc->sis_miibus);
1885
1886	/* Set MAC address */
1887	if (sc->sis_type == SIS_TYPE_83815) {
1888		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1889		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1890		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[0]);
1891		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1892		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1893		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[1]);
1894		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1895		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1896		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[2]);
1897	} else {
1898		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1899		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1900		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[0]);
1901		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1902		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1903		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[1]);
1904		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1905		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1906		    ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[2]);
1907	}
1908
1909	/* Init circular TX/RX lists. */
1910	if (sis_ring_init(sc) != 0) {
1911		printf("sis%d: initialization failed: no "
1912			"memory for rx buffers\n", sc->sis_unit);
1913		sis_stop(sc);
1914		return;
1915	}
1916
1917	/*
1918	 * Short Cable Receive Errors (MP21.E)
1919	 * Also, page 78 of the DP83815 data sheet (September 2002 version)
1920	 * recommends the following register settings "for optimum
1921	 * performance" for rev 15C.  The driver from NS also sets
1922	 * the PHY_CR register for later versions.
1923	 */
1924	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1925		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1926		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1927		if (sc->sis_srr == NS_SRR_15C) {
1928			/* set val for c2 */
1929			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1930			/* load/kill c2 */
1931			CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1932			/* raise SD off, from 4 to c */
1933			CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1934		}
1935		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1936	}
1937
1938
1939	/*
1940	 * For the NatSemi chip, we have to explicitly enable the
1941	 * reception of ARP frames, as well as turn on the 'perfect
1942	 * match' filter where we store the station address, otherwise
1943	 * we won't receive unicasts meant for this host.
1944	 */
1945	if (sc->sis_type == SIS_TYPE_83815) {
1946		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
1947		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
1948	}
1949
1950	 /* If we want promiscuous mode, set the allframes bit. */
1951	if (ifp->if_flags & IFF_PROMISC) {
1952		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
1953	} else {
1954		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
1955	}
1956
1957	/*
1958	 * Set the capture broadcast bit to capture broadcast frames.
1959	 */
1960	if (ifp->if_flags & IFF_BROADCAST) {
1961		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
1962	} else {
1963		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
1964	}
1965
1966	/*
1967	 * Load the multicast filter.
1968	 */
1969	if (sc->sis_type == SIS_TYPE_83815)
1970		sis_setmulti_ns(sc);
1971	else
1972		sis_setmulti_sis(sc);
1973
1974	/* Turn the receive filter on */
1975	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
1976
1977	/*
1978	 * Load the address of the RX and TX lists.
1979	 */
1980	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr);
1981	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr);
1982
1983	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
1984	 * the PCI bus. When this bit is set, the Max DMA Burst Size
1985	 * for TX/RX DMA should be no larger than 16 double words.
1986	 */
1987	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
1988		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1989	} else {
1990		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1991	}
1992
1993	/* Accept Long Packets for VLAN support */
1994	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1995
1996	/* Set TX configuration */
1997	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
1998		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
1999	} else {
2000		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2001	}
2002
2003	/* Set full/half duplex mode. */
2004	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
2005		SIS_SETBIT(sc, SIS_TX_CFG,
2006		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
2007		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
2008	} else {
2009		SIS_CLRBIT(sc, SIS_TX_CFG,
2010		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
2011		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
2012	}
2013
2014	if (sc->sis_type == SIS_TYPE_83816) {
2015		/*
2016		 * MPII03.D: Half Duplex Excessive Collisions.
2017		 * Also see page 49 of the DP83816 manual.
2018		 */
2019		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
2020	}
2021
2022	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
2023	     IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2024		uint32_t reg;
2025
2026		/*
2027		 * Short Cable Receive Errors (MP21.E)
2028		 */
2029		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2030		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
2031		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
2032		DELAY(100000);
2033		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
2034		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
2035			device_printf(sc->sis_self,
2036			    "Applying short cable fix (reg=%x)\n", reg);
2037			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
2038			reg = CSR_READ_4(sc, NS_PHY_DSPCFG);
2039			SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20);
2040		}
2041		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
2042	}
2043
2044	/*
2045	 * Enable interrupts.
2046	 */
2047	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
2048#ifdef DEVICE_POLLING
2049	/*
2050	 * ... only enable interrupts if we are not polling; make sure
2051	 * they are off otherwise.
2052	 */
2053	if (ifp->if_flags & IFF_POLLING)
2054		CSR_WRITE_4(sc, SIS_IER, 0);
2055	else
2056#endif /* DEVICE_POLLING */
2057	CSR_WRITE_4(sc, SIS_IER, 1);
2058
2059	/* Enable receiver and transmitter. */
2060	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
2061	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
2062
2063#ifdef notdef
2064	mii_mediachg(mii);
2065#endif
2066
2067	ifp->if_flags |= IFF_RUNNING;
2068	ifp->if_flags &= ~IFF_OACTIVE;
2069
2070	if (!sc->in_tick)
2071		callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
2072}
2073
2074/*
2075 * Set media options.
2076 */
2077static int
2078sis_ifmedia_upd(struct ifnet *ifp)
2079{
2080	struct sis_softc	*sc;
2081	struct mii_data		*mii;
2082
2083	sc = ifp->if_softc;
2084
2085	mii = device_get_softc(sc->sis_miibus);
2086	sc->sis_link = 0;
2087	if (mii->mii_instance) {
2088		struct mii_softc	*miisc;
2089		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2090			mii_phy_reset(miisc);
2091	}
2092	mii_mediachg(mii);
2093
2094	return(0);
2095}
2096
2097/*
2098 * Report current media status.
2099 */
2100static void
2101sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2102{
2103	struct sis_softc	*sc;
2104	struct mii_data		*mii;
2105
2106	sc = ifp->if_softc;
2107
2108	mii = device_get_softc(sc->sis_miibus);
2109	mii_pollstat(mii);
2110	ifmr->ifm_active = mii->mii_media_active;
2111	ifmr->ifm_status = mii->mii_media_status;
2112}
2113
2114static int
2115sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2116{
2117	struct sis_softc	*sc = ifp->if_softc;
2118	struct ifreq		*ifr = (struct ifreq *) data;
2119	struct mii_data		*mii;
2120	int			error = 0;
2121
2122	switch(command) {
2123	case SIOCSIFFLAGS:
2124		if (ifp->if_flags & IFF_UP) {
2125			sis_init(sc);
2126		} else if (ifp->if_flags & IFF_RUNNING) {
2127			SIS_LOCK(sc);
2128			sis_stop(sc);
2129			SIS_UNLOCK(sc);
2130		}
2131		error = 0;
2132		break;
2133	case SIOCADDMULTI:
2134	case SIOCDELMULTI:
2135		SIS_LOCK(sc);
2136		if (sc->sis_type == SIS_TYPE_83815)
2137			sis_setmulti_ns(sc);
2138		else
2139			sis_setmulti_sis(sc);
2140		SIS_UNLOCK(sc);
2141		error = 0;
2142		break;
2143	case SIOCGIFMEDIA:
2144	case SIOCSIFMEDIA:
2145		mii = device_get_softc(sc->sis_miibus);
2146		SIS_LOCK(sc);
2147		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2148		SIS_UNLOCK(sc);
2149		break;
2150	case SIOCSIFCAP:
2151		ifp->if_capenable &= ~IFCAP_POLLING;
2152		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
2153		break;
2154	default:
2155		error = ether_ioctl(ifp, command, data);
2156		break;
2157	}
2158
2159	return(error);
2160}
2161
2162static void
2163sis_watchdog(struct ifnet *ifp)
2164{
2165	struct sis_softc *sc;
2166
2167	sc = ifp->if_softc;
2168
2169	SIS_LOCK(sc);
2170	if (sc->sis_stopped) {
2171		SIS_UNLOCK(sc);
2172		return;
2173	}
2174
2175	ifp->if_oerrors++;
2176	printf("sis%d: watchdog timeout\n", sc->sis_unit);
2177
2178	sis_stop(sc);
2179	sis_reset(sc);
2180	sis_initl(sc);
2181
2182	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2183		sis_startl(ifp);
2184
2185	SIS_UNLOCK(sc);
2186}
2187
2188/*
2189 * Stop the adapter and free any mbufs allocated to the
2190 * RX and TX lists.
2191 */
2192static void
2193sis_stop(struct sis_softc *sc)
2194{
2195	int i;
2196	struct ifnet *ifp;
2197	struct sis_desc *dp;
2198
2199	if (sc->sis_stopped)
2200		return;
2201	SIS_LOCK_ASSERT(sc);
2202	ifp = sc->sis_ifp;
2203	ifp->if_timer = 0;
2204
2205	callout_stop(&sc->sis_stat_ch);
2206
2207	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2208#ifdef DEVICE_POLLING
2209	ether_poll_deregister(ifp);
2210#endif
2211	CSR_WRITE_4(sc, SIS_IER, 0);
2212	CSR_WRITE_4(sc, SIS_IMR, 0);
2213	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2214	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
2215	DELAY(1000);
2216	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2217	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2218
2219	sc->sis_link = 0;
2220
2221	/*
2222	 * Free data in the RX lists.
2223	 */
2224	dp = &sc->sis_rx_list[0];
2225	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
2226		if (dp->sis_mbuf == NULL)
2227			continue;
2228		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
2229		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
2230		m_freem(dp->sis_mbuf);
2231		dp->sis_mbuf = NULL;
2232	}
2233	bzero(sc->sis_rx_list, SIS_RX_LIST_SZ);
2234
2235	/*
2236	 * Free the TX list buffers.
2237	 */
2238	dp = &sc->sis_tx_list[0];
2239	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
2240		if (dp->sis_mbuf == NULL)
2241			continue;
2242		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
2243		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
2244		m_freem(dp->sis_mbuf);
2245		dp->sis_mbuf = NULL;
2246	}
2247
2248	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);
2249
2250	sc->sis_stopped = 1;
2251}
2252
2253/*
2254 * Stop all chip I/O so that the kernel's probe routines don't
2255 * get confused by errant DMAs when rebooting.
2256 */
2257static void
2258sis_shutdown(device_t dev)
2259{
2260	struct sis_softc	*sc;
2261
2262	sc = device_get_softc(dev);
2263	SIS_LOCK(sc);
2264	sis_reset(sc);
2265	sis_stop(sc);
2266	SIS_UNLOCK(sc);
2267}
2268
2269static device_method_t sis_methods[] = {
2270	/* Device interface */
2271	DEVMETHOD(device_probe,		sis_probe),
2272	DEVMETHOD(device_attach,	sis_attach),
2273	DEVMETHOD(device_detach,	sis_detach),
2274	DEVMETHOD(device_shutdown,	sis_shutdown),
2275
2276	/* bus interface */
2277	DEVMETHOD(bus_print_child,	bus_generic_print_child),
2278	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
2279
2280	/* MII interface */
2281	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
2282	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
2283	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
2284
2285	{ 0, 0 }
2286};
2287
2288static driver_t sis_driver = {
2289	"sis",
2290	sis_methods,
2291	sizeof(struct sis_softc)
2292};
2293
2294static devclass_t sis_devclass;
2295
2296DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
2297DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
2298