1/*	$OpenBSD: if_sis.c,v 1.145 2024/05/24 06:02:56 jsg Exp $ */
2/*
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/pci/if_sis.c,v 1.30 2001/02/06 10:11:47 phk Exp $
34 */
35
36/*
37 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
38 * available from http://www.sis.com.tw.
39 *
40 * This driver also supports the NatSemi DP83815. Datasheets are
41 * available from http://www.national.com.
42 *
43 * Written by Bill Paul <wpaul@ee.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
60
61#include "bpfilter.h"
62
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/mbuf.h>
66#include <sys/ioctl.h>
67#include <sys/errno.h>
68#include <sys/timeout.h>
69
70#include <net/if.h>
71
72#include <netinet/in.h>
73#include <netinet/if_ether.h>
74
75#include <net/if_media.h>
76
77#if NBPFILTER > 0
78#include <net/bpf.h>
79#endif
80
81#include <sys/device.h>
82
83#include <dev/mii/miivar.h>
84
85#include <dev/pci/pcireg.h>
86#include <dev/pci/pcivar.h>
87#include <dev/pci/pcidevs.h>
88
89#define SIS_USEIOSPACE
90
91#include <dev/pci/if_sisreg.h>
92
93int sis_probe(struct device *, void *, void *);
94void sis_attach(struct device *, struct device *, void *);
95int sis_activate(struct device *, int);
96
97const struct cfattach sis_ca = {
98	sizeof(struct sis_softc), sis_probe, sis_attach, NULL,
99	sis_activate
100};
101
102struct cfdriver sis_cd = {
103	NULL, "sis", DV_IFNET
104};
105
106int sis_intr(void *);
107void sis_fill_rx_ring(struct sis_softc *);
108int sis_newbuf(struct sis_softc *, struct sis_desc *);
109int sis_encap(struct sis_softc *, struct mbuf *, u_int32_t *);
110void sis_rxeof(struct sis_softc *);
111void sis_txeof(struct sis_softc *);
112void sis_tick(void *);
113void sis_start(struct ifnet *);
114int sis_ioctl(struct ifnet *, u_long, caddr_t);
115void sis_init(void *);
116void sis_stop(struct sis_softc *);
117void sis_watchdog(struct ifnet *);
118int sis_ifmedia_upd(struct ifnet *);
119void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
120
121u_int16_t sis_reverse(u_int16_t);
122void sis_delay(struct sis_softc *);
123void sis_eeprom_idle(struct sis_softc *);
124void sis_eeprom_putbyte(struct sis_softc *, int);
125void sis_eeprom_getword(struct sis_softc *, int, u_int16_t *);
126#if defined(__amd64__) || defined(__i386__)
127void sis_read_cmos(struct sis_softc *, struct pci_attach_args *, caddr_t, int, int);
128#endif
129void sis_read_mac(struct sis_softc *, struct pci_attach_args *);
130void sis_read_eeprom(struct sis_softc *, caddr_t, int, int, int);
131void sis_read96x_mac(struct sis_softc *);
132
133void sis_mii_sync(struct sis_softc *);
134void sis_mii_send(struct sis_softc *, u_int32_t, int);
135int sis_mii_readreg(struct sis_softc *, struct sis_mii_frame *);
136int sis_mii_writereg(struct sis_softc *, struct sis_mii_frame *);
137int sis_miibus_readreg(struct device *, int, int);
138void sis_miibus_writereg(struct device *, int, int, int);
139void sis_miibus_statchg(struct device *);
140
141u_int32_t sis_mchash(struct sis_softc *, const uint8_t *);
142void sis_iff(struct sis_softc *);
143void sis_iff_ns(struct sis_softc *);
144void sis_iff_sis(struct sis_softc *);
145void sis_reset(struct sis_softc *);
146int sis_ring_init(struct sis_softc *);
147
148#define SIS_SETBIT(sc, reg, x)				\
149	CSR_WRITE_4(sc, reg,				\
150		CSR_READ_4(sc, reg) | (x))
151
152#define SIS_CLRBIT(sc, reg, x)				\
153	CSR_WRITE_4(sc, reg,				\
154		CSR_READ_4(sc, reg) & ~(x))
155
156#define SIO_SET(x)					\
157	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
158
159#define SIO_CLR(x)					\
160	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
161
162const struct pci_matchid sis_devices[] = {
163	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900 },
164	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016 },
165	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815 }
166};
167
168/*
169 * Routine to reverse the bits in a word. Stolen almost
170 * verbatim from /usr/games/fortune.
171 */
172u_int16_t
173sis_reverse(u_int16_t n)
174{
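	/*
	 * Classic O(log n) bit reversal: swap adjacent bits, then 2-bit
	 * pairs, then nibbles, then bytes.
	 */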
175	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
176	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
177	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
178	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
179
180	return (n);
181}
182
183void
184sis_delay(struct sis_softc *sc)
185{
186	int			idx;
187
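	/*
	 * Burn time with dummy register reads: (300 / 33) + 1 works out
	 * to ten reads, which appears to give the bit-bang EEPROM code
	 * that calls this enough settling time between transitions.
	 */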
188	for (idx = (300 / 33) + 1; idx > 0; idx--)
189		CSR_READ_4(sc, SIS_CSR);
190}
191
192void
193sis_eeprom_idle(struct sis_softc *sc)
194{
195	int			i;
196
197	SIO_SET(SIS_EECTL_CSEL);
198	sis_delay(sc);
199	SIO_SET(SIS_EECTL_CLK);
200	sis_delay(sc);
201
202	for (i = 0; i < 25; i++) {
203		SIO_CLR(SIS_EECTL_CLK);
204		sis_delay(sc);
205		SIO_SET(SIS_EECTL_CLK);
206		sis_delay(sc);
207	}
208
209	SIO_CLR(SIS_EECTL_CLK);
210	sis_delay(sc);
211	SIO_CLR(SIS_EECTL_CSEL);
212	sis_delay(sc);
213	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
214}
215
216/*
 * Send a read command and word address to the EEPROM.
218 */
219void
220sis_eeprom_putbyte(struct sis_softc *sc, int addr)
221{
222	int			d, i;
223
224	d = addr | SIS_EECMD_READ;
225
226	/*
227	 * Feed in each bit and strobe the clock.
228	 */
229	for (i = 0x400; i; i >>= 1) {
230		if (d & i)
231			SIO_SET(SIS_EECTL_DIN);
232		else
233			SIO_CLR(SIS_EECTL_DIN);
234		sis_delay(sc);
235		SIO_SET(SIS_EECTL_CLK);
236		sis_delay(sc);
237		SIO_CLR(SIS_EECTL_CLK);
238		sis_delay(sc);
239	}
240}
241
242/*
243 * Read a word of data stored in the EEPROM at address 'addr.'
244 */
245void
246sis_eeprom_getword(struct sis_softc *sc, int addr, u_int16_t *dest)
247{
248	int			i;
249	u_int16_t		word = 0;
250
251	/* Force EEPROM to idle state. */
252	sis_eeprom_idle(sc);
253
254	/* Enter EEPROM access mode. */
255	sis_delay(sc);
256	SIO_CLR(SIS_EECTL_CLK);
257	sis_delay(sc);
258	SIO_SET(SIS_EECTL_CSEL);
259	sis_delay(sc);
260
261	/*
262	 * Send address of word we want to read.
263	 */
264	sis_eeprom_putbyte(sc, addr);
265
266	/*
267	 * Start reading bits from EEPROM.
268	 */
269	for (i = 0x8000; i; i >>= 1) {
270		SIO_SET(SIS_EECTL_CLK);
271		sis_delay(sc);
272		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
273			word |= i;
274		sis_delay(sc);
275		SIO_CLR(SIS_EECTL_CLK);
276		sis_delay(sc);
277	}
278
279	/* Turn off EEPROM access mode. */
280	sis_eeprom_idle(sc);
281
282	*dest = word;
283}
284
285/*
286 * Read a sequence of words from the EEPROM.
287 */
288void
289sis_read_eeprom(struct sis_softc *sc, caddr_t dest,
290    int off, int cnt, int swap)
291{
292	int			i;
293	u_int16_t		word = 0, *ptr;
294
295	for (i = 0; i < cnt; i++) {
296		sis_eeprom_getword(sc, off + i, &word);
297		ptr = (u_int16_t *)(dest + (i * 2));
298		if (swap)
299			*ptr = letoh16(word);
300		else
301			*ptr = word;
302	}
303}
304
305#if defined(__amd64__) || defined(__i386__)
306void
307sis_read_cmos(struct sis_softc *sc, struct pci_attach_args *pa,
308    caddr_t dest, int off, int cnt)
309{
310	u_int32_t reg;
311	int i;
312
313	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x48);
314	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg | 0x40);
315
316	for (i = 0; i < cnt; i++) {
317		bus_space_write_1(pa->pa_iot, 0x0, 0x70, i + off);
318		*(dest + i) = bus_space_read_1(pa->pa_iot, 0x0, 0x71);
319	}
320
321	pci_conf_write(pa->pa_pc, pa->pa_tag, 0x48, reg & ~0x40);
322}
323#endif
324
325void
326sis_read_mac(struct sis_softc *sc, struct pci_attach_args *pa)
327{
328	uint32_t rxfilt, csrsave;
329	u_int16_t *enaddr = (u_int16_t *) &sc->arpcom.ac_enaddr;
330
331	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
332	csrsave = CSR_READ_4(sc, SIS_CSR);
333
334	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | csrsave);
335	CSR_WRITE_4(sc, SIS_CSR, 0);
336
337	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
338
339	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
340	enaddr[0] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
341	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
342	enaddr[1] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
343	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
344	enaddr[2] = letoh16(CSR_READ_4(sc, SIS_RXFILT_DATA) & 0xffff);
345
346	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
347	CSR_WRITE_4(sc, SIS_CSR, csrsave);
348}
349
350void
351sis_read96x_mac(struct sis_softc *sc)
352{
353	int i;
354
355	SIO_SET(SIS96x_EECTL_REQ);
356
357	for (i = 0; i < 2000; i++) {
358		if ((CSR_READ_4(sc, SIS_EECTL) & SIS96x_EECTL_GNT)) {
359			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
360			    SIS_EE_NODEADDR, 3, 1);
361			break;
362		} else
363			DELAY(1);
364	}
365
366	SIO_SET(SIS96x_EECTL_DONE);
367}
368
369/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
371 */
372void
373sis_mii_sync(struct sis_softc *sc)
374{
375	int			i;
376
377 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
378
379 	for (i = 0; i < 32; i++) {
380 		SIO_SET(SIS_MII_CLK);
381 		DELAY(1);
382 		SIO_CLR(SIS_MII_CLK);
383 		DELAY(1);
384 	}
385}
386
387/*
388 * Clock a series of bits through the MII.
389 */
390void
391sis_mii_send(struct sis_softc *sc, u_int32_t bits, int cnt)
392{
393	int			i;
394
395	SIO_CLR(SIS_MII_CLK);
396
397	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
398		if (bits & i)
399			SIO_SET(SIS_MII_DATA);
400		else
401			SIO_CLR(SIS_MII_DATA);
402		DELAY(1);
403		SIO_CLR(SIS_MII_CLK);
404		DELAY(1);
405		SIO_SET(SIS_MII_CLK);
406	}
407}
408
409/*
 * Read a PHY register through the MII.
411 */
412int
413sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
414{
415	int			i, ack, s;
416
417	s = splnet();
418
419	/*
420	 * Set up frame for RX.
421	 */
422	frame->mii_stdelim = SIS_MII_STARTDELIM;
423	frame->mii_opcode = SIS_MII_READOP;
424	frame->mii_turnaround = 0;
425	frame->mii_data = 0;
426
427	/*
428 	 * Turn on data xmit.
429	 */
430	SIO_SET(SIS_MII_DIR);
431
432	sis_mii_sync(sc);
433
434	/*
435	 * Send command/address info.
436	 */
437	sis_mii_send(sc, frame->mii_stdelim, 2);
438	sis_mii_send(sc, frame->mii_opcode, 2);
439	sis_mii_send(sc, frame->mii_phyaddr, 5);
440	sis_mii_send(sc, frame->mii_regaddr, 5);
441
442	/* Idle bit */
443	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
444	DELAY(1);
445	SIO_SET(SIS_MII_CLK);
446	DELAY(1);
447
448	/* Turn off xmit. */
449	SIO_CLR(SIS_MII_DIR);
450
451	/* Check for ack */
452	SIO_CLR(SIS_MII_CLK);
453	DELAY(1);
454	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
455	SIO_SET(SIS_MII_CLK);
456	DELAY(1);
457
458	/*
459	 * Now try reading data bits. If the ack failed, we still
460	 * need to clock through 16 cycles to keep the PHY(s) in sync.
461	 */
462	if (ack) {
463		for(i = 0; i < 16; i++) {
464			SIO_CLR(SIS_MII_CLK);
465			DELAY(1);
466			SIO_SET(SIS_MII_CLK);
467			DELAY(1);
468		}
469		goto fail;
470	}
471
472	for (i = 0x8000; i; i >>= 1) {
473		SIO_CLR(SIS_MII_CLK);
474		DELAY(1);
475		if (!ack) {
476			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
477				frame->mii_data |= i;
478			DELAY(1);
479		}
480		SIO_SET(SIS_MII_CLK);
481		DELAY(1);
482	}
483
484fail:
485
486	SIO_CLR(SIS_MII_CLK);
487	DELAY(1);
488	SIO_SET(SIS_MII_CLK);
489	DELAY(1);
490
491	splx(s);
492
493	if (ack)
494		return (1);
495	return (0);
496}
497
498/*
499 * Write to a PHY register through the MII.
500 */
501int
502sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
503{
504	int			s;
505
506	s = splnet();
507 	/*
508 	 * Set up frame for TX.
509 	 */
510
511 	frame->mii_stdelim = SIS_MII_STARTDELIM;
512 	frame->mii_opcode = SIS_MII_WRITEOP;
513 	frame->mii_turnaround = SIS_MII_TURNAROUND;
514
515 	/*
516  	 * Turn on data output.
517 	 */
518 	SIO_SET(SIS_MII_DIR);
519
520 	sis_mii_sync(sc);
521
522 	sis_mii_send(sc, frame->mii_stdelim, 2);
523 	sis_mii_send(sc, frame->mii_opcode, 2);
524 	sis_mii_send(sc, frame->mii_phyaddr, 5);
525 	sis_mii_send(sc, frame->mii_regaddr, 5);
526 	sis_mii_send(sc, frame->mii_turnaround, 2);
527 	sis_mii_send(sc, frame->mii_data, 16);
528
529 	/* Idle bit. */
530 	SIO_SET(SIS_MII_CLK);
531 	DELAY(1);
532 	SIO_CLR(SIS_MII_CLK);
533 	DELAY(1);
534
535 	/*
536 	 * Turn off xmit.
537 	 */
538 	SIO_CLR(SIS_MII_DIR);
539
540 	splx(s);
541
542 	return (0);
543}
544
545int
546sis_miibus_readreg(struct device *self, int phy, int reg)
547{
548	struct sis_softc	*sc = (struct sis_softc *)self;
549	struct sis_mii_frame    frame;
550
551	if (sc->sis_type == SIS_TYPE_83815) {
552		if (phy != 0)
553			return (0);
554		/*
555		 * The NatSemi chip can take a while after
556		 * a reset to come ready, during which the BMSR
557		 * returns a value of 0. This is *never* supposed
558		 * to happen: some of the BMSR bits are meant to
559		 * be hardwired in the on position, and this can
560		 * confuse the miibus code a bit during the probe
561		 * and attach phase. So we make an effort to check
562		 * for this condition and wait for it to clear.
563		 */
564		if (!CSR_READ_4(sc, NS_BMSR))
565			DELAY(1000);
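		/*
		 * The DP83815 exposes the internal PHY's MII registers
		 * directly in its own register space, one 32-bit slot
		 * per register starting at NS_BMCR.
		 */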
566		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
567	}
568
569	/*
	 * Chipsets older than SIS_635 don't seem to be able to read or
	 * write the PHY through MDIO, so use the enhanced PHY access
	 * register for them instead.
573	 */
574	if (sc->sis_type == SIS_TYPE_900 &&
575	    sc->sis_rev < SIS_REV_635) {
576		int i, val = 0;
577
578		if (phy != 0)
579			return (0);
580
581		CSR_WRITE_4(sc, SIS_PHYCTL,
582		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
583		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
584
585		for (i = 0; i < SIS_TIMEOUT; i++) {
586			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
587				break;
588		}
589
590		if (i == SIS_TIMEOUT) {
591			printf("%s: PHY failed to come ready\n",
592			    sc->sc_dev.dv_xname);
593			return (0);
594		}
595
596		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
597
598		if (val == 0xFFFF)
599			return (0);
600
601		return (val);
602	} else {
603		bzero(&frame, sizeof(frame));
604
605		frame.mii_phyaddr = phy;
606		frame.mii_regaddr = reg;
607		sis_mii_readreg(sc, &frame);
608
609		return (frame.mii_data);
610	}
611}
612
613void
614sis_miibus_writereg(struct device *self, int phy, int reg, int data)
615{
616	struct sis_softc	*sc = (struct sis_softc *)self;
617	struct sis_mii_frame	frame;
618
619	if (sc->sis_type == SIS_TYPE_83815) {
620		if (phy != 0)
621			return;
622		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
623		return;
624	}
625
626	/*
	 * Chipsets older than SIS_635 don't seem to be able to read or
	 * write the PHY through MDIO, so use the enhanced PHY access
	 * register for them instead.
630	 */
631	if (sc->sis_type == SIS_TYPE_900 &&
632	    sc->sis_rev < SIS_REV_635) {
633		int i;
634
635		if (phy != 0)
636			return;
637
638		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
639		    (reg << 6) | SIS_PHYOP_WRITE);
640		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
641
642		for (i = 0; i < SIS_TIMEOUT; i++) {
643			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
644				break;
645		}
646
647		if (i == SIS_TIMEOUT)
648			printf("%s: PHY failed to come ready\n",
649			    sc->sc_dev.dv_xname);
650	} else {
651		bzero(&frame, sizeof(frame));
652
653		frame.mii_phyaddr = phy;
654		frame.mii_regaddr = reg;
655		frame.mii_data = data;
656		sis_mii_writereg(sc, &frame);
657	}
658}
659
660void
661sis_miibus_statchg(struct device *self)
662{
663	struct sis_softc	*sc = (struct sis_softc *)self;
664	struct ifnet		*ifp = &sc->arpcom.ac_if;
665	struct mii_data		*mii = &sc->sc_mii;
666
667	if ((ifp->if_flags & IFF_RUNNING) == 0)
668		return;
669
670	sc->sis_link = 0;
671	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
672	    (IFM_ACTIVE | IFM_AVALID)) {
673		switch (IFM_SUBTYPE(mii->mii_media_active)) {
674		case IFM_10_T:
675			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
676			sc->sis_link++;
677			break;
678		case IFM_100_TX:
679			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
680			sc->sis_link++;
681			break;
682		default:
683			break;
684		}
685	}
686
687	if (!sc->sis_link) {
688		/*
		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
		 * SIS_RX_LISTPTR, which in turn would require reinitialising
		 * the TX/RX buffers.  So just don't do anything when the
		 * link is lost.
693		 */
694		return;
695	}
696
697	/* Set full/half duplex mode. */
698	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
699		SIS_SETBIT(sc, SIS_TX_CFG,
700		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
701		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
702	} else {
703		SIS_CLRBIT(sc, SIS_TX_CFG,
704		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
705		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
706	}
707
708	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
709		/*
710		 * MPII03.D: Half Duplex Excessive Collisions.
711		 * Also page 49 in 83816 manual
712		 */
713		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
714	}
715
716	/*
717	 * Some DP83815s experience problems when used with short
718	 * (< 30m/100ft) Ethernet cables in 100baseTX mode.  This
719	 * sequence adjusts the DSP's signal attenuation to fix the
720	 * problem.
721	 */
722	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
723	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
724		uint32_t reg;
725
726		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
727		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
728		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
729		DELAY(100);
730		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
731		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
732#ifdef DEBUG
733			printf("%s: Applying short cable fix (reg=%x)\n",
734			    sc->sc_dev.dv_xname, reg);
735#endif
736			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
737			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
738		}
739		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
740	}
741	/* Enable TX/RX MACs. */
742	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
743	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
744}
745
746u_int32_t
747sis_mchash(struct sis_softc *sc, const uint8_t *addr)
748{
749	uint32_t		crc;
750
751	/* Compute CRC for the address value. */
752	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
753
754	/*
755	 * return the filter bit position
756	 *
	 * The NatSemi chip has a 512-bit filter, which is
	 * different from the SiS parts, so we special-case it.
759	 */
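	/*
	 * Taking the top bits of the big-endian CRC selects one of 512
	 * (NatSemi), 256 (newer SiS) or 128 (older SiS) hash table bits.
	 */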
760	if (sc->sis_type == SIS_TYPE_83815)
761		return (crc >> 23);
762	else if (sc->sis_rev >= SIS_REV_635 ||
763	    sc->sis_rev == SIS_REV_900B)
764		return (crc >> 24);
765	else
766		return (crc >> 25);
767}
768
769void
770sis_iff(struct sis_softc *sc)
771{
772	if (sc->sis_type == SIS_TYPE_83815)
773		sis_iff_ns(sc);
774	else
775		sis_iff_sis(sc);
776}
777
778void
779sis_iff_ns(struct sis_softc *sc)
780{
781	struct ifnet		*ifp = &sc->arpcom.ac_if;
782	struct arpcom		*ac = &sc->arpcom;
783	struct ether_multi	*enm;
784	struct ether_multistep  step;
785	u_int32_t		h = 0, i, rxfilt;
786	int			bit, index;
787
788	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
789	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
790		/*
791		 * Filter should be disabled to program other bits.
792		 */
793		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
794		CSR_READ_4(sc, SIS_RXFILT_CTL);
795	}
796	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
797	    NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD | NS_RXFILTCTL_MCHASH |
798	    NS_RXFILTCTL_PERFECT);
799	ifp->if_flags &= ~IFF_ALLMULTI;
800
801	/*
802	 * Always accept ARP frames.
803	 * Always accept broadcast frames.
804	 * Always accept frames destined to our station address.
805	 */
806	rxfilt |= NS_RXFILTCTL_ARP | SIS_RXFILTCTL_BROAD |
807	    NS_RXFILTCTL_PERFECT;
808
809	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
810		ifp->if_flags |= IFF_ALLMULTI;
811		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
812		if (ifp->if_flags & IFF_PROMISC)
813			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
814	} else {
815		/*
816		 * We have to explicitly enable the multicast hash table
817		 * on the NatSemi chip if we want to use it, which we do.
818		 */
819		rxfilt |= NS_RXFILTCTL_MCHASH;
820
821		/* first, zot all the existing hash bits */
822		for (i = 0; i < 32; i++) {
823			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i * 2));
824			CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
825		}
826
827		ETHER_FIRST_MULTI(step, ac, enm);
828		while (enm != NULL) {
829			h = sis_mchash(sc, enm->enm_addrlo);
830
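			/*
			 * h names one of 512 hash bits; turn it into a
			 * filter memory offset plus a bit position within
			 * the selected 16-bit word.
			 */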
831			index = h >> 3;
832			bit = h & 0x1F;
833
834			CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
835
836			if (bit > 0xF)
837				bit -= 0x10;
838
839			SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
840
841			ETHER_NEXT_MULTI(step, enm);
842		}
843	}
844
845	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
846	/* Turn the receive filter on. */
847	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
848	CSR_READ_4(sc, SIS_RXFILT_CTL);
849}
850
851void
852sis_iff_sis(struct sis_softc *sc)
853{
854	struct ifnet		*ifp = &sc->arpcom.ac_if;
855	struct arpcom		*ac = &sc->arpcom;
856	struct ether_multi	*enm;
857	struct ether_multistep	step;
858	u_int32_t		h, i, maxmulti, rxfilt;
859	u_int16_t		hashes[16];
860
861	/* hash table size */
862	if (sc->sis_rev >= SIS_REV_635 ||
863	    sc->sis_rev == SIS_REV_900B)
864		maxmulti = 16;
865	else
866		maxmulti = 8;
867
868	rxfilt = CSR_READ_4(sc, SIS_RXFILT_CTL);
869	if (rxfilt & SIS_RXFILTCTL_ENABLE) {
870		/*
871		 * Filter should be disabled to program other bits.
872		 */
873		CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt & ~SIS_RXFILTCTL_ENABLE);
874		CSR_READ_4(sc, SIS_RXFILT_CTL);
875	}
876	rxfilt &= ~(SIS_RXFILTCTL_ALLMULTI | SIS_RXFILTCTL_ALLPHYS |
877	    SIS_RXFILTCTL_BROAD);
878	ifp->if_flags &= ~IFF_ALLMULTI;
879
880	/*
881	 * Always accept broadcast frames.
882	 */
883	rxfilt |= SIS_RXFILTCTL_BROAD;
884
885	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
886	    ac->ac_multicnt > maxmulti) {
887		ifp->if_flags |= IFF_ALLMULTI;
888		rxfilt |= SIS_RXFILTCTL_ALLMULTI;
889		if (ifp->if_flags & IFF_PROMISC)
890			rxfilt |= SIS_RXFILTCTL_ALLPHYS;
891
892		for (i = 0; i < maxmulti; i++)
893			hashes[i] = ~0;
894	} else {
895		for (i = 0; i < maxmulti; i++)
896			hashes[i] = 0;
897
898		ETHER_FIRST_MULTI(step, ac, enm);
899		while (enm != NULL) {
900			h = sis_mchash(sc, enm->enm_addrlo);
901
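			/*
			 * The upper bits of h pick one of the 16-bit hash
			 * words, the low four bits the bit within it.
			 */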
902			hashes[h >> 4] |= 1 << (h & 0xf);
903
904			ETHER_NEXT_MULTI(step, enm);
905		}
906	}
907
908	for (i = 0; i < maxmulti; i++) {
909		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
910		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
911	}
912
913	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt);
914	/* Turn the receive filter on. */
915	CSR_WRITE_4(sc, SIS_RXFILT_CTL, rxfilt | SIS_RXFILTCTL_ENABLE);
916	CSR_READ_4(sc, SIS_RXFILT_CTL);
917}
918
919void
920sis_reset(struct sis_softc *sc)
921{
922	int			i;
923
924	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
925
926	for (i = 0; i < SIS_TIMEOUT; i++) {
927		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
928			break;
929	}
930
931	if (i == SIS_TIMEOUT)
932		printf("%s: reset never completed\n", sc->sc_dev.dv_xname);
933
934	/* Wait a little while for the chip to get its brains in order. */
935	DELAY(1000);
936
937	/*
	 * If this is a NatSemi chip, make sure to clear
939	 * PME mode.
940	 */
941	if (sc->sis_type == SIS_TYPE_83815) {
942		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
943		CSR_WRITE_4(sc, NS_CLKRUN, 0);
944	}
945}
946
947/*
948 * Probe for an SiS chip. Check the PCI vendor and device
949 * IDs against our list and return a device name if we find a match.
950 */
951int
952sis_probe(struct device *parent, void *match, void *aux)
953{
954	return (pci_matchbyid((struct pci_attach_args *)aux, sis_devices,
955	    nitems(sis_devices)));
956}
957
958/*
959 * Attach the interface. Allocate softc structures, do ifmedia
960 * setup and ethernet/BPF attach.
961 */
962void
963sis_attach(struct device *parent, struct device *self, void *aux)
964{
965	int			i;
966	const char		*intrstr = NULL;
967	struct sis_softc	*sc = (struct sis_softc *)self;
968	struct pci_attach_args	*pa = aux;
969	pci_chipset_tag_t	pc = pa->pa_pc;
970	pci_intr_handle_t	ih;
971	struct ifnet		*ifp;
972	bus_size_t		size;
973
974	sc->sis_stopped = 1;
975
976	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
977
978	/*
979	 * Map control/status registers.
980	 */
981
982#ifdef SIS_USEIOSPACE
983	if (pci_mapreg_map(pa, SIS_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
984	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
985		printf(": can't map i/o space\n");
986		return;
987 	}
988#else
989	if (pci_mapreg_map(pa, SIS_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
990	    &sc->sis_btag, &sc->sis_bhandle, NULL, &size, 0)) {
991 		printf(": can't map mem space\n");
992		return;
993 	}
994#endif
995
996	/* Allocate interrupt */
997	if (pci_intr_map(pa, &ih)) {
998		printf(": couldn't map interrupt\n");
999		goto fail_1;
1000	}
1001	intrstr = pci_intr_string(pc, ih);
1002	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, sis_intr, sc,
1003	    self->dv_xname);
1004	if (sc->sc_ih == NULL) {
1005		printf(": couldn't establish interrupt");
1006		if (intrstr != NULL)
1007			printf(" at %s", intrstr);
1008		printf("\n");
1009		goto fail_1;
1010	}
1011
1012	switch (PCI_PRODUCT(pa->pa_id)) {
1013	case PCI_PRODUCT_SIS_900:
1014		sc->sis_type = SIS_TYPE_900;
1015		break;
1016	case PCI_PRODUCT_SIS_7016:
1017		sc->sis_type = SIS_TYPE_7016;
1018		break;
1019	case PCI_PRODUCT_NS_DP83815:
1020		sc->sis_type = SIS_TYPE_83815;
1021		break;
1022	default:
1023		break;
1024	}
1025	sc->sis_rev = PCI_REVISION(pa->pa_class);
1026
1027	/* Reset the adapter. */
1028	sis_reset(sc);
1029
1030	if (sc->sis_type == SIS_TYPE_900 &&
1031	   (sc->sis_rev == SIS_REV_635 ||
1032	    sc->sis_rev == SIS_REV_900B)) {
1033		SIO_SET(SIS_CFG_RND_CNT);
1034		SIO_SET(SIS_CFG_PERR_DETECT);
1035	}
1036
1037	/*
1038	 * Get station address from the EEPROM.
1039	 */
1040	switch (PCI_VENDOR(pa->pa_id)) {
1041	case PCI_VENDOR_NS:
1042		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1043
1044		if (sc->sis_srr == NS_SRR_15C)
1045			printf(", DP83815C");
1046		else if (sc->sis_srr == NS_SRR_15D)
1047			printf(", DP83815D");
1048		else if (sc->sis_srr == NS_SRR_16A)
1049			printf(", DP83816A");
1050		else
1051			printf(", srr %x", sc->sis_srr);
1052
1053		/*
1054		 * Reading the MAC address out of the EEPROM on
1055		 * the NatSemi chip takes a bit more work than
1056		 * you'd expect. The address spans 4 16-bit words,
1057		 * with the first word containing only a single bit.
1058		 * You have to shift everything over one bit to
1059		 * get it aligned properly. Also, the bits are
1060		 * stored backwards (the LSB is really the MSB,
1061		 * and so on) so you have to reverse them in order
1062		 * to get the MAC address into the form we want.
1063		 * Why? Who the hell knows.
1064		 */
1065		{
1066			u_int16_t		tmp[4];
1067
1068			sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR,
1069			    4, 0);
1070
1071			/* Shift everything over one bit. */
1072			tmp[3] = tmp[3] >> 1;
1073			tmp[3] |= tmp[2] << 15;
1074			tmp[2] = tmp[2] >> 1;
1075			tmp[2] |= tmp[1] << 15;
1076			tmp[1] = tmp[1] >> 1;
1077			tmp[1] |= tmp[0] << 15;
1078
1079			/* Now reverse all the bits. */
1080			tmp[3] = letoh16(sis_reverse(tmp[3]));
1081			tmp[2] = letoh16(sis_reverse(tmp[2]));
1082			tmp[1] = letoh16(sis_reverse(tmp[1]));
1083
1084			bcopy(&tmp[1], sc->arpcom.ac_enaddr,
1085			    ETHER_ADDR_LEN);
1086		}
1087		break;
1088	case PCI_VENDOR_SIS:
1089	default:
1090#if defined(__amd64__) || defined(__i386__)
1091		/*
1092		 * If this is a SiS 630E chipset with an embedded
1093		 * SiS 900 controller, we have to read the MAC address
1094		 * from the APC CMOS RAM. Our method for doing this
1095		 * is very ugly since we have to reach out and grab
1096		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on the
		 * i386 and amd64 architectures since the SiS 630E chipset
		 * is only found on x86 motherboards. Note that there are
1100		 * a lot of magic numbers in this hack. These are
1101		 * taken from SiS's Linux driver. I'd like to replace
1102		 * them with proper symbolic definitions, but that
1103		 * requires some datasheets that I don't have access
1104		 * to at the moment.
1105		 */
1106		if (sc->sis_rev == SIS_REV_630S ||
1107		    sc->sis_rev == SIS_REV_630E)
1108			sis_read_cmos(sc, pa, (caddr_t)&sc->arpcom.ac_enaddr,
1109			    0x9, 6);
1110		else
1111#endif
1112		if (sc->sis_rev == SIS_REV_96x)
1113			sis_read96x_mac(sc);
1114		else if (sc->sis_rev == SIS_REV_635 ||
1115		    sc->sis_rev == SIS_REV_630ET ||
1116		    sc->sis_rev == SIS_REV_630EA1)
1117			sis_read_mac(sc, pa);
1118		else
1119			sis_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1120			    SIS_EE_NODEADDR, 3, 1);
1121		break;
1122	}
1123
1124	printf(": %s, address %s\n", intrstr,
1125	    ether_sprintf(sc->arpcom.ac_enaddr));
1126
1127	sc->sc_dmat = pa->pa_dmat;
1128
1129	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sis_list_data),
1130	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1131	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1132		printf(": can't alloc list mem\n");
1133		goto fail_2;
1134	}
1135	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1136	    sizeof(struct sis_list_data), &sc->sc_listkva,
1137	    BUS_DMA_NOWAIT) != 0) {
1138		printf(": can't map list mem\n");
1139		goto fail_2;
1140	}
1141	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct sis_list_data), 1,
1142	    sizeof(struct sis_list_data), 0, BUS_DMA_NOWAIT,
1143	    &sc->sc_listmap) != 0) {
1144		printf(": can't alloc list map\n");
1145		goto fail_2;
1146	}
1147	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1148	    sizeof(struct sis_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1149		printf(": can't load list map\n");
1150		goto fail_2;
1151	}
1152	sc->sis_ldata = (struct sis_list_data *)sc->sc_listkva;
1153
1154	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1155		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1156		    BUS_DMA_NOWAIT, &sc->sis_ldata->sis_rx_list[i].map) != 0) {
1157			printf(": can't create rx map\n");
1158			goto fail_2;
1159		}
1160	}
1161
1162	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1163		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1164		    SIS_MAXTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
1165		    &sc->sis_ldata->sis_tx_list[i].map) != 0) {
1166			printf(": can't create tx map\n");
1167			goto fail_2;
1168		}
1169	}
1170	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, SIS_MAXTXSEGS,
1171	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1172		printf(": can't create tx spare map\n");
1173		goto fail_2;
1174	}
1175
1176	timeout_set(&sc->sis_timeout, sis_tick, sc);
1177
1178	ifp = &sc->arpcom.ac_if;
1179	ifp->if_softc = sc;
1180	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1181	ifp->if_ioctl = sis_ioctl;
1182	ifp->if_start = sis_start;
1183	ifp->if_watchdog = sis_watchdog;
1184	ifq_init_maxlen(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1185	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1186	ifp->if_hardmtu = 1518; /* determined experimentally on DP83815 */
1187
1188	ifp->if_capabilities = IFCAP_VLAN_MTU;
1189
1190	sc->sc_mii.mii_ifp = ifp;
1191	sc->sc_mii.mii_readreg = sis_miibus_readreg;
1192	sc->sc_mii.mii_writereg = sis_miibus_writereg;
1193	sc->sc_mii.mii_statchg = sis_miibus_statchg;
1194	ifmedia_init(&sc->sc_mii.mii_media, 0, sis_ifmedia_upd,sis_ifmedia_sts);
1195	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1196	    0);
1197	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1198		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1199		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1200	} else
1201		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1202
1203	/*
1204	 * Call MI attach routines.
1205	 */
1206	if_attach(ifp);
1207	ether_ifattach(ifp);
1208	return;
1209
1210fail_2:
1211	pci_intr_disestablish(pc, sc->sc_ih);
1212
1213fail_1:
1214	bus_space_unmap(sc->sis_btag, sc->sis_bhandle, size);
1215}
1216
1217int
1218sis_activate(struct device *self, int act)
1219{
1220	struct sis_softc *sc = (struct sis_softc *)self;
1221	struct ifnet *ifp = &sc->arpcom.ac_if;
1222	int rv = 0;
1223
1224	switch (act) {
1225	case DVACT_SUSPEND:
1226		if (ifp->if_flags & IFF_RUNNING)
1227			sis_stop(sc);
1228		rv = config_activate_children(self, act);
1229		break;
1230	case DVACT_RESUME:
1231		if (ifp->if_flags & IFF_UP)
1232			sis_init(sc);
1233		break;
1234	default:
1235		rv = config_activate_children(self, act);
1236		break;
1237	}
1238	return (rv);
1239}
1240
1241/*
1242 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1243 * we arrange the descriptors in a closed ring, so that the last descriptor
1244 * points back to the first.
1245 */
1246int
1247sis_ring_init(struct sis_softc *sc)
1248{
1249	struct sis_list_data	*ld;
1250	struct sis_ring_data	*cd;
1251	int			i, nexti;
1252
1253	cd = &sc->sis_cdata;
1254	ld = sc->sis_ldata;
1255
1256	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1257		if (i == (SIS_TX_LIST_CNT - 1))
1258			nexti = 0;
1259		else
1260			nexti = i + 1;
1261		ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[nexti];
1262		ld->sis_tx_list[i].sis_next =
1263		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1264		      offsetof(struct sis_list_data, sis_tx_list[nexti]));
1265		ld->sis_tx_list[i].sis_mbuf = NULL;
1266		ld->sis_tx_list[i].sis_ptr = 0;
1267		ld->sis_tx_list[i].sis_ctl = 0;
1268	}
1269
1270	cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0;
1271
1272	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1273		if (i == SIS_RX_LIST_CNT - 1)
1274			nexti = 0;
1275		else
1276			nexti = i + 1;
1277		ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[nexti];
1278		ld->sis_rx_list[i].sis_next =
1279		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1280		      offsetof(struct sis_list_data, sis_rx_list[nexti]));
1281		ld->sis_rx_list[i].sis_ctl = 0;
1282	}
1283
1284	cd->sis_rx_prod = cd->sis_rx_cons = 0;
1285	if_rxr_init(&cd->sis_rx_ring, 2, SIS_RX_LIST_CNT - 1);
1286	sis_fill_rx_ring(sc);
1287
1288	return (0);
1289}
1290
1291void
1292sis_fill_rx_ring(struct sis_softc *sc)
1293{
1294	struct sis_list_data    *ld;
1295	struct sis_ring_data    *cd;
1296	u_int			slots;
1297
1298	cd = &sc->sis_cdata;
1299	ld = sc->sis_ldata;
1300
1301	for (slots = if_rxr_get(&cd->sis_rx_ring, SIS_RX_LIST_CNT);
1302	    slots > 0; slots--) {
1303		if (sis_newbuf(sc, &ld->sis_rx_list[cd->sis_rx_prod]))
1304			break;
1305
1306		SIS_INC(cd->sis_rx_prod, SIS_RX_LIST_CNT);
1307	}
1308	if_rxr_put(&cd->sis_rx_ring, slots);
1309}
1310
1311/*
1312 * Initialize an RX descriptor and attach an MBUF cluster.
1313 */
1314int
1315sis_newbuf(struct sis_softc *sc, struct sis_desc *c)
1316{
1317	struct mbuf		*m_new = NULL;
1318
1319	if (c == NULL)
1320		return (EINVAL);
1321
1322	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1323	if (!m_new)
1324		return (ENOBUFS);
1325
1326	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1327
1328	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
1329	    BUS_DMA_NOWAIT)) {
1330		m_free(m_new);
1331		return (ENOBUFS);
1332	}
1333
1334	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1335	    BUS_DMASYNC_PREREAD);
1336
1337	c->sis_mbuf = m_new;
1338	c->sis_ptr = htole32(c->map->dm_segs[0].ds_addr);
1339
1340	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1341	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1342	    BUS_DMASYNC_PREWRITE);
1343
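	/*
	 * Store the buffer size in the command/status word; with the
	 * OWN bit clear the descriptor is available to the chip again.
	 */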
1344	c->sis_ctl = htole32(ETHER_MAX_DIX_LEN);
1345
1346	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1347	    ((caddr_t)c - sc->sc_listkva), sizeof(struct sis_desc),
1348	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1349
1350	return (0);
1351}
1352
1353/*
1354 * A frame has been uploaded: pass the resulting mbuf chain up to
1355 * the higher level protocols.
1356 */
1357void
1358sis_rxeof(struct sis_softc *sc)
1359{
1360	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1361	struct mbuf		*m;
1362	struct ifnet		*ifp;
1363	struct sis_desc		*cur_rx;
1364	int			total_len = 0;
1365	u_int32_t		rxstat;
1366
1367	ifp = &sc->arpcom.ac_if;
1368
1369	while (if_rxr_inuse(&sc->sis_cdata.sis_rx_ring) > 0) {
1370		cur_rx = &sc->sis_ldata->sis_rx_list[sc->sis_cdata.sis_rx_cons];
1371		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1372		    ((caddr_t)cur_rx - sc->sc_listkva),
1373		    sizeof(struct sis_desc),
1374		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1375		if (!SIS_OWNDESC(cur_rx))
1376			break;
1377
1378		rxstat = letoh32(cur_rx->sis_rxstat);
1379		m = cur_rx->sis_mbuf;
1380		cur_rx->sis_mbuf = NULL;
1381		total_len = SIS_RXBYTES(cur_rx);
1382		/* from here on the buffer is consumed */
1383		SIS_INC(sc->sis_cdata.sis_rx_cons, SIS_RX_LIST_CNT);
1384		if_rxr_put(&sc->sis_cdata.sis_rx_ring, 1);
1385
1386		/*
1387		 * DP83816A sometimes produces zero-length packets
1388		 * shortly after initialisation.
1389		 */
1390		if (total_len == 0) {
1391			m_freem(m);
1392			continue;
1393		}
1394
1395		/* The ethernet CRC is always included */
1396		total_len -= ETHER_CRC_LEN;
1397
1398		/*
		 * If an error occurs, update stats and discard the frame;
		 * the descriptor has already been consumed and the ring
		 * will be replenished by sis_fill_rx_ring() below.  However,
		 * don't report long frames as errors, since they could be
		 * VLAN-tagged frames.
1404		 */
1405		if (rxstat & SIS_RXSTAT_GIANT &&
1406		    total_len <= (ETHER_MAX_DIX_LEN - ETHER_CRC_LEN))
1407			rxstat &= ~SIS_RXSTAT_GIANT;
1408		if (SIS_RXSTAT_ERROR(rxstat)) {
1409			ifp->if_ierrors++;
1410			if (rxstat & SIS_RXSTAT_COLL)
1411				ifp->if_collisions++;
1412			m_freem(m);
1413			continue;
1414		}
1415
1416		/* No errors; receive the packet. */
1417		bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
1418		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1419#ifdef __STRICT_ALIGNMENT
1420		/*
		 * On architectures with strict alignment requirements the
		 * packet cannot be handed up as received, so copy it into
		 * a freshly allocated mbuf chain with m_devget(), which
		 * realigns it (note the ETHER_ALIGN offset), and free the
		 * original cluster.  Architectures without this restriction
		 * (the #else case below) hand up the existing cluster
		 * directly, saving the expensive copy.
1428		 */
1429		{
1430			struct mbuf *m0;
1431			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
1432			m_freem(m);
1433			if (m0 == NULL) {
1434				ifp->if_ierrors++;
1435				continue;
1436			}
1437			m = m0;
1438		}
1439#else
1440		m->m_pkthdr.len = m->m_len = total_len;
1441#endif
1442
1443		ml_enqueue(&ml, m);
1444	}
1445
1446	if (ifiq_input(&ifp->if_rcv, &ml))
1447		if_rxr_livelocked(&sc->sis_cdata.sis_rx_ring);
1448
1449	sis_fill_rx_ring(sc);
1450}
1451
1452/*
1453 * A frame was downloaded to the chip. It's safe for us to clean up
1454 * the list buffers.
1455 */
1456
1457void
1458sis_txeof(struct sis_softc *sc)
1459{
1460	struct ifnet		*ifp;
1461	u_int32_t		idx, ctl, txstat;
1462
1463	ifp = &sc->arpcom.ac_if;
1464
1465	/*
1466	 * Go through our tx list and free mbufs for those
1467	 * frames that have been transmitted.
1468	 */
1469	for (idx = sc->sis_cdata.sis_tx_cons; sc->sis_cdata.sis_tx_cnt > 0;
1470	    sc->sis_cdata.sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
1471		struct sis_desc *cur_tx = &sc->sis_ldata->sis_tx_list[idx];
1472
1473		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1474		    ((caddr_t)cur_tx - sc->sc_listkva),
1475		    sizeof(struct sis_desc),
1476		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1477
1478		if (SIS_OWNDESC(cur_tx))
1479			break;
1480
1481		ctl = letoh32(cur_tx->sis_ctl);
1482
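		/*
		 * Descriptors other than the last one of a packet carry
		 * SIS_CMDSTS_MORE; only the final descriptor has the
		 * status we care about.
		 */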
1483		if (ctl & SIS_CMDSTS_MORE)
1484			continue;
1485
1486		txstat = letoh32(cur_tx->sis_txstat);
1487
1488		if (!(ctl & SIS_CMDSTS_PKT_OK)) {
1489			ifp->if_oerrors++;
1490			if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1491				ifp->if_collisions++;
1492			if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1493				ifp->if_collisions++;
1494		}
1495
1496		ifp->if_collisions += (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1497
1498		if (cur_tx->map->dm_nsegs != 0) {
1499			bus_dmamap_t map = cur_tx->map;
1500
1501			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1502			    BUS_DMASYNC_POSTWRITE);
1503			bus_dmamap_unload(sc->sc_dmat, map);
1504		}
1505		if (cur_tx->sis_mbuf != NULL) {
1506			m_freem(cur_tx->sis_mbuf);
1507			cur_tx->sis_mbuf = NULL;
1508		}
1509	}
1510
1511	if (idx != sc->sis_cdata.sis_tx_cons) {
1512		/* we freed up some buffers */
1513		sc->sis_cdata.sis_tx_cons = idx;
1514		ifq_clr_oactive(&ifp->if_snd);
1515	}
1516
1517	ifp->if_timer = (sc->sis_cdata.sis_tx_cnt == 0) ? 0 : 5;
1518}
1519
1520void
1521sis_tick(void *xsc)
1522{
1523	struct sis_softc	*sc = (struct sis_softc *)xsc;
1524	struct mii_data		*mii;
1525	int			s;
1526
1527	s = splnet();
1528
1529	mii = &sc->sc_mii;
1530	mii_tick(mii);
1531
1532	if (!sc->sis_link)
1533		sis_miibus_statchg(&sc->sc_dev);
1534
1535	timeout_add_sec(&sc->sis_timeout, 1);
1536
1537	splx(s);
1538}
1539
1540int
1541sis_intr(void *arg)
1542{
1543	struct sis_softc	*sc = arg;
1544	struct ifnet		*ifp = &sc->arpcom.ac_if;
1545	u_int32_t		status;
1546
1547	if (sc->sis_stopped)	/* Most likely shared interrupt */
1548		return (0);
1549
1550	/* Reading the ISR register clears all interrupts. */
1551	status = CSR_READ_4(sc, SIS_ISR);
1552	if ((status & SIS_INTRS) == 0)
1553		return (0);
1554
1555	if (status &
1556	    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1557	     SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
1558		sis_txeof(sc);
1559
1560	if (status &
1561	    (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1562	     SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1563		sis_rxeof(sc);
1564
1565	if (status & (SIS_ISR_RX_IDLE)) {
1566		/* consume what's there so that sis_rx_cons points
1567		 * to the first HW owned descriptor. */
1568		sis_rxeof(sc);
1569		/* reprogram the RX listptr */
1570		CSR_WRITE_4(sc, SIS_RX_LISTPTR,
1571		    sc->sc_listmap->dm_segs[0].ds_addr +
1572		    offsetof(struct sis_list_data,
1573		    sis_rx_list[sc->sis_cdata.sis_rx_cons]));
1574	}
1575
1576	if (status & SIS_ISR_SYSERR)
1577		sis_init(sc);
1578
1579	/*
1580	 * XXX: Re-enable RX engine every time otherwise it occasionally
1581	 * stops under unknown circumstances.
1582	 */
1583	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1584
1585	if (!ifq_empty(&ifp->if_snd))
1586		sis_start(ifp);
1587
1588	return (1);
1589}
1590
1591/*
1592 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1593 * pointers to the fragment pointers.
1594 */
1595int
1596sis_encap(struct sis_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
1597{
1598	struct sis_desc		*f = NULL;
1599	bus_dmamap_t		map;
1600	int			frag, cur, i, error;
1601
1602	map = sc->sc_tx_sparemap;
1603
1604	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1605	    BUS_DMA_NOWAIT);
1606	switch (error) {
1607	case 0:
1608		break;
1609
1610	case EFBIG:
1611		if (m_defrag(m_head, M_DONTWAIT) == 0 &&
1612		    bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head,
1613		    BUS_DMA_NOWAIT) == 0)
1614			break;
1615
1616		/* FALLTHROUGH */
1617	default:
1618		return (ENOBUFS);
1619	}
1620
1621	if ((SIS_TX_LIST_CNT - (sc->sis_cdata.sis_tx_cnt + map->dm_nsegs)) < 2) {
1622		bus_dmamap_unload(sc->sc_dmat, map);
1623		return (ENOBUFS);
1624	}
1625
1626	/*
1627 	 * Start packing the mbufs in this chain into
1628	 * the fragment pointers. Stop when we run out
1629 	 * of fragments or hit the end of the mbuf chain.
1630	 */
1631	cur = frag = *txidx;
1632
1633	for (i = 0; i < map->dm_nsegs; i++) {
1634		f = &sc->sis_ldata->sis_tx_list[frag];
1635		f->sis_ctl = htole32(SIS_CMDSTS_MORE | map->dm_segs[i].ds_len);
1636		f->sis_ptr = htole32(map->dm_segs[i].ds_addr);
1637		if (i != 0)
1638			f->sis_ctl |= htole32(SIS_CMDSTS_OWN);
1639		cur = frag;
1640		SIS_INC(frag, SIS_TX_LIST_CNT);
1641	}
1642
1643	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1644	    BUS_DMASYNC_PREWRITE);
1645
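	/*
	 * Close the chain: clear MORE on the last fragment to mark the
	 * end of the packet, and only now hand the first descriptor to
	 * the chip by setting OWN, so it never sees a half-built chain.
	 */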
1646	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
1647	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~htole32(SIS_CMDSTS_MORE);
1648	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= htole32(SIS_CMDSTS_OWN);
1649	sc->sis_cdata.sis_tx_cnt += map->dm_nsegs;
1650	*txidx = frag;
1651
1652	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1653	    offsetof(struct sis_list_data, sis_tx_list[0]),
1654	    sizeof(struct sis_desc) * SIS_TX_LIST_CNT,
1655	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1656
1657	return (0);
1658}
1659
1660/*
1661 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1662 * to the mbuf data regions directly in the transmit lists. We also save a
1663 * copy of the pointers since the transmit list fragment pointers are
1664 * physical addresses.
1665 */
1666
1667void
1668sis_start(struct ifnet *ifp)
1669{
1670	struct sis_softc	*sc;
1671	struct mbuf		*m_head = NULL;
1672	u_int32_t		idx, queued = 0;
1673
1674	sc = ifp->if_softc;
1675
1676	if (!sc->sis_link)
1677		return;
1678
1679	idx = sc->sis_cdata.sis_tx_prod;
1680
1681	if (ifq_is_oactive(&ifp->if_snd))
1682		return;
1683
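	/*
	 * A slot that still has an mbuf attached has not been reclaimed
	 * by sis_txeof() yet, so stop when we reach one.
	 */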
1684	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
1685		m_head = ifq_deq_begin(&ifp->if_snd);
1686		if (m_head == NULL)
1687			break;
1688
1689		if (sis_encap(sc, m_head, &idx)) {
1690			ifq_deq_rollback(&ifp->if_snd, m_head);
1691			ifq_set_oactive(&ifp->if_snd);
1692			break;
1693		}
1694
1695		/* now we are committed to transmit the packet */
1696		ifq_deq_commit(&ifp->if_snd, m_head);
1697
1698		queued++;
1699
1700		/*
1701		 * If there's a BPF listener, bounce a copy of this frame
1702		 * to him.
1703		 */
1704#if NBPFILTER > 0
1705		if (ifp->if_bpf)
1706			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
1707#endif
1708	}
1709
1710	if (queued) {
1711		/* Transmit */
1712		sc->sis_cdata.sis_tx_prod = idx;
1713		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1714
1715		/*
1716		 * Set a timeout in case the chip goes out to lunch.
1717		 */
1718		ifp->if_timer = 5;
1719	}
1720}
1721
1722void
1723sis_init(void *xsc)
1724{
1725	struct sis_softc	*sc = (struct sis_softc *)xsc;
1726	struct ifnet		*ifp = &sc->arpcom.ac_if;
1727	struct mii_data		*mii;
1728	int			s;
1729
1730	s = splnet();
1731
1732	/*
1733	 * Cancel pending I/O and free all RX/TX buffers.
1734	 */
1735	sis_stop(sc);
1736
1737	/*
1738	 * Reset the chip to a known state.
1739	 */
1740	sis_reset(sc);
1741
1742#if NS_IHR_DELAY > 0
1743	/* Configure interrupt holdoff register. */
1744	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr == NS_SRR_16A)
1745		CSR_WRITE_4(sc, NS_IHR, NS_IHR_VALUE);
1746#endif
1747
1748	mii = &sc->sc_mii;
1749
1750	/* Set MAC address */
1751	if (sc->sis_type == SIS_TYPE_83815) {
1752		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1753		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1754		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1755		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1756		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1757		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1758		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
1759		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1760		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1761	} else {
1762		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
1763		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1764		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[0]));
1765		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
1766		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1767		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[1]));
1768		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
1769		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1770		    htole16(((u_int16_t *)sc->arpcom.ac_enaddr)[2]));
1771	}
1772
1773	/* Init circular TX/RX lists. */
1774	if (sis_ring_init(sc) != 0) {
1775		printf("%s: initialization failed: no memory for rx buffers\n",
1776		    sc->sc_dev.dv_xname);
1777		sis_stop(sc);
1778		splx(s);
1779		return;
1780	}
1781
	/*
	 * Page 78 of the DP83815 data sheet (September 2002 version)
	 * recommends the following register settings "for optimum
	 * performance" for rev 15C.  The driver from NS also sets
1786	 * the PHY_CR register for later versions.
1787	 *
1788	 * This resolves an issue with tons of errors in AcceptPerfectMatch
1789	 * (non-IFF_PROMISC) mode.
1790	 */
1791	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
1792		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
1793		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
1794		/* set val for c2 */
1795		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
1796		/* load/kill c2 */
1797		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
1798		/* raise SD off, from 4 to c */
1799		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
1800		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
1801	}
1802
1803	/*
1804	 * Program promiscuous mode and multicast filters.
1805	 */
1806	sis_iff(sc);
1807
1808	/*
1809	 * Load the address of the RX and TX lists.
1810	 */
1811	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1812	    offsetof(struct sis_list_data, sis_rx_list[0]));
1813	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sc_listmap->dm_segs[0].ds_addr +
1814	    offsetof(struct sis_list_data, sis_tx_list[0]));
1815
	/*
	 * SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
1820	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN)
1821		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
1822	else
1823		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
1824
1825	/* Accept Long Packets for VLAN support */
1826	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
1827
1828	/*
	 * Assume a 100Mbps link; the actual MAC configuration is done
	 * once a valid link has been established.
1831	 */
1832	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
1833
1834	/*
1835	 * Enable interrupts.
1836	 */
1837	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
1838	CSR_WRITE_4(sc, SIS_IER, 1);
1839
1840	/* Clear MAC disable. */
1841	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1842
1843	sc->sis_link = 0;
1844	mii_mediachg(mii);
1845
1846	sc->sis_stopped = 0;
1847	ifp->if_flags |= IFF_RUNNING;
1848	ifq_clr_oactive(&ifp->if_snd);
1849
1850	splx(s);
1851
1852	timeout_add_sec(&sc->sis_timeout, 1);
1853}
1854
1855/*
1856 * Set media options.
1857 */
1858int
1859sis_ifmedia_upd(struct ifnet *ifp)
1860{
1861	struct sis_softc	*sc;
1862	struct mii_data		*mii;
1863
1864	sc = ifp->if_softc;
1865
1866	mii = &sc->sc_mii;
1867	if (mii->mii_instance) {
1868		struct mii_softc	*miisc;
1869		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1870			mii_phy_reset(miisc);
1871	}
1872	mii_mediachg(mii);
1873
1874	return (0);
1875}
1876
1877/*
1878 * Report current media status.
1879 */
1880void
1881sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1882{
1883	struct sis_softc	*sc;
1884	struct mii_data		*mii;
1885
1886	sc = ifp->if_softc;
1887
1888	mii = &sc->sc_mii;
1889	mii_pollstat(mii);
1890	ifmr->ifm_active = mii->mii_media_active;
1891	ifmr->ifm_status = mii->mii_media_status;
1892}
1893
1894int
1895sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1896{
1897	struct sis_softc	*sc = ifp->if_softc;
1898	struct ifreq		*ifr = (struct ifreq *) data;
1899	struct mii_data		*mii;
1900	int			s, error = 0;
1901
1902	s = splnet();
1903
1904	switch(command) {
1905	case SIOCSIFADDR:
1906		ifp->if_flags |= IFF_UP;
1907		if (!(ifp->if_flags & IFF_RUNNING))
1908			sis_init(sc);
1909		break;
1910
1911	case SIOCSIFFLAGS:
1912		if (ifp->if_flags & IFF_UP) {
1913			if (ifp->if_flags & IFF_RUNNING)
1914				error = ENETRESET;
1915			else
1916				sis_init(sc);
1917		} else {
1918			if (ifp->if_flags & IFF_RUNNING)
1919				sis_stop(sc);
1920		}
1921		break;
1922
1923	case SIOCGIFMEDIA:
1924	case SIOCSIFMEDIA:
1925		mii = &sc->sc_mii;
1926		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1927		break;
1928
1929	case SIOCGIFRXR:
1930		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
1931		    NULL, MCLBYTES, &sc->sis_cdata.sis_rx_ring);
1932		break;
1933
1934	default:
1935		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1936	}
1937
1938	if (error == ENETRESET) {
1939		if (ifp->if_flags & IFF_RUNNING)
1940			sis_iff(sc);
1941		error = 0;
1942	}
1943
1944	splx(s);
1945	return(error);
1946}
1947
1948void
1949sis_watchdog(struct ifnet *ifp)
1950{
1951	struct sis_softc	*sc;
1952	int			s;
1953
1954	sc = ifp->if_softc;
1955
1956	if (sc->sis_stopped)
1957		return;
1958
1959	ifp->if_oerrors++;
1960	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1961
1962	s = splnet();
1963	sis_init(sc);
1964
1965	if (!ifq_empty(&ifp->if_snd))
1966		sis_start(ifp);
1967
1968	splx(s);
1969}
1970
1971/*
1972 * Stop the adapter and free any mbufs allocated to the
1973 * RX and TX lists.
1974 */
1975void
1976sis_stop(struct sis_softc *sc)
1977{
1978	int			i;
1979	struct ifnet		*ifp;
1980
1981	if (sc->sis_stopped)
1982		return;
1983
1984	ifp = &sc->arpcom.ac_if;
1985	ifp->if_timer = 0;
1986
1987	timeout_del(&sc->sis_timeout);
1988
1989	ifp->if_flags &= ~IFF_RUNNING;
1990	ifq_clr_oactive(&ifp->if_snd);
1991	sc->sis_stopped = 1;
1992
1993	CSR_WRITE_4(sc, SIS_IER, 0);
1994	CSR_WRITE_4(sc, SIS_IMR, 0);
1995	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
1996	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
1997	DELAY(1000);
1998	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
1999	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2000
2001	sc->sis_link = 0;
2002
2003	/*
2004	 * Free data in the RX lists.
2005	 */
2006	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2007		if (sc->sis_ldata->sis_rx_list[i].map->dm_nsegs != 0) {
2008			bus_dmamap_t map = sc->sis_ldata->sis_rx_list[i].map;
2009
2010			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2011			    BUS_DMASYNC_POSTREAD);
2012			bus_dmamap_unload(sc->sc_dmat, map);
2013		}
2014		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
2015			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
2016			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
2017		}
2018		bzero(&sc->sis_ldata->sis_rx_list[i],
2019		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2020	}
2021
2022	/*
2023	 * Free the TX list buffers.
2024	 */
2025	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2026		if (sc->sis_ldata->sis_tx_list[i].map->dm_nsegs != 0) {
2027			bus_dmamap_t map = sc->sis_ldata->sis_tx_list[i].map;
2028
2029			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2030			    BUS_DMASYNC_POSTWRITE);
2031			bus_dmamap_unload(sc->sc_dmat, map);
2032		}
2033		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
2034			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
2035			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
2036		}
2037		bzero(&sc->sis_ldata->sis_tx_list[i],
2038		    sizeof(struct sis_desc) - sizeof(bus_dmamap_t));
2039	}
2040}
2041