if_sis.c revision 212121
1/*-
2 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/sis/if_sis.c 212121 2010-09-01 22:50:11Z yongari $");
36
37/*
38 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
39 * available from http://www.sis.com.tw.
40 *
41 * This driver also supports the NatSemi DP83815. Datasheets are
42 * available from http://www.national.com.
43 *
44 * Written by Bill Paul <wpaul@ee.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
60
61#ifdef HAVE_KERNEL_OPTION_HEADERS
62#include "opt_device_polling.h"
63#endif
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/bus.h>
68#include <sys/endian.h>
69#include <sys/kernel.h>
70#include <sys/lock.h>
71#include <sys/malloc.h>
72#include <sys/mbuf.h>
73#include <sys/module.h>
74#include <sys/socket.h>
75#include <sys/sockio.h>
76
77#include <net/if.h>
78#include <net/if_arp.h>
79#include <net/ethernet.h>
80#include <net/if_dl.h>
81#include <net/if_media.h>
82#include <net/if_types.h>
83#include <net/if_vlan_var.h>
84
85#include <net/bpf.h>
86
87#include <machine/bus.h>
88#include <machine/resource.h>
89#include <sys/bus.h>
90#include <sys/rman.h>
91
92#include <dev/mii/mii.h>
93#include <dev/mii/miivar.h>
94
95#include <dev/pci/pcireg.h>
96#include <dev/pci/pcivar.h>
97
98#define SIS_USEIOSPACE
99
100#include <dev/sis/if_sisreg.h>
101
102MODULE_DEPEND(sis, pci, 1, 1, 1);
103MODULE_DEPEND(sis, ether, 1, 1, 1);
104MODULE_DEPEND(sis, miibus, 1, 1, 1);
105
106/* "device miibus" required.  See GENERIC if you get errors here. */
107#include "miibus_if.h"
108
109#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
110#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
111#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)
112
113/*
114 * register space access macros
115 */
116#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)
117
118#define CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)
119
120#define CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)
121
122/*
123 * Various supported device vendors/types and their names.
124 */
125static struct sis_type sis_devs[] = {
126	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
127	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
128	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
129	{ 0, 0, NULL }
130};
131
132static int sis_detach(device_t);
133static __inline void sis_discard_rxbuf(struct sis_rxdesc *);
134static int sis_dma_alloc(struct sis_softc *);
135static void sis_dma_free(struct sis_softc *);
136static int sis_dma_ring_alloc(struct sis_softc *, bus_size_t, bus_size_t,
137    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
138static void sis_dmamap_cb(void *, bus_dma_segment_t *, int, int);
139#ifndef __NO_STRICT_ALIGNMENT
140static __inline void sis_fixup_rx(struct mbuf *);
141#endif
142static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
143static int sis_ifmedia_upd(struct ifnet *);
144static void sis_init(void *);
145static void sis_initl(struct sis_softc *);
146static void sis_intr(void *);
147static int sis_ioctl(struct ifnet *, u_long, caddr_t);
148static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
149static int sis_rxeof(struct sis_softc *);
150static void sis_start(struct ifnet *);
151static void sis_startl(struct ifnet *);
152static void sis_stop(struct sis_softc *);
153static void sis_watchdog(struct sis_softc *);
154
155
156static struct resource_spec sis_res_spec[] = {
157#ifdef SIS_USEIOSPACE
158	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
159#else
160	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
161#endif
162	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
163	{ -1, 0 }
164};
165
166#define SIS_SETBIT(sc, reg, x)				\
167	CSR_WRITE_4(sc, reg,				\
168		CSR_READ_4(sc, reg) | (x))
169
170#define SIS_CLRBIT(sc, reg, x)				\
171	CSR_WRITE_4(sc, reg,				\
172		CSR_READ_4(sc, reg) & ~(x))
173
174#define SIO_SET(x)					\
175	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
176
177#define SIO_CLR(x)					\
178	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
179
180/*
181 * Routine to reverse the bits in a word. Stolen almost
182 * verbatim from /usr/games/fortune.
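 * For example, 0x0001 becomes 0x8000 and 0x1234 becomes 0x2c48.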
183 */
184static uint16_t
185sis_reverse(uint16_t n)
186{
187	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
188	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
189	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
190	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
191
192	return (n);
193}
194
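/*
 * Waste a little time by issuing a handful of dummy reads of the
 * command/status register (SIS_CSR); the 300 / 33 constant suggests the
 * intent is a delay on the order of a few hundred nanoseconds on a 33 MHz
 * PCI bus, used as setup/hold time for the bit-banged EEPROM and MII
 * interfaces below.
 */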
195static void
196sis_delay(struct sis_softc *sc)
197{
198	int			idx;
199
200	for (idx = (300 / 33) + 1; idx > 0; idx--)
201		CSR_READ_4(sc, SIS_CSR);
202}
203
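/*
 * Clock the EEPROM with chip select asserted for a couple of dozen cycles
 * and then drop all of the control lines, leaving the serial EEPROM
 * interface in a known idle state.
 */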
204static void
205sis_eeprom_idle(struct sis_softc *sc)
206{
207	int		i;
208
209	SIO_SET(SIS_EECTL_CSEL);
210	sis_delay(sc);
211	SIO_SET(SIS_EECTL_CLK);
212	sis_delay(sc);
213
214	for (i = 0; i < 25; i++) {
215		SIO_CLR(SIS_EECTL_CLK);
216		sis_delay(sc);
217		SIO_SET(SIS_EECTL_CLK);
218		sis_delay(sc);
219	}
220
221	SIO_CLR(SIS_EECTL_CLK);
222	sis_delay(sc);
223	SIO_CLR(SIS_EECTL_CSEL);
224	sis_delay(sc);
225	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
226}
227
228/*
229 * Send a read command and address to the EEPROM, check for ACK.
230 */
231static void
232sis_eeprom_putbyte(struct sis_softc *sc, int addr)
233{
234	int		d, i;
235
236	d = addr | SIS_EECMD_READ;
237
238	/*
239	 * Feed in each bit and strobe the clock.
240	 */
241	for (i = 0x400; i; i >>= 1) {
242		if (d & i) {
243			SIO_SET(SIS_EECTL_DIN);
244		} else {
245			SIO_CLR(SIS_EECTL_DIN);
246		}
247		sis_delay(sc);
248		SIO_SET(SIS_EECTL_CLK);
249		sis_delay(sc);
250		SIO_CLR(SIS_EECTL_CLK);
251		sis_delay(sc);
252	}
253}
254
255/*
256 * Read a word of data stored in the EEPROM at address 'addr.'
257 */
258static void
259sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
260{
261	int		i;
262	uint16_t	word = 0;
263
264	/* Force EEPROM to idle state. */
265	sis_eeprom_idle(sc);
266
267	/* Enter EEPROM access mode. */
268	sis_delay(sc);
269	SIO_CLR(SIS_EECTL_CLK);
270	sis_delay(sc);
271	SIO_SET(SIS_EECTL_CSEL);
272	sis_delay(sc);
273
274	/*
275	 * Send address of word we want to read.
276	 */
277	sis_eeprom_putbyte(sc, addr);
278
279	/*
280	 * Start reading bits from EEPROM.
281	 */
282	for (i = 0x8000; i; i >>= 1) {
283		SIO_SET(SIS_EECTL_CLK);
284		sis_delay(sc);
285		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
286			word |= i;
287		sis_delay(sc);
288		SIO_CLR(SIS_EECTL_CLK);
289		sis_delay(sc);
290	}
291
292	/* Turn off EEPROM access mode. */
293	sis_eeprom_idle(sc);
294
295	*dest = word;
296}
297
298/*
299 * Read a sequence of words from the EEPROM.
300 */
301static void
302sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
303{
304	int			i;
305	uint16_t		word = 0, *ptr;
306
307	for (i = 0; i < cnt; i++) {
308		sis_eeprom_getword(sc, off + i, &word);
309		ptr = (uint16_t *)(dest + (i * 2));
310		if (swap)
311			*ptr = ntohs(word);
312		else
313			*ptr = word;
314	}
315}
316
317#if defined(__i386__) || defined(__amd64__)
318static device_t
319sis_find_bridge(device_t dev)
320{
321	devclass_t		pci_devclass;
322	device_t		*pci_devices;
323	int			pci_count = 0;
324	device_t		*pci_children;
325	int			pci_childcount = 0;
326	device_t		*busp, *childp;
327	device_t		child = NULL;
328	int			i, j;
329
330	if ((pci_devclass = devclass_find("pci")) == NULL)
331		return (NULL);
332
333	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
334
335	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
336		if (device_get_children(*busp, &pci_children, &pci_childcount))
337			continue;
338		for (j = 0, childp = pci_children;
339		    j < pci_childcount; j++, childp++) {
340			if (pci_get_vendor(*childp) == SIS_VENDORID &&
341			    pci_get_device(*childp) == 0x0008) {
342				child = *childp;
343				free(pci_children, M_TEMP);
344				goto done;
345			}
346		}
347		free(pci_children, M_TEMP);
348	}
349
350done:
351	free(pci_devices, M_TEMP);
352	return (child);
353}
354
355static void
356sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
357{
358	device_t		bridge;
359	uint8_t			reg;
360	int			i;
361	bus_space_tag_t		btag;
362
363	bridge = sis_find_bridge(dev);
364	if (bridge == NULL)
365		return;
366	reg = pci_read_config(bridge, 0x48, 1);
367	pci_write_config(bridge, 0x48, reg|0x40, 1);
368
369	/* XXX */
370#if defined(__i386__)
371	btag = I386_BUS_SPACE_IO;
372#elif defined(__amd64__)
373	btag = AMD64_BUS_SPACE_IO;
374#endif
375
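	/*
	 * The address bytes live in the southbridge's APC CMOS RAM; read
	 * them one at a time through the standard RTC/CMOS index (0x70)
	 * and data (0x71) I/O ports while access is enabled above.
	 */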
376	for (i = 0; i < cnt; i++) {
377		bus_space_write_1(btag, 0x0, 0x70, i + off);
378		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
379	}
380
381	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
382}
383
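/*
 * Recover the station address on SiS 635/630ET class parts by asking the
 * MAC to reload its receive filter (SIS_CSR_RELOAD) and then reading the
 * perfect-match filter registers back, 16 bits at a time.
 */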
384static void
385sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
386{
387	uint32_t		filtsave, csrsave;
388
389	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
390	csrsave = CSR_READ_4(sc, SIS_CSR);
391
392	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
393	CSR_WRITE_4(sc, SIS_CSR, 0);
394
395	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
396
397	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
398	((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
399	CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
400	((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
401	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
402	((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
403
404	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
405	CSR_WRITE_4(sc, SIS_CSR, csrsave);
406}
407#endif
408
409/*
410 * Sync the PHYs by setting data bit and strobing the clock 32 times.
411 */
412static void
413sis_mii_sync(struct sis_softc *sc)
414{
415	int		i;
416
417 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
418
419 	for (i = 0; i < 32; i++) {
420 		SIO_SET(SIS_MII_CLK);
421 		DELAY(1);
422 		SIO_CLR(SIS_MII_CLK);
423 		DELAY(1);
424 	}
425}
426
427/*
428 * Clock a series of bits through the MII.
429 */
430static void
431sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
432{
433	int			i;
434
435	SIO_CLR(SIS_MII_CLK);
436
437	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
438		if (bits & i) {
439			SIO_SET(SIS_MII_DATA);
440		} else {
441			SIO_CLR(SIS_MII_DATA);
442		}
443		DELAY(1);
444		SIO_CLR(SIS_MII_CLK);
445		DELAY(1);
446		SIO_SET(SIS_MII_CLK);
447	}
448}
449
450/*
451 * Read a PHY register through the MII.
452 */
453static int
454sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
455{
456	int			i, ack;
457
458	/*
459	 * Set up frame for RX.
460	 */
461	frame->mii_stdelim = SIS_MII_STARTDELIM;
462	frame->mii_opcode = SIS_MII_READOP;
463	frame->mii_turnaround = 0;
464	frame->mii_data = 0;
465
466	/*
467 	 * Turn on data xmit.
468	 */
469	SIO_SET(SIS_MII_DIR);
470
471	sis_mii_sync(sc);
472
473	/*
474	 * Send command/address info.
475	 */
476	sis_mii_send(sc, frame->mii_stdelim, 2);
477	sis_mii_send(sc, frame->mii_opcode, 2);
478	sis_mii_send(sc, frame->mii_phyaddr, 5);
479	sis_mii_send(sc, frame->mii_regaddr, 5);
480
481	/* Idle bit */
482	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
483	DELAY(1);
484	SIO_SET(SIS_MII_CLK);
485	DELAY(1);
486
487	/* Turn off xmit. */
488	SIO_CLR(SIS_MII_DIR);
489
490	/* Check for ack */
491	SIO_CLR(SIS_MII_CLK);
492	DELAY(1);
493	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
494	SIO_SET(SIS_MII_CLK);
495	DELAY(1);
496
497	/*
498	 * Now try reading data bits. If the ack failed, we still
499	 * need to clock through 16 cycles to keep the PHY(s) in sync.
500	 */
501	if (ack) {
502		for (i = 0; i < 16; i++) {
503			SIO_CLR(SIS_MII_CLK);
504			DELAY(1);
505			SIO_SET(SIS_MII_CLK);
506			DELAY(1);
507		}
508		goto fail;
509	}
510
511	for (i = 0x8000; i; i >>= 1) {
512		SIO_CLR(SIS_MII_CLK);
513		DELAY(1);
514		if (!ack) {
515			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
516				frame->mii_data |= i;
517			DELAY(1);
518		}
519		SIO_SET(SIS_MII_CLK);
520		DELAY(1);
521	}
522
523fail:
524
525	SIO_CLR(SIS_MII_CLK);
526	DELAY(1);
527	SIO_SET(SIS_MII_CLK);
528	DELAY(1);
529
530	if (ack)
531		return (1);
532	return (0);
533}
534
535/*
536 * Write to a PHY register through the MII.
537 */
538static int
539sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
540{
541
542 	/*
543 	 * Set up frame for TX.
544 	 */
545
546 	frame->mii_stdelim = SIS_MII_STARTDELIM;
547 	frame->mii_opcode = SIS_MII_WRITEOP;
548 	frame->mii_turnaround = SIS_MII_TURNAROUND;
549
550 	/*
551  	 * Turn on data output.
552 	 */
553 	SIO_SET(SIS_MII_DIR);
554
555 	sis_mii_sync(sc);
556
557 	sis_mii_send(sc, frame->mii_stdelim, 2);
558 	sis_mii_send(sc, frame->mii_opcode, 2);
559 	sis_mii_send(sc, frame->mii_phyaddr, 5);
560 	sis_mii_send(sc, frame->mii_regaddr, 5);
561 	sis_mii_send(sc, frame->mii_turnaround, 2);
562 	sis_mii_send(sc, frame->mii_data, 16);
563
564 	/* Idle bit. */
565 	SIO_SET(SIS_MII_CLK);
566 	DELAY(1);
567 	SIO_CLR(SIS_MII_CLK);
568 	DELAY(1);
569
570 	/*
571 	 * Turn off xmit.
572 	 */
573 	SIO_CLR(SIS_MII_DIR);
574
575 	return (0);
576}
577
578static int
579sis_miibus_readreg(device_t dev, int phy, int reg)
580{
581	struct sis_softc	*sc;
582	struct sis_mii_frame    frame;
583
584	sc = device_get_softc(dev);
585
586	if (sc->sis_type == SIS_TYPE_83815) {
587		if (phy != 0)
588			return (0);
589		/*
590		 * The NatSemi chip can take a while after
591		 * a reset to come ready, during which the BMSR
592		 * returns a value of 0. This is *never* supposed
593		 * to happen: some of the BMSR bits are meant to
594		 * be hardwired in the on position, and this can
595		 * confuse the miibus code a bit during the probe
596		 * and attach phase. So we make an effort to check
597		 * for this condition and wait for it to clear.
598		 */
599		if (!CSR_READ_4(sc, NS_BMSR))
600			DELAY(1000);
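		/*
		 * The internal PHY's MII registers are mapped straight into
		 * the MAC register space at 32-bit intervals starting at
		 * NS_BMCR, so no serial MII transaction is needed.
		 */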
601		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
602	}
603
604	/*
605	 * Chipsets < SIS_635 seem not to be able to read/write
606	 * through mdio. Use the enhanced PHY access register
607	 * for them instead.
608	 */
609	if (sc->sis_type == SIS_TYPE_900 &&
610	    sc->sis_rev < SIS_REV_635) {
611		int i, val = 0;
612
613		if (phy != 0)
614			return (0);
615
616		CSR_WRITE_4(sc, SIS_PHYCTL,
617		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
618		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
619
620		for (i = 0; i < SIS_TIMEOUT; i++) {
621			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
622				break;
623		}
624
625		if (i == SIS_TIMEOUT) {
626			device_printf(sc->sis_dev, "PHY failed to come ready\n");
627			return (0);
628		}
629
630		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
631
632		if (val == 0xFFFF)
633			return (0);
634
635		return (val);
636	} else {
637		bzero((char *)&frame, sizeof(frame));
638
639		frame.mii_phyaddr = phy;
640		frame.mii_regaddr = reg;
641		sis_mii_readreg(sc, &frame);
642
643		return (frame.mii_data);
644	}
645}
646
647static int
648sis_miibus_writereg(device_t dev, int phy, int reg, int data)
649{
650	struct sis_softc	*sc;
651	struct sis_mii_frame	frame;
652
653	sc = device_get_softc(dev);
654
655	if (sc->sis_type == SIS_TYPE_83815) {
656		if (phy != 0)
657			return (0);
658		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
659		return (0);
660	}
661
662	/*
663	 * Chipsets < SIS_635 seem not to be able to read/write
664	 * through mdio. Use the enhanced PHY access register
665	 * for them instead.
666	 */
667	if (sc->sis_type == SIS_TYPE_900 &&
668	    sc->sis_rev < SIS_REV_635) {
669		int i;
670
671		if (phy != 0)
672			return (0);
673
674		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
675		    (reg << 6) | SIS_PHYOP_WRITE);
676		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
677
678		for (i = 0; i < SIS_TIMEOUT; i++) {
679			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
680				break;
681		}
682
683		if (i == SIS_TIMEOUT)
684			device_printf(sc->sis_dev, "PHY failed to come ready\n");
685	} else {
686		bzero((char *)&frame, sizeof(frame));
687
688		frame.mii_phyaddr = phy;
689		frame.mii_regaddr = reg;
690		frame.mii_data = data;
691		sis_mii_writereg(sc, &frame);
692	}
693	return (0);
694}
695
696static void
697sis_miibus_statchg(device_t dev)
698{
699	struct sis_softc	*sc;
700	struct mii_data		*mii;
701	struct ifnet		*ifp;
702	uint32_t		reg;
703
704	sc = device_get_softc(dev);
705	SIS_LOCK_ASSERT(sc);
706
707	mii = device_get_softc(sc->sis_miibus);
708	ifp = sc->sis_ifp;
709	if (mii == NULL || ifp == NULL ||
710	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
711		return;
712
713	sc->sis_link = 0;
714	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
715	    (IFM_ACTIVE | IFM_AVALID)) {
716		switch (IFM_SUBTYPE(mii->mii_media_active)) {
717		case IFM_10_T:
718			sc->sis_link++;
719			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
720			break;
721		case IFM_100_TX:
722			sc->sis_link++;
723			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
724			break;
725		default:
726			break;
727		}
728	}
729
730	if (sc->sis_link == 0) {
731		/*
732		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
733		 * SIS_RX_LISTPTR, which in turn would require reinitializing
734		 * the TX/RX buffers.  So just don't do anything when the
735		 * link is lost.
736		 */
737		return;
738	}
739
740	/* Set full/half duplex mode. */
741	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
742		SIS_SETBIT(sc, SIS_TX_CFG,
743		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
744		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
745	} else {
746		SIS_CLRBIT(sc, SIS_TX_CFG,
747		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
748		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
749	}
750
751	if (sc->sis_type == SIS_TYPE_83816) {
752		/*
753		 * MPII03.D: Half Duplex Excessive Collisions.
754		 * Also page 49 in 83816 manual
755		 */
756		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
757	}
758
759	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
760	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
761		/*
762		 * Short Cable Receive Errors (MP21.E)
763		 */
764		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
765		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
766		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
767		DELAY(100);
768		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
769		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
770			device_printf(sc->sis_dev,
771			    "Applying short cable fix (reg=%x)\n", reg);
772			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
773			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
774		}
775		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
776	}
777	/* Enable TX/RX MACs. */
778	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
779	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
780}
781
782static uint32_t
783sis_mchash(struct sis_softc *sc, const uint8_t *addr)
784{
785	uint32_t		crc;
786
787	/* Compute CRC for the address value. */
788	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
789
790	/*
791	 * return the filter bit position
792	 *
793	 * The NatSemi chip has a 512-bit filter, which is
794	 * different than the SiS, so we special-case it.
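	 *
	 * The hash is taken from the top bits of the big-endian CRC: 9 bits
	 * (crc >> 23) for the 512-bit NatSemi table, 8 bits for the 256-bit
	 * table on SIS_REV_635/900B and newer, and 7 bits for the older
	 * 128-bit table.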
795	 */
796	if (sc->sis_type == SIS_TYPE_83815)
797		return (crc >> 23);
798	else if (sc->sis_rev >= SIS_REV_635 ||
799	    sc->sis_rev == SIS_REV_900B)
800		return (crc >> 24);
801	else
802		return (crc >> 25);
803}
804
805static void
806sis_setmulti_ns(struct sis_softc *sc)
807{
808	struct ifnet		*ifp;
809	struct ifmultiaddr	*ifma;
810	uint32_t		h = 0, i, filtsave;
811	int			bit, index;
812
813	ifp = sc->sis_ifp;
814
815	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
816		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
817		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
818		return;
819	}
820
821	/*
822	 * We have to explicitly enable the multicast hash table
823	 * on the NatSemi chip if we want to use it, which we do.
824	 */
825	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
826	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
827
828	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
829
830	/* first, zot all the existing hash bits */
831	for (i = 0; i < 32; i++) {
832		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
833		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
834	}
835
836	if_maddr_rlock(ifp);
837	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
838		if (ifma->ifma_addr->sa_family != AF_LINK)
839			continue;
840		h = sis_mchash(sc,
841		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
842		index = h >> 3;
843		bit = h & 0x1F;
844		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
845		if (bit > 0xF)
846			bit -= 0x10;
847		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
848	}
849	if_maddr_runlock(ifp);
850
851	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
852}
853
854static void
855sis_setmulti_sis(struct sis_softc *sc)
856{
857	struct ifnet		*ifp;
858	struct ifmultiaddr	*ifma;
859	uint32_t		h, i, n, ctl;
860	uint16_t		hashes[16];
861
862	ifp = sc->sis_ifp;
863
864	/* hash table size */
865	if (sc->sis_rev >= SIS_REV_635 ||
866	    sc->sis_rev == SIS_REV_900B)
867		n = 16;
868	else
869		n = 8;
870
871	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
872
873	if (ifp->if_flags & IFF_BROADCAST)
874		ctl |= SIS_RXFILTCTL_BROAD;
875
876	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
877		ctl |= SIS_RXFILTCTL_ALLMULTI;
878		if (ifp->if_flags & IFF_PROMISC)
879			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
880		for (i = 0; i < n; i++)
881			hashes[i] = ~0;
882	} else {
883		for (i = 0; i < n; i++)
884			hashes[i] = 0;
885		i = 0;
886		if_maddr_rlock(ifp);
887		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
888			if (ifma->ifma_addr->sa_family != AF_LINK)
889				continue;
890			h = sis_mchash(sc,
891			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
892			hashes[h >> 4] |= 1 << (h & 0xf);
893			i++;
894		}
895		if_maddr_runlock(ifp);
896		if (i > n) {
897			ctl |= SIS_RXFILTCTL_ALLMULTI;
898			for (i = 0; i < n; i++)
899				hashes[i] = ~0;
900		}
901	}
902
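	/*
	 * Program the hash words into the receive filter RAM.  The upper
	 * half of SIS_RXFILT_CTL selects which 16-bit filter word is
	 * accessed through SIS_RXFILT_DATA; the multicast hash appears to
	 * start at word index 4, above the perfect-match address words.
	 */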
903	for (i = 0; i < n; i++) {
904		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
905		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
906	}
907
908	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
909}
910
911static void
912sis_reset(struct sis_softc *sc)
913{
914	int		i;
915
916	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
917
918	for (i = 0; i < SIS_TIMEOUT; i++) {
919		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
920			break;
921	}
922
923	if (i == SIS_TIMEOUT)
924		device_printf(sc->sis_dev, "reset never completed\n");
925
926	/* Wait a little while for the chip to get its brains in order. */
927	DELAY(1000);
928
929	/*
930	 * If this is a NatSemi chip, make sure to clear
931	 * PME mode.
932	 */
933	if (sc->sis_type == SIS_TYPE_83815) {
934		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
935		CSR_WRITE_4(sc, NS_CLKRUN, 0);
936	}
937}
938
939/*
940 * Probe for an SiS chip. Check the PCI vendor and device
941 * IDs against our list and return a device name if we find a match.
942 */
943static int
944sis_probe(device_t dev)
945{
946	struct sis_type		*t;
947
948	t = sis_devs;
949
950	while (t->sis_name != NULL) {
951		if ((pci_get_vendor(dev) == t->sis_vid) &&
952		    (pci_get_device(dev) == t->sis_did)) {
953			device_set_desc(dev, t->sis_name);
954			return (BUS_PROBE_DEFAULT);
955		}
956		t++;
957	}
958
959	return (ENXIO);
960}
961
962/*
963 * Attach the interface. Allocate softc structures, do ifmedia
964 * setup and ethernet/BPF attach.
965 */
966static int
967sis_attach(device_t dev)
968{
969	u_char			eaddr[ETHER_ADDR_LEN];
970	struct sis_softc	*sc;
971	struct ifnet		*ifp;
972	int			error = 0, waittime = 0;
973
975	sc = device_get_softc(dev);
976
977	sc->sis_dev = dev;
978
979	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
980	    MTX_DEF);
981	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);
982
983	if (pci_get_device(dev) == SIS_DEVICEID_900)
984		sc->sis_type = SIS_TYPE_900;
985	if (pci_get_device(dev) == SIS_DEVICEID_7016)
986		sc->sis_type = SIS_TYPE_7016;
987	if (pci_get_vendor(dev) == NS_VENDORID)
988		sc->sis_type = SIS_TYPE_83815;
989
990	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
991	/*
992	 * Map control/status registers.
993	 */
994	pci_enable_busmaster(dev);
995
996	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
997	if (error) {
998		device_printf(dev, "couldn't allocate resources\n");
999		goto fail;
1000	}
1001
1002	/* Reset the adapter. */
1003	sis_reset(sc);
1004
1005	if (sc->sis_type == SIS_TYPE_900 &&
1006	    (sc->sis_rev == SIS_REV_635 ||
1007	    sc->sis_rev == SIS_REV_900B)) {
1008		SIO_SET(SIS_CFG_RND_CNT);
1009		SIO_SET(SIS_CFG_PERR_DETECT);
1010	}
1011
1012	/*
1013	 * Get station address from the EEPROM.
1014	 */
1015	switch (pci_get_vendor(dev)) {
1016	case NS_VENDORID:
1017		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1018
1019		/* We can't update the device description, so spew */
1020		if (sc->sis_srr == NS_SRR_15C)
1021			device_printf(dev, "Silicon Revision: DP83815C\n");
1022		else if (sc->sis_srr == NS_SRR_15D)
1023			device_printf(dev, "Silicon Revision: DP83815D\n");
1024		else if (sc->sis_srr == NS_SRR_16A)
1025			device_printf(dev, "Silicon Revision: DP83816A\n");
1026		else
1027			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
1028
1029		/*
1030		 * Reading the MAC address out of the EEPROM on
1031		 * the NatSemi chip takes a bit more work than
1032		 * you'd expect. The address spans 4 16-bit words,
1033		 * with the first word containing only a single bit.
1034		 * You have to shift everything over one bit to
1035		 * get it aligned properly. Also, the bits are
1036		 * stored backwards (the LSB is really the MSB,
1037		 * and so on) so you have to reverse them in order
1038		 * to get the MAC address into the form we want.
1039		 * Why? Who the hell knows.
1040		 */
1041		{
1042			uint16_t		tmp[4];
1043
1044			sis_read_eeprom(sc, (caddr_t)&tmp,
1045			    NS_EE_NODEADDR, 4, 0);
1046
1047			/* Shift everything over one bit. */
1048			tmp[3] = tmp[3] >> 1;
1049			tmp[3] |= tmp[2] << 15;
1050			tmp[2] = tmp[2] >> 1;
1051			tmp[2] |= tmp[1] << 15;
1052			tmp[1] = tmp[1] >> 1;
1053			tmp[1] |= tmp[0] << 15;
1054
1055			/* Now reverse all the bits. */
1056			tmp[3] = sis_reverse(tmp[3]);
1057			tmp[2] = sis_reverse(tmp[2]);
1058			tmp[1] = sis_reverse(tmp[1]);
1059
1060			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
1061		}
1062		break;
1063	case SIS_VENDORID:
1064	default:
1065#if defined(__i386__) || defined(__amd64__)
1066		/*
1067		 * If this is a SiS 630E chipset with an embedded
1068		 * SiS 900 controller, we have to read the MAC address
1069		 * from the APC CMOS RAM. Our method for doing this
1070		 * is very ugly since we have to reach out and grab
1071		 * ahold of hardware for which we cannot properly
1072		 * allocate resources. This code is only compiled on
1073		 * the i386 architecture since the SiS 630E chipset
1074		 * is for x86 motherboards only. Note that there are
1075		 * a lot of magic numbers in this hack. These are
1076		 * taken from SiS's Linux driver. I'd like to replace
1077		 * them with proper symbolic definitions, but that
1078		 * requires some datasheets that I don't have access
1079		 * to at the moment.
1080		 */
1081		if (sc->sis_rev == SIS_REV_630S ||
1082		    sc->sis_rev == SIS_REV_630E ||
1083		    sc->sis_rev == SIS_REV_630EA1)
1084			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1085
1086		else if (sc->sis_rev == SIS_REV_635 ||
1087			 sc->sis_rev == SIS_REV_630ET)
1088			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1089		else if (sc->sis_rev == SIS_REV_96x) {
1090			/* Allow the LAN block to read the EEPROM. It is shared
1091			 * between a 1394 controller and the NIC and each
1092			 * time we access it, we need to set SIS_EECMD_REQ.
1093			 */
1094			SIO_SET(SIS_EECMD_REQ);
1095			for (waittime = 0; waittime < SIS_TIMEOUT;
1096			    waittime++) {
1097				/* Force EEPROM to idle state. */
1098				sis_eeprom_idle(sc);
1099				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1100					sis_read_eeprom(sc, (caddr_t)&eaddr,
1101					    SIS_EE_NODEADDR, 3, 0);
1102					break;
1103				}
1104				DELAY(1);
1105			}
1106			/*
1107			 * Set SIS_EECTL_CLK high, so another master
1108			 * can operate on the i2c bus.
1109			 */
1110			SIO_SET(SIS_EECTL_CLK);
1111			/* Refuse EEPROM access by LAN */
1112			SIO_SET(SIS_EECMD_DONE);
1113		} else
1114#endif
1115			sis_read_eeprom(sc, (caddr_t)&eaddr,
1116			    SIS_EE_NODEADDR, 3, 0);
1117		break;
1118	}
1119
1120	/* Allocate DMA'able memory. */
1121	if ((error = sis_dma_alloc(sc)) != 0)
1122		goto fail;
1123
1124	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
1125	if (ifp == NULL) {
1126		device_printf(dev, "can not if_alloc()\n");
1127		error = ENOSPC;
1128		goto fail;
1129	}
1130	ifp->if_softc = sc;
1131	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1132	ifp->if_mtu = ETHERMTU;
1133	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1134	ifp->if_ioctl = sis_ioctl;
1135	ifp->if_start = sis_start;
1136	ifp->if_init = sis_init;
1137	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1138	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
1139	IFQ_SET_READY(&ifp->if_snd);
1140
1141	/*
1142	 * Do MII setup.
1143	 */
1144	if (mii_phy_probe(dev, &sc->sis_miibus,
1145	    sis_ifmedia_upd, sis_ifmedia_sts)) {
1146		device_printf(dev, "MII without any PHY!\n");
1147		error = ENXIO;
1148		goto fail;
1149	}
1150
1151	/*
1152	 * Call MI attach routine.
1153	 */
1154	ether_ifattach(ifp, eaddr);
1155
1156	/*
1157	 * Tell the upper layer(s) we support long frames.
1158	 */
1159	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1160	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1161	ifp->if_capenable = ifp->if_capabilities;
1162#ifdef DEVICE_POLLING
1163	ifp->if_capabilities |= IFCAP_POLLING;
1164#endif
1165
1166	/* Hook interrupt last to avoid having to lock softc */
1167	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1168	    NULL, sis_intr, sc, &sc->sis_intrhand);
1169
1170	if (error) {
1171		device_printf(dev, "couldn't set up irq\n");
1172		ether_ifdetach(ifp);
1173		goto fail;
1174	}
1175
1176fail:
1177	if (error)
1178		sis_detach(dev);
1179
1180	return (error);
1181}
1182
1183/*
1184 * Shutdown hardware and free up resources. This can be called any
1185 * time after the mutex has been initialized. It is called in both
1186 * the error case in attach and the normal detach case so it needs
1187 * to be careful about only freeing resources that have actually been
1188 * allocated.
1189 */
1190static int
1191sis_detach(device_t dev)
1192{
1193	struct sis_softc	*sc;
1194	struct ifnet		*ifp;
1195
1196	sc = device_get_softc(dev);
1197	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
1198	ifp = sc->sis_ifp;
1199
1200#ifdef DEVICE_POLLING
1201	if (ifp->if_capenable & IFCAP_POLLING)
1202		ether_poll_deregister(ifp);
1203#endif
1204
1205	/* These should only be active if attach succeeded. */
1206	if (device_is_attached(dev)) {
1207		SIS_LOCK(sc);
1208		sis_stop(sc);
1209		SIS_UNLOCK(sc);
1210		callout_drain(&sc->sis_stat_ch);
1211		ether_ifdetach(ifp);
1212	}
1213	if (sc->sis_miibus)
1214		device_delete_child(dev, sc->sis_miibus);
1215	bus_generic_detach(dev);
1216
1217	if (sc->sis_intrhand)
1218		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
1219	bus_release_resources(dev, sis_res_spec, sc->sis_res);
1220
1221	if (ifp)
1222		if_free(ifp);
1223
1224	sis_dma_free(sc);
1225
1226	mtx_destroy(&sc->sis_mtx);
1227
1228	return (0);
1229}
1230
1231struct sis_dmamap_arg {
1232	bus_addr_t	sis_busaddr;
1233};
1234
1235static void
1236sis_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1237{
1238	struct sis_dmamap_arg	*ctx;
1239
1240	if (error != 0)
1241		return;
1242
1243	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1244
1245	ctx = (struct sis_dmamap_arg *)arg;
1246	ctx->sis_busaddr = segs[0].ds_addr;
1247}
1248
1249static int
1250sis_dma_ring_alloc(struct sis_softc *sc, bus_size_t alignment,
1251    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
1252    bus_addr_t *paddr, const char *msg)
1253{
1254	struct sis_dmamap_arg	ctx;
1255	int			error;
1256
1257	error = bus_dma_tag_create(sc->sis_parent_tag, alignment, 0,
1258	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1,
1259	    maxsize, 0, NULL, NULL, tag);
1260	if (error != 0) {
1261		device_printf(sc->sis_dev,
1262		    "could not create %s dma tag\n", msg);
1263		return (ENOMEM);
1264	}
1265	/* Allocate DMA'able memory for ring. */
1266	error = bus_dmamem_alloc(*tag, (void **)ring,
1267	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
1268	if (error != 0) {
1269		device_printf(sc->sis_dev,
1270		    "could not allocate DMA'able memory for %s\n", msg);
1271		return (ENOMEM);
1272	}
1273	/* Load the address of the ring. */
1274	ctx.sis_busaddr = 0;
1275	error = bus_dmamap_load(*tag, *map, *ring, maxsize, sis_dmamap_cb,
1276	    &ctx, BUS_DMA_NOWAIT);
1277	if (error != 0) {
1278		device_printf(sc->sis_dev,
1279		    "could not load DMA'able memory for %s\n", msg);
1280		return (ENOMEM);
1281	}
1282	*paddr = ctx.sis_busaddr;
1283	return (0);
1284}
1285
1286static int
1287sis_dma_alloc(struct sis_softc *sc)
1288{
1289	struct sis_rxdesc	*rxd;
1290	struct sis_txdesc	*txd;
1291	int			error, i;
1292
1293	/* Allocate the parent bus DMA tag appropriate for PCI. */
1294	error = bus_dma_tag_create(bus_get_dma_tag(sc->sis_dev),
1295	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1296	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
1297	    0, NULL, NULL, &sc->sis_parent_tag);
1298	if (error != 0) {
1299		device_printf(sc->sis_dev,
1300		    "could not allocate parent dma tag\n");
1301		return (ENOMEM);
1302	}
1303
1304	/* Create RX ring. */
1305	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_RX_LIST_SZ,
1306	    &sc->sis_rx_list_tag, (uint8_t **)&sc->sis_rx_list,
1307	    &sc->sis_rx_list_map, &sc->sis_rx_paddr, "RX ring");
1308	if (error)
1309		return (error);
1310
1311	/* Create TX ring. */
1312	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_TX_LIST_SZ,
1313	    &sc->sis_tx_list_tag, (uint8_t **)&sc->sis_tx_list,
1314	    &sc->sis_tx_list_map, &sc->sis_tx_paddr, "TX ring");
1315	if (error)
1316		return (error);
1317
1318	/* Create tag for RX mbufs. */
1319	error = bus_dma_tag_create(sc->sis_parent_tag, SIS_RX_BUF_ALIGN, 0,
1320	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
1321	    MCLBYTES, 0, NULL, NULL, &sc->sis_rx_tag);
1322	if (error) {
1323		device_printf(sc->sis_dev, "could not allocate RX dma tag\n");
1324		return (error);
1325	}
1326
1327	/* Create tag for TX mbufs. */
1328	error = bus_dma_tag_create(sc->sis_parent_tag, 1, 0,
1329	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1330	    MCLBYTES * SIS_MAXTXSEGS, SIS_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
1331	    &sc->sis_tx_tag);
1332	if (error) {
1333		device_printf(sc->sis_dev, "could not allocate TX dma tag\n");
1334		return (error);
1335	}
1336
1337	/* Create DMA maps for RX buffers. */
1338	error = bus_dmamap_create(sc->sis_rx_tag, 0, &sc->sis_rx_sparemap);
1339	if (error) {
1340		device_printf(sc->sis_dev,
1341		    "can't create spare DMA map for RX\n");
1342		return (error);
1343	}
1344	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1345		rxd = &sc->sis_rxdesc[i];
1346		rxd->rx_m = NULL;
1347		error = bus_dmamap_create(sc->sis_rx_tag, 0, &rxd->rx_dmamap);
1348		if (error) {
1349			device_printf(sc->sis_dev,
1350			    "can't create DMA map for RX\n");
1351			return (error);
1352		}
1353	}
1354
1355	/* Create DMA maps for TX buffers. */
1356	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1357		txd = &sc->sis_txdesc[i];
1358		txd->tx_m = NULL;
1359		error = bus_dmamap_create(sc->sis_tx_tag, 0, &txd->tx_dmamap);
1360		if (error) {
1361			device_printf(sc->sis_dev,
1362			    "can't create DMA map for TX\n");
1363			return (error);
1364		}
1365	}
1366
1367	return (0);
1368}
1369
1370static void
1371sis_dma_free(struct sis_softc *sc)
1372{
1373	struct sis_rxdesc	*rxd;
1374	struct sis_txdesc	*txd;
1375	int			i;
1376
1377	/* Destroy DMA maps for RX buffers. */
1378	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1379		rxd = &sc->sis_rxdesc[i];
1380		if (rxd->rx_dmamap)
1381			bus_dmamap_destroy(sc->sis_rx_tag, rxd->rx_dmamap);
1382	}
1383	if (sc->sis_rx_sparemap)
1384		bus_dmamap_destroy(sc->sis_rx_tag, sc->sis_rx_sparemap);
1385
1386	/* Destroy DMA maps for TX buffers. */
1387	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1388		txd = &sc->sis_txdesc[i];
1389		if (txd->tx_dmamap)
1390			bus_dmamap_destroy(sc->sis_tx_tag, txd->tx_dmamap);
1391	}
1392
1393	if (sc->sis_rx_tag)
1394		bus_dma_tag_destroy(sc->sis_rx_tag);
1395	if (sc->sis_tx_tag)
1396		bus_dma_tag_destroy(sc->sis_tx_tag);
1397
1398	/* Destroy RX ring. */
1399	if (sc->sis_rx_list_map)
1400		bus_dmamap_unload(sc->sis_rx_list_tag, sc->sis_rx_list_map);
1401	if (sc->sis_rx_list_map && sc->sis_rx_list)
1402		bus_dmamem_free(sc->sis_rx_list_tag, sc->sis_rx_list,
1403		    sc->sis_rx_list_map);
1404
1405	if (sc->sis_rx_list_tag)
1406		bus_dma_tag_destroy(sc->sis_rx_list_tag);
1407
1408	/* Destroy TX ring. */
1409	if (sc->sis_tx_list_map)
1410		bus_dmamap_unload(sc->sis_tx_list_tag, sc->sis_tx_list_map);
1411
1412	if (sc->sis_tx_list_map && sc->sis_tx_list)
1413		bus_dmamem_free(sc->sis_tx_list_tag, sc->sis_tx_list,
1414		    sc->sis_tx_list_map);
1415
1416	if (sc->sis_tx_list_tag)
1417		bus_dma_tag_destroy(sc->sis_tx_list_tag);
1418
1419	/* Destroy the parent tag. */
1420	if (sc->sis_parent_tag)
1421		bus_dma_tag_destroy(sc->sis_parent_tag);
1422}
1423
1424/*
1425 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1426 * we arrange the descriptors in a closed ring, so that the last descriptor
1427 * points back to the first.
1428 */
1429static int
1430sis_ring_init(struct sis_softc *sc)
1431{
1432	struct sis_rxdesc	*rxd;
1433	struct sis_txdesc	*txd;
1434	bus_addr_t		next;
1435	int			error, i;
1436
1437	bzero(&sc->sis_tx_list[0], SIS_TX_LIST_SZ);
1438	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1439		txd = &sc->sis_txdesc[i];
1440		txd->tx_m = NULL;
1441		if (i == SIS_TX_LIST_CNT - 1)
1442			next = SIS_TX_RING_ADDR(sc, 0);
1443		else
1444			next = SIS_TX_RING_ADDR(sc, i + 1);
1445		sc->sis_tx_list[i].sis_next = htole32(SIS_ADDR_LO(next));
1446	}
1447	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
1448	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1449	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1450
1451	sc->sis_rx_cons = 0;
1452	bzero(&sc->sis_rx_list[0], SIS_RX_LIST_SZ);
1453	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1454		rxd = &sc->sis_rxdesc[i];
1455		rxd->rx_desc = &sc->sis_rx_list[i];
1456		if (i == SIS_RX_LIST_CNT - 1)
1457			next = SIS_RX_RING_ADDR(sc, 0);
1458		else
1459			next = SIS_RX_RING_ADDR(sc, i + 1);
1460		rxd->rx_desc->sis_next = htole32(SIS_ADDR_LO(next));
1461		error = sis_newbuf(sc, rxd);
1462		if (error)
1463			return (error);
1464	}
1465	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1466	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1467
1468	return (0);
1469}
1470
1471/*
1472 * Initialize an RX descriptor and attach an MBUF cluster.
1473 */
1474static int
1475sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
1476{
1477	struct mbuf		*m;
1478	bus_dma_segment_t	segs[1];
1479	bus_dmamap_t		map;
1480	int nsegs;
1481
1482	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1483	if (m == NULL)
1484		return (ENOBUFS);
1485	m->m_len = m->m_pkthdr.len = SIS_RXLEN;
1486#ifndef __NO_STRICT_ALIGNMENT
1487	m_adj(m, SIS_RX_BUF_ALIGN);
1488#endif
1489
1490	if (bus_dmamap_load_mbuf_sg(sc->sis_rx_tag, sc->sis_rx_sparemap, m,
1491	    segs, &nsegs, 0) != 0) {
1492		m_freem(m);
1493		return (ENOBUFS);
1494	}
1495	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1496
1497	if (rxd->rx_m != NULL) {
1498		bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
1499		    BUS_DMASYNC_POSTREAD);
1500		bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
1501	}
1502	map = rxd->rx_dmamap;
1503	rxd->rx_dmamap = sc->sis_rx_sparemap;
1504	sc->sis_rx_sparemap = map;
1505	bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
1506	rxd->rx_m = m;
1507	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1508	rxd->rx_desc->sis_ptr = htole32(SIS_ADDR_LO(segs[0].ds_addr));
1509	return (0);
1510}
1511
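/*
 * Recycle an RX descriptor whose mbuf is being kept (errored frame or
 * failed refill): rewriting the buffer length also clears the OWN and
 * status bits, handing the descriptor back to the chip unchanged.
 */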
1512static __inline void
1513sis_discard_rxbuf(struct sis_rxdesc *rxd)
1514{
1515
1516	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1517}
1518
1519#ifndef __NO_STRICT_ALIGNMENT
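/*
 * The chip requires RX buffers aligned to SIS_RX_BUF_ALIGN bytes, which
 * leaves the Ethernet payload misaligned for the IP stack.  On
 * strict-alignment architectures, shift the received frame back by
 * SIS_RX_BUF_ALIGN - ETHER_ALIGN bytes, one 16-bit word at a time, so the
 * IP header ends up properly aligned.
 */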
1520static __inline void
1521sis_fixup_rx(struct mbuf *m)
1522{
1523	uint16_t		*src, *dst;
1524	int			i;
1525
1526	src = mtod(m, uint16_t *);
1527	dst = src - (SIS_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);
1528
1529	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1530		*dst++ = *src++;
1531
1532	m->m_data -= SIS_RX_BUF_ALIGN - ETHER_ALIGN;
1533}
1534#endif
1535
1536/*
1537 * A frame has been uploaded: pass the resulting mbuf chain up to
1538 * the higher level protocols.
1539 */
1540static int
1541sis_rxeof(struct sis_softc *sc)
1542{
1543	struct mbuf		*m;
1544	struct ifnet		*ifp;
1545	struct sis_rxdesc	*rxd;
1546	struct sis_desc		*cur_rx;
1547	int			prog, rx_cons, rx_npkts = 0, total_len;
1548	uint32_t		rxstat;
1549
1550	SIS_LOCK_ASSERT(sc);
1551
1552	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1553	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1554
1555	rx_cons = sc->sis_rx_cons;
1556	ifp = sc->sis_ifp;
1557
1558	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1559	    SIS_INC(rx_cons, SIS_RX_LIST_CNT), prog++) {
1560#ifdef DEVICE_POLLING
1561		if (ifp->if_capenable & IFCAP_POLLING) {
1562			if (sc->rxcycles <= 0)
1563				break;
1564			sc->rxcycles--;
1565		}
1566#endif
1567		cur_rx = &sc->sis_rx_list[rx_cons];
1568		rxstat = le32toh(cur_rx->sis_cmdsts);
1569		if ((rxstat & SIS_CMDSTS_OWN) == 0)
1570			break;
1571		rxd = &sc->sis_rxdesc[rx_cons];
1572
1573		total_len = (rxstat & SIS_CMDSTS_BUFLEN) - ETHER_CRC_LEN;
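		/*
		 * Frames that exceed the normal maximum only because of an
		 * 802.1Q tag are still acceptable when VLAN_MTU is enabled,
		 * so clear the giant-frame error bit for them.
		 */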
1574		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
1575		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
1576		    ETHER_CRC_LEN))
1577			rxstat &= ~SIS_RXSTAT_GIANT;
1578		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
1579			ifp->if_ierrors++;
1580			if (rxstat & SIS_RXSTAT_COLL)
1581				ifp->if_collisions++;
1582			sis_discard_rxbuf(rxd);
1583			continue;
1584		}
1585
1586		/* Add a new receive buffer to the ring. */
1587		m = rxd->rx_m;
1588		if (sis_newbuf(sc, rxd) != 0) {
1589			ifp->if_iqdrops++;
1590			sis_discard_rxbuf(rxd);
1591			continue;
1592		}
1593
1594		/* No errors; receive the packet. */
1595		m->m_pkthdr.len = m->m_len = total_len;
1596#ifndef __NO_STRICT_ALIGNMENT
1597		/*
1598		 * We allocate a new buffer for the receive ring and pass up
1599		 * the one the packet already sits in, saving an expensive
1600		 * copy.  On strict-alignment architectures the payload still
1601		 * has to be shifted into alignment first, which is done here.
1602		 */
1603		sis_fixup_rx(m);
1604#endif
1605		ifp->if_ipackets++;
1606		m->m_pkthdr.rcvif = ifp;
1607
1608		SIS_UNLOCK(sc);
1609		(*ifp->if_input)(ifp, m);
1610		SIS_LOCK(sc);
1611		rx_npkts++;
1612	}
1613
1614	if (prog > 0) {
1615		sc->sis_rx_cons = rx_cons;
1616		bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1617		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1618	}
1619
1620	return (rx_npkts);
1621}
1622
1623/*
1624 * A frame was downloaded to the chip. It's safe for us to clean up
1625 * the list buffers.
1626 */
1627
1628static void
1629sis_txeof(struct sis_softc *sc)
1630{
1631	struct ifnet		*ifp;
1632	struct sis_desc		*cur_tx;
1633	struct sis_txdesc	*txd;
1634	uint32_t		cons, txstat;
1635
1636	SIS_LOCK_ASSERT(sc);
1637
1638	cons = sc->sis_tx_cons;
1639	if (cons == sc->sis_tx_prod)
1640		return;
1641
1642	ifp = sc->sis_ifp;
1643	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1644	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1645
1646	/*
1647	 * Go through our tx list and free mbufs for those
1648	 * frames that have been transmitted.
1649	 */
1650	for (; cons != sc->sis_tx_prod; SIS_INC(cons, SIS_TX_LIST_CNT)) {
1651		cur_tx = &sc->sis_tx_list[cons];
1652		txstat = le32toh(cur_tx->sis_cmdsts);
1653		if ((txstat & SIS_CMDSTS_OWN) != 0)
1654			break;
1655		txd = &sc->sis_txdesc[cons];
1656		if (txd->tx_m != NULL) {
1657			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
1658			    BUS_DMASYNC_POSTWRITE);
1659			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1660			m_freem(txd->tx_m);
1661			txd->tx_m = NULL;
1662			if ((txstat & SIS_CMDSTS_PKT_OK) != 0) {
1663				ifp->if_opackets++;
1664				ifp->if_collisions +=
1665				    (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1666			} else {
1667				ifp->if_oerrors++;
1668				if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1669					ifp->if_collisions++;
1670				if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1671					ifp->if_collisions++;
1672			}
1673		}
1674		sc->sis_tx_cnt--;
1675		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1676	}
1677	sc->sis_tx_cons = cons;
1678	if (sc->sis_tx_cnt == 0)
1679		sc->sis_watchdog_timer = 0;
1680}
1681
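/*
 * Once-a-second timer: poll the PHY, run the transmit watchdog and, until
 * a link has been seen, re-run the media status handler so the MAC gets
 * programmed as soon as the link comes up.
 */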
1682static void
1683sis_tick(void *xsc)
1684{
1685	struct sis_softc	*sc;
1686	struct mii_data		*mii;
1687	struct ifnet		*ifp;
1688
1689	sc = xsc;
1690	SIS_LOCK_ASSERT(sc);
1691	ifp = sc->sis_ifp;
1692
1693	mii = device_get_softc(sc->sis_miibus);
1694	mii_tick(mii);
1695	sis_watchdog(sc);
1696	if (sc->sis_link == 0)
1697		sis_miibus_statchg(sc->sis_dev);
1698	callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
1699}
1700
1701#ifdef DEVICE_POLLING
1702static poll_handler_t sis_poll;
1703
1704static int
1705sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1706{
1707	struct	sis_softc *sc = ifp->if_softc;
1708	int rx_npkts = 0;
1709
1710	SIS_LOCK(sc);
1711	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1712		SIS_UNLOCK(sc);
1713		return (rx_npkts);
1714	}
1715
1716	/*
1717	 * On the sis, reading the status register also clears it.
1718	 * So before returning to intr mode we must make sure that all
1719	 * possible pending sources of interrupts have been served.
1720	 * In practice this means running the *eof routines to completion
1721	 * and then checking the ISR below for any remaining sources.
1722	 */
1723	sc->rxcycles = count;
1724	rx_npkts = sis_rxeof(sc);
1725	sis_txeof(sc);
1726	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1727		sis_startl(ifp);
1728
1729	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1730		uint32_t	status;
1731
1732		/* Reading the ISR register clears all interrupts. */
1733		status = CSR_READ_4(sc, SIS_ISR);
1734
1735		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1736			ifp->if_ierrors++;
1737
1738		if (status & (SIS_ISR_RX_IDLE))
1739			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1740
1741		if (status & SIS_ISR_SYSERR) {
1742			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1743			sis_initl(sc);
1744		}
1745	}
1746
1747	SIS_UNLOCK(sc);
1748	return (rx_npkts);
1749}
1750#endif /* DEVICE_POLLING */
1751
1752static void
1753sis_intr(void *arg)
1754{
1755	struct sis_softc	*sc;
1756	struct ifnet		*ifp;
1757	uint32_t		status;
1758
1759	sc = arg;
1760	ifp = sc->sis_ifp;
1761
1762	SIS_LOCK(sc);
1763#ifdef DEVICE_POLLING
1764	if (ifp->if_capenable & IFCAP_POLLING) {
1765		SIS_UNLOCK(sc);
1766		return;
1767	}
1768#endif
1769
1770	/* Reading the ISR register clears all interrupts. */
1771	status = CSR_READ_4(sc, SIS_ISR);
1772	if ((status & SIS_INTRS) == 0) {
1773		/* Not ours. */
1774		SIS_UNLOCK(sc);
		return;
1775	}
1776
1777	/* Disable interrupts. */
1778	CSR_WRITE_4(sc, SIS_IER, 0);
1779
1780	for (;(status & SIS_INTRS) != 0;) {
1781		if (status &
1782		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1783		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
1784			sis_txeof(sc);
1785
1786		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1787		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1788			sis_rxeof(sc);
1789
1790		if (status & SIS_ISR_RX_OFLOW)
1791			ifp->if_ierrors++;
1792
1793		if (status & (SIS_ISR_RX_IDLE))
1794			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1795
1796		if (status & SIS_ISR_SYSERR) {
1797			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1798			sis_initl(sc);
1799			SIS_UNLOCK(sc);
1800			return;
1801		}
1802		status = CSR_READ_4(sc, SIS_ISR);
1803	}
1804
1805	/* Re-enable interrupts. */
1806	CSR_WRITE_4(sc, SIS_IER, 1);
1807
1808	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1809		sis_startl(ifp);
1810
1811	SIS_UNLOCK(sc);
1812}
1813
1814/*
1815 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1816 * pointers to the fragment pointers.
1817 */
1818static int
1819sis_encap(struct sis_softc *sc, struct mbuf **m_head)
1820{
1821	struct mbuf		*m;
1822	struct sis_txdesc	*txd;
1823	struct sis_desc		*f;
1824	bus_dma_segment_t	segs[SIS_MAXTXSEGS];
1825	bus_dmamap_t		map;
1826	int			error, i, frag, nsegs, prod;
1827
1828	prod = sc->sis_tx_prod;
1829	txd = &sc->sis_txdesc[prod];
1830	error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1831	    *m_head, segs, &nsegs, 0);
1832	if (error == EFBIG) {
1833		m = m_collapse(*m_head, M_DONTWAIT, SIS_MAXTXSEGS);
1834		if (m == NULL) {
1835			m_freem(*m_head);
1836			*m_head = NULL;
1837			return (ENOBUFS);
1838		}
1839		*m_head = m;
1840		error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1841		    *m_head, segs, &nsegs, 0);
1842		if (error != 0) {
1843			m_freem(*m_head);
1844			*m_head = NULL;
1845			return (error);
1846		}
1847	} else if (error != 0)
1848		return (error);
1849
1850	/* Check for descriptor overruns. */
1851	if (sc->sis_tx_cnt + nsegs > SIS_TX_LIST_CNT - 1) {
1852		bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1853		return (ENOBUFS);
1854	}
1855
1856	bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE);
1857
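	/*
	 * Build the descriptor chain.  The first descriptor is filled in
	 * without the OWN bit so the chip cannot start on a partially
	 * built chain; ownership is handed over below once all segments
	 * are in place.
	 */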
1858	frag = prod;
1859	for (i = 0; i < nsegs; i++) {
1860		f = &sc->sis_tx_list[prod];
1861		if (i == 0)
1862			f->sis_cmdsts = htole32(segs[i].ds_len |
1863			    SIS_CMDSTS_MORE);
1864		else
1865			f->sis_cmdsts = htole32(segs[i].ds_len |
1866			    SIS_CMDSTS_OWN | SIS_CMDSTS_MORE);
1867		f->sis_ptr = htole32(SIS_ADDR_LO(segs[i].ds_addr));
1868		SIS_INC(prod, SIS_TX_LIST_CNT);
1869		sc->sis_tx_cnt++;
1870	}
1871
1872	/* Update producer index. */
1873	sc->sis_tx_prod = prod;
1874
1875	/* Remove MORE flag on the last descriptor. */
1876	prod = (prod - 1) & (SIS_TX_LIST_CNT - 1);
1877	f = &sc->sis_tx_list[prod];
1878	f->sis_cmdsts &= ~htole32(SIS_CMDSTS_MORE);
1879
1880	/* Lastly transfer ownership of packet to the controller. */
1881	f = &sc->sis_tx_list[frag];
1882	f->sis_cmdsts |= htole32(SIS_CMDSTS_OWN);
1883
1884	/* Swap the last and the first dmamaps. */
1885	map = txd->tx_dmamap;
1886	txd->tx_dmamap = sc->sis_txdesc[frag].tx_dmamap;
1887	sc->sis_txdesc[frag].tx_dmamap = map;
1888	txd->tx_m = *m_head;
1889
1890	return (0);
1891}
1892
1893static void
1894sis_start(struct ifnet *ifp)
1895{
1896	struct sis_softc	*sc;
1897
1898	sc = ifp->if_softc;
1899	SIS_LOCK(sc);
1900	sis_startl(ifp);
1901	SIS_UNLOCK(sc);
1902}
1903
1904static void
1905sis_startl(struct ifnet *ifp)
1906{
1907	struct sis_softc	*sc;
1908	struct mbuf		*m_head;
1909	int			queued;
1910
1911	sc = ifp->if_softc;
1912
1913	SIS_LOCK_ASSERT(sc);
1914
1915	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1916	    IFF_DRV_RUNNING || sc->sis_link == 0)
1917		return;
1918
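	/*
	 * Queue frames as long as a small reserve of TX descriptors remains
	 * free; a single frame may consume several descriptors if its mbuf
	 * chain maps to multiple DMA segments.
	 */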
1919	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1920	    sc->sis_tx_cnt < SIS_TX_LIST_CNT - 4;) {
1921		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1922		if (m_head == NULL)
1923			break;
1924
1925		if (sis_encap(sc, &m_head) != 0) {
1926			if (m_head == NULL)
1927				break;
1928			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1929			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1930			break;
1931		}
1932
1933		queued++;
1934
1935		/*
1936		 * If there's a BPF listener, bounce a copy of this frame
1937		 * to him.
1938		 */
1939		BPF_MTAP(ifp, m_head);
1940	}
1941
1942	if (queued) {
1943		/* Transmit */
1944		bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1945		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1946		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1947
1948		/*
1949		 * Set a timeout in case the chip goes out to lunch.
1950		 */
1951		sc->sis_watchdog_timer = 5;
1952	}
1953}
1954
1955static void
1956sis_init(void *xsc)
1957{
1958	struct sis_softc	*sc = xsc;
1959
1960	SIS_LOCK(sc);
1961	sis_initl(sc);
1962	SIS_UNLOCK(sc);
1963}
1964
1965static void
1966sis_initl(struct sis_softc *sc)
1967{
1968	struct ifnet		*ifp = sc->sis_ifp;
1969	struct mii_data		*mii;
1970
1971	SIS_LOCK_ASSERT(sc);
1972
1973	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1974		return;
1975
1976	/*
1977	 * Cancel pending I/O and free all RX/TX buffers.
1978	 */
1979	sis_stop(sc);
1980	/*
1981	 * Reset the chip to a known state.
1982	 */
1983	sis_reset(sc);
1984#ifdef notyet
1985	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
1986		/*
1987		 * Configure 400usec of interrupt holdoff.  This is based
1988		 * on empirical tests on a Soekris 4801.
1989 		 */
1990		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
1991	}
1992#endif
1993
1994	mii = device_get_softc(sc->sis_miibus);
1995
1996	/* Set MAC address */
1997	if (sc->sis_type == SIS_TYPE_83815) {
1998		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1999		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2000		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[0]);
2001		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
2002		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2003		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[1]);
2004		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
2005		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2006		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[2]);
2007	} else {
2008		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
2009		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2010		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[0]);
2011		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
2012		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2013		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[1]);
2014		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
2015		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2016		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[2]);
2017	}
2018
2019	/* Init circular TX/RX lists. */
2020	if (sis_ring_init(sc) != 0) {
2021		device_printf(sc->sis_dev,
2022		    "initialization failed: no memory for rx buffers\n");
2023		sis_stop(sc);
2024		return;
2025	}
2026
2027	/*
2028	 * Short Cable Receive Errors (MP21.E)
2029	 * also: Page 78 of the DP83815 data sheet (September 2002 version)
2030	 * recommends the following register settings "for optimum
2031	 * performance" for rev 15C.  Set this also for 15D parts as
2032	 * they require it in practice.
2033	 */
2034	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
2035		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2036		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
2037		/* set val for c2 */
2038		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
2039		/* load/kill c2 */
2040		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
2041		/* raise SD off, from 4 to c */
2042		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
2043		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
2044	}
2045
2046	/*
2047	 * For the NatSemi chip, we have to explicitly enable the
2048	 * reception of ARP frames, as well as turn on the 'perfect
2049	 * match' filter where we store the station address, otherwise
2050	 * we won't receive unicasts meant for this host.
2051	 */
2052	if (sc->sis_type == SIS_TYPE_83815) {
2053		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
2054		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
2055	}
2056
2057	/* If we want promiscuous mode, set the allframes bit. */
2058	if (ifp->if_flags & IFF_PROMISC) {
2059		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2060	} else {
2061		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2062	}
2063
2064	/*
2065	 * Set the capture broadcast bit to capture broadcast frames.
2066	 */
2067	if (ifp->if_flags & IFF_BROADCAST) {
2068		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2069	} else {
2070		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2071	}
2072
2073	/*
2074	 * Load the multicast filter.
2075	 */
2076	if (sc->sis_type == SIS_TYPE_83815)
2077		sis_setmulti_ns(sc);
2078	else
2079		sis_setmulti_sis(sc);
2080
2081	/* Turn the receive filter on */
2082	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
2083
2084	/*
2085	 * Load the address of the RX and TX lists.
2086	 */
2087	CSR_WRITE_4(sc, SIS_RX_LISTPTR, SIS_ADDR_LO(sc->sis_rx_paddr));
2088	CSR_WRITE_4(sc, SIS_TX_LISTPTR, SIS_ADDR_LO(sc->sis_tx_paddr));
2089
2090	/*
2091	 * SIS_CFG_EDB_MASTER_EN: the EDB bus is used instead of the PCI
2092	 * bus; limit the max TX/RX DMA burst size to 16 double words.
2093	 */
2094	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
2095		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
2096	} else {
2097		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
2098	}
2099
2100	/* Accept Long Packets for VLAN support */
2101	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
2102
2103	/*
2104	 * Assume a 100Mbps link; the actual MAC configuration is done
2105	 * after a valid link has been established.
2106	 */
2107	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2108
2109	/*
2110	 * Enable interrupts.
2111	 */
2112	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
2113#ifdef DEVICE_POLLING
2114	/*
2115	 * ... only enable interrupts if we are not polling; make sure
2116	 * they are off otherwise.
2117	 */
2118	if (ifp->if_capenable & IFCAP_POLLING)
2119		CSR_WRITE_4(sc, SIS_IER, 0);
2120	else
2121#endif
2122	CSR_WRITE_4(sc, SIS_IER, 1);
2123
2124	/* Clear MAC disable. */
2125	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2126
2127	sc->sis_link = 0;
2128	mii_mediachg(mii);
2129
2130	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2131	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2132
2133	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
2134}
2135
2136/*
2137 * Set media options.
2138 */
2139static int
2140sis_ifmedia_upd(struct ifnet *ifp)
2141{
2142	struct sis_softc	*sc;
2143	struct mii_data		*mii;
2144	int			error;
2145
2146	sc = ifp->if_softc;
2147
2148	SIS_LOCK(sc);
2149	mii = device_get_softc(sc->sis_miibus);
2150	sc->sis_link = 0;
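	/* If any PHYs are attached, reset them before changing media. */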
2151	if (mii->mii_instance) {
2152		struct mii_softc	*miisc;
2153		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2154			mii_phy_reset(miisc);
2155	}
2156	error = mii_mediachg(mii);
2157	SIS_UNLOCK(sc);
2158
2159	return (error);
2160}
2161
2162/*
2163 * Report current media status.
2164 */
2165static void
2166sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2167{
2168	struct sis_softc	*sc;
2169	struct mii_data		*mii;
2170
2171	sc = ifp->if_softc;
2172
2173	SIS_LOCK(sc);
2174	mii = device_get_softc(sc->sis_miibus);
2175	mii_pollstat(mii);
2176	ifmr->ifm_active = mii->mii_media_active;
2177	ifmr->ifm_status = mii->mii_media_status;
2178	SIS_UNLOCK(sc);
2179}
2180
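/*
 * Handle interface ioctls: interface flag changes, multicast filter
 * updates, media requests and polling capability changes; anything else
 * is passed on to ether_ioctl().
 */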
2181static int
2182sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2183{
2184	struct sis_softc	*sc = ifp->if_softc;
2185	struct ifreq		*ifr = (struct ifreq *) data;
2186	struct mii_data		*mii;
2187	int			error = 0;
2188
2189	switch (command) {
2190	case SIOCSIFFLAGS:
2191		SIS_LOCK(sc);
2192		if (ifp->if_flags & IFF_UP) {
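			/*
			 * If the interface is already running and only the
			 * PROMISC or ALLMULTI flags changed, just reprogram
			 * the receive filter; otherwise do a full reinit.
			 */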
2193			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2194			    ((ifp->if_flags ^ sc->sis_if_flags) &
2195			    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2196				if (sc->sis_type == SIS_TYPE_83815)
2197					sis_setmulti_ns(sc);
2198				else
2199					sis_setmulti_sis(sc);
2200			} else
2201				sis_initl(sc);
2202		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2203			sis_stop(sc);
2204		}
2205		sc->sis_if_flags = ifp->if_flags;
2206		SIS_UNLOCK(sc);
2207		error = 0;
2208		break;
2209	case SIOCADDMULTI:
2210	case SIOCDELMULTI:
2211		SIS_LOCK(sc);
2212		if (sc->sis_type == SIS_TYPE_83815)
2213			sis_setmulti_ns(sc);
2214		else
2215			sis_setmulti_sis(sc);
2216		SIS_UNLOCK(sc);
2217		break;
2218	case SIOCGIFMEDIA:
2219	case SIOCSIFMEDIA:
2220		mii = device_get_softc(sc->sis_miibus);
2221		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2222		break;
2223	case SIOCSIFCAP:
2224		/* Toggle polling; enable or disable interrupts to match. */
2225#ifdef DEVICE_POLLING
2226		if (ifr->ifr_reqcap & IFCAP_POLLING &&
2227		    !(ifp->if_capenable & IFCAP_POLLING)) {
2228			error = ether_poll_register(sis_poll, ifp);
2229			if (error)
2230				return (error);
2231			SIS_LOCK(sc);
2232			/* Disable interrupts */
2233			CSR_WRITE_4(sc, SIS_IER, 0);
2234			ifp->if_capenable |= IFCAP_POLLING;
2235			SIS_UNLOCK(sc);
2236			return (error);
2237
2238		}
2239		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
2240		    ifp->if_capenable & IFCAP_POLLING) {
2241			error = ether_poll_deregister(ifp);
2242			/* Enable interrupts. */
2243			SIS_LOCK(sc);
2244			CSR_WRITE_4(sc, SIS_IER, 1);
2245			ifp->if_capenable &= ~IFCAP_POLLING;
2246			SIS_UNLOCK(sc);
2247			return (error);
2248		}
2249#endif /* DEVICE_POLLING */
2250		break;
2251	default:
2252		error = ether_ioctl(ifp, command, data);
2253		break;
2254	}
2255
2256	return (error);
2257}
2258
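/*
 * TX watchdog: decrement the timer armed by sis_startl() and, once it
 * expires, assume the transmitter is wedged, count the error and
 * reinitialize the chip.
 */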
2259static void
2260sis_watchdog(struct sis_softc *sc)
2261{
2262
2263	SIS_LOCK_ASSERT(sc);
2264
2265	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
2266		return;
2267
2268	device_printf(sc->sis_dev, "watchdog timeout\n");
2269	sc->sis_ifp->if_oerrors++;
2270
2271	sc->sis_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2272	sis_initl(sc);
2273
2274	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
2275		sis_startl(sc->sis_ifp);
2276}
2277
2278/*
2279 * Stop the adapter and free any mbufs allocated to the
2280 * RX and TX lists.
2281 */
2282static void
2283sis_stop(struct sis_softc *sc)
2284{
2285	struct ifnet *ifp;
2286	struct sis_rxdesc *rxd;
2287	struct sis_txdesc *txd;
2288	int i;
2289
2290	SIS_LOCK_ASSERT(sc);
2291
2292	ifp = sc->sis_ifp;
2293	sc->sis_watchdog_timer = 0;
2294
2295	callout_stop(&sc->sis_stat_ch);
2296
2297	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2298	CSR_WRITE_4(sc, SIS_IER, 0);
2299	CSR_WRITE_4(sc, SIS_IMR, 0);
2300	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2301	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
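	/* Give the transmitter and receiver time to come to a stop. */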
2302	DELAY(1000);
2303	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2304	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2305
2306	sc->sis_link = 0;
2307
2308	/*
2309	 * Free data in the RX lists.
2310	 */
2311	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2312		rxd = &sc->sis_rxdesc[i];
2313		if (rxd->rx_m != NULL) {
2314			bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
2315			    BUS_DMASYNC_POSTREAD);
2316			bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
2317			m_freem(rxd->rx_m);
2318			rxd->rx_m = NULL;
2319		}
2320	}
2321
2322	/*
2323	 * Free the TX list buffers.
2324	 */
2325	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2326		txd = &sc->sis_txdesc[i];
2327		if (txd->tx_m != NULL) {
2328			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
2329			    BUS_DMASYNC_POSTWRITE);
2330			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
2331			m_freem(txd->tx_m);
2332			txd->tx_m = NULL;
2333		}
2334	}
2335}
2336
2337/*
2338 * Stop all chip I/O so that the kernel's probe routines don't
2339 * get confused by errant DMAs when rebooting.
2340 */
2341static int
2342sis_shutdown(device_t dev)
2343{
2344	struct sis_softc	*sc;
2345
2346	sc = device_get_softc(dev);
2347	SIS_LOCK(sc);
2348	sis_stop(sc);
2349	SIS_UNLOCK(sc);
2350	return (0);
2351}
2352
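/*
 * Newbus and miibus glue: device method table, driver declaration and
 * module registration for the sis(4) driver.
 */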
2353static device_method_t sis_methods[] = {
2354	/* Device interface */
2355	DEVMETHOD(device_probe,		sis_probe),
2356	DEVMETHOD(device_attach,	sis_attach),
2357	DEVMETHOD(device_detach,	sis_detach),
2358	DEVMETHOD(device_shutdown,	sis_shutdown),
2359
2360	/* bus interface */
2361	DEVMETHOD(bus_print_child,	bus_generic_print_child),
2362	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
2363
2364	/* MII interface */
2365	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
2366	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
2367	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
2368
2369	{ 0, 0 }
2370};
2371
2372static driver_t sis_driver = {
2373	"sis",
2374	sis_methods,
2375	sizeof(struct sis_softc)
2376};
2377
2378static devclass_t sis_devclass;
2379
2380DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
2381DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
2382
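/*
 * Usage note: once built as a module, the driver can be loaded with
 * "kldload if_sis"; alternatively, add "device miibus" and "device sis"
 * to the kernel configuration to compile it in statically.
 */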