if_sis.c revision 214089
1/*-
2 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/sis/if_sis.c 214089 2010-10-20 00:19:25Z yongari $");
36
37/*
38 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
39 * available from http://www.sis.com.tw.
40 *
41 * This driver also supports the NatSemi DP83815. Datasheets are
42 * available from http://www.national.com.
43 *
44 * Written by Bill Paul <wpaul@ee.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
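/*
 * For illustration only: the "3 longwords" above amount to a link pointer,
 * a command/status word and a buffer pointer.  The authoritative layout
 * lives in dev/sis/if_sisreg.h; this sketch merely mirrors the field names
 * used below in sis_ring_init(), sis_newbuf() and sis_encap():
 *
 *	struct sis_desc {
 *		uint32_t	sis_next;	-- link to the next descriptor
 *		uint32_t	sis_cmdsts;	-- OWN bit, length and status
 *		uint32_t	sis_ptr;	-- DMA address of the data buffer
 *	};
 */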
60
61#ifdef HAVE_KERNEL_OPTION_HEADERS
62#include "opt_device_polling.h"
63#endif
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/bus.h>
68#include <sys/endian.h>
69#include <sys/kernel.h>
70#include <sys/lock.h>
71#include <sys/malloc.h>
72#include <sys/mbuf.h>
73#include <sys/module.h>
74#include <sys/socket.h>
75#include <sys/sockio.h>
76#include <sys/sysctl.h>
77
78#include <net/if.h>
79#include <net/if_arp.h>
80#include <net/ethernet.h>
81#include <net/if_dl.h>
82#include <net/if_media.h>
83#include <net/if_types.h>
84#include <net/if_vlan_var.h>
85
86#include <net/bpf.h>
87
88#include <machine/bus.h>
89#include <machine/resource.h>
90#include <sys/bus.h>
91#include <sys/rman.h>
92
93#include <dev/mii/mii.h>
94#include <dev/mii/miivar.h>
95
96#include <dev/pci/pcireg.h>
97#include <dev/pci/pcivar.h>
98
99#define SIS_USEIOSPACE
100
101#include <dev/sis/if_sisreg.h>
102
103MODULE_DEPEND(sis, pci, 1, 1, 1);
104MODULE_DEPEND(sis, ether, 1, 1, 1);
105MODULE_DEPEND(sis, miibus, 1, 1, 1);
106
107/* "device miibus" required.  See GENERIC if you get errors here. */
108#include "miibus_if.h"
109
110#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
111#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
112#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)
113
114/*
115 * register space access macros
116 */
117#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)
118
119#define CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)
120
121#define CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)
122
123/*
124 * Various supported device vendors/types and their names.
125 */
126static struct sis_type sis_devs[] = {
127	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
128	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
129	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
130	{ 0, 0, NULL }
131};
132
133static int sis_detach(device_t);
134static __inline void sis_discard_rxbuf(struct sis_rxdesc *);
135static int sis_dma_alloc(struct sis_softc *);
136static void sis_dma_free(struct sis_softc *);
137static int sis_dma_ring_alloc(struct sis_softc *, bus_size_t, bus_size_t,
138    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
139static void sis_dmamap_cb(void *, bus_dma_segment_t *, int, int);
140#ifndef __NO_STRICT_ALIGNMENT
141static __inline void sis_fixup_rx(struct mbuf *);
142#endif
143static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
144static int sis_ifmedia_upd(struct ifnet *);
145static void sis_init(void *);
146static void sis_initl(struct sis_softc *);
147static void sis_intr(void *);
148static int sis_ioctl(struct ifnet *, u_long, caddr_t);
149static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
150static int sis_resume(device_t);
151static int sis_rxeof(struct sis_softc *);
152static void sis_start(struct ifnet *);
153static void sis_startl(struct ifnet *);
154static void sis_stop(struct sis_softc *);
155static int sis_suspend(device_t);
156static void sis_add_sysctls(struct sis_softc *);
157static void sis_watchdog(struct sis_softc *);
158static void sis_wol(struct sis_softc *);
159
160
161static struct resource_spec sis_res_spec[] = {
162#ifdef SIS_USEIOSPACE
163	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
164#else
165	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
166#endif
167	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
168	{ -1, 0 }
169};
170
171#define SIS_SETBIT(sc, reg, x)				\
172	CSR_WRITE_4(sc, reg,				\
173		CSR_READ_4(sc, reg) | (x))
174
175#define SIS_CLRBIT(sc, reg, x)				\
176	CSR_WRITE_4(sc, reg,				\
177		CSR_READ_4(sc, reg) & ~(x))
178
179#define SIO_SET(x)					\
180	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
181
182#define SIO_CLR(x)					\
183	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
184
185/*
186 * Routine to reverse the bits in a word. Stolen almost
187 * verbatim from /usr/games/fortune.
188 */
189static uint16_t
190sis_reverse(uint16_t n)
191{
192	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
193	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
194	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
195	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
196
197	return (n);
198}
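/*
 * Worked example: sis_reverse(0x0001) returns 0x8000 and sis_reverse(0x8000)
 * returns 0x0001; each pass above swaps progressively larger bit groups
 * until the whole 16-bit word is mirrored.
 */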
199
200static void
201sis_delay(struct sis_softc *sc)
202{
203	int			idx;
204
205	for (idx = (300 / 33) + 1; idx > 0; idx--)
206		CSR_READ_4(sc, SIS_CSR);
207}
208
209static void
210sis_eeprom_idle(struct sis_softc *sc)
211{
212	int		i;
213
214	SIO_SET(SIS_EECTL_CSEL);
215	sis_delay(sc);
216	SIO_SET(SIS_EECTL_CLK);
217	sis_delay(sc);
218
219	for (i = 0; i < 25; i++) {
220		SIO_CLR(SIS_EECTL_CLK);
221		sis_delay(sc);
222		SIO_SET(SIS_EECTL_CLK);
223		sis_delay(sc);
224	}
225
226	SIO_CLR(SIS_EECTL_CLK);
227	sis_delay(sc);
228	SIO_CLR(SIS_EECTL_CSEL);
229	sis_delay(sc);
230	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
231}
232
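/*
 * A note on the handshake used by the two routines below: this looks like
 * the usual Microwire-style (93Cxx) serial EEPROM protocol.  The start bit,
 * read opcode and word address are clocked in MSB first (11 bits here, via
 * SIS_EECMD_READ | addr), after which the part shifts out 16 data bits that
 * sis_eeprom_getword() samples on SIS_EECTL_DOUT.  The 93Cxx framing is an
 * assumption drawn from the bit counts, not something stated in this file.
 */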
233/*
234 * Send a read command and address to the EEPROM, check for ACK.
235 */
236static void
237sis_eeprom_putbyte(struct sis_softc *sc, int addr)
238{
239	int		d, i;
240
241	d = addr | SIS_EECMD_READ;
242
243	/*
244	 * Feed in each bit and strobe the clock.
245	 */
246	for (i = 0x400; i; i >>= 1) {
247		if (d & i) {
248			SIO_SET(SIS_EECTL_DIN);
249		} else {
250			SIO_CLR(SIS_EECTL_DIN);
251		}
252		sis_delay(sc);
253		SIO_SET(SIS_EECTL_CLK);
254		sis_delay(sc);
255		SIO_CLR(SIS_EECTL_CLK);
256		sis_delay(sc);
257	}
258}
259
260/*
261 * Read a word of data stored in the EEPROM at address 'addr.'
262 */
263static void
264sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
265{
266	int		i;
267	uint16_t	word = 0;
268
269	/* Force EEPROM to idle state. */
270	sis_eeprom_idle(sc);
271
272	/* Enter EEPROM access mode. */
273	sis_delay(sc);
274	SIO_CLR(SIS_EECTL_CLK);
275	sis_delay(sc);
276	SIO_SET(SIS_EECTL_CSEL);
277	sis_delay(sc);
278
279	/*
280	 * Send address of word we want to read.
281	 */
282	sis_eeprom_putbyte(sc, addr);
283
284	/*
285	 * Start reading bits from EEPROM.
286	 */
287	for (i = 0x8000; i; i >>= 1) {
288		SIO_SET(SIS_EECTL_CLK);
289		sis_delay(sc);
290		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
291			word |= i;
292		sis_delay(sc);
293		SIO_CLR(SIS_EECTL_CLK);
294		sis_delay(sc);
295	}
296
297	/* Turn off EEPROM access mode. */
298	sis_eeprom_idle(sc);
299
300	*dest = word;
301}
302
303/*
304 * Read a sequence of words from the EEPROM.
305 */
306static void
307sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
308{
309	int			i;
310	uint16_t		word = 0, *ptr;
311
312	for (i = 0; i < cnt; i++) {
313		sis_eeprom_getword(sc, off + i, &word);
314		ptr = (uint16_t *)(dest + (i * 2));
315		if (swap)
316			*ptr = ntohs(word);
317		else
318			*ptr = word;
319	}
320}
321
322#if defined(__i386__) || defined(__amd64__)
323static device_t
324sis_find_bridge(device_t dev)
325{
326	devclass_t		pci_devclass;
327	device_t		*pci_devices;
328	int			pci_count = 0;
329	device_t		*pci_children;
330	int			pci_childcount = 0;
331	device_t		*busp, *childp;
332	device_t		child = NULL;
333	int			i, j;
334
335	if ((pci_devclass = devclass_find("pci")) == NULL)
336		return (NULL);
337
338	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
339
340	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
341		if (device_get_children(*busp, &pci_children, &pci_childcount))
342			continue;
343		for (j = 0, childp = pci_children;
344		    j < pci_childcount; j++, childp++) {
345			if (pci_get_vendor(*childp) == SIS_VENDORID &&
346			    pci_get_device(*childp) == 0x0008) {
347				child = *childp;
348				free(pci_children, M_TEMP);
349				goto done;
350			}
351		}
352		free(pci_children, M_TEMP);
353	}
354
355done:
356	free(pci_devices, M_TEMP);
357	return (child);
358}
359
360static void
361sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
362{
363	device_t		bridge;
364	uint8_t			reg;
365	int			i;
366	bus_space_tag_t		btag;
367
368	bridge = sis_find_bridge(dev);
369	if (bridge == NULL)
370		return;
371	reg = pci_read_config(bridge, 0x48, 1);
372	pci_write_config(bridge, 0x48, reg|0x40, 1);
373
374	/* XXX */
375#if defined(__i386__)
376	btag = I386_BUS_SPACE_IO;
377#elif defined(__amd64__)
378	btag = AMD64_BUS_SPACE_IO;
379#endif
380
381	for (i = 0; i < cnt; i++) {
382		bus_space_write_1(btag, 0x0, 0x70, i + off);
383		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
384	}
385
386	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
387}
388
389static void
390sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
391{
392	uint32_t		filtsave, csrsave;
393
394	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
395	csrsave = CSR_READ_4(sc, SIS_CSR);
396
397	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
398	CSR_WRITE_4(sc, SIS_CSR, 0);
399
400	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
401
402	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
403	((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
404	CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
405	((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
406	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
407	((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
408
409	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
410	CSR_WRITE_4(sc, SIS_CSR, csrsave);
411}
412#endif
413
414/*
415 * Sync the PHYs by setting data bit and strobing the clock 32 times.
416 */
417static void
418sis_mii_sync(struct sis_softc *sc)
419{
420	int		i;
421
422 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
423
424 	for (i = 0; i < 32; i++) {
425 		SIO_SET(SIS_MII_CLK);
426 		DELAY(1);
427 		SIO_CLR(SIS_MII_CLK);
428 		DELAY(1);
429 	}
430}
431
432/*
433 * Clock a series of bits through the MII.
434 */
435static void
436sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
437{
438	int			i;
439
440	SIO_CLR(SIS_MII_CLK);
441
442	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
443		if (bits & i) {
444			SIO_SET(SIS_MII_DATA);
445		} else {
446			SIO_CLR(SIS_MII_DATA);
447		}
448		DELAY(1);
449		SIO_CLR(SIS_MII_CLK);
450		DELAY(1);
451		SIO_SET(SIS_MII_CLK);
452	}
453}
454
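/*
 * The two routines below bit-bang what is, in effect, a standard IEEE 802.3
 * clause 22 MDIO management frame:
 *
 *	<preamble> <ST:2> <OP:2> <PHYAD:5> <REGAD:5> <TA:2> <DATA:16>
 *
 * SIS_MII_STARTDELIM and SIS_MII_READOP/SIS_MII_WRITEOP presumably encode
 * the standard ST=01 and OP=10 (read) / OP=01 (write) values; on a read the
 * PHY drives the turnaround and data bits, on a write the host does.
 */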
455/*
456 * Read a PHY register through the MII.
457 */
458static int
459sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
460{
461	int			i, ack;
462
463	/*
464	 * Set up frame for RX.
465	 */
466	frame->mii_stdelim = SIS_MII_STARTDELIM;
467	frame->mii_opcode = SIS_MII_READOP;
468	frame->mii_turnaround = 0;
469	frame->mii_data = 0;
470
471	/*
472 	 * Turn on data xmit.
473	 */
474	SIO_SET(SIS_MII_DIR);
475
476	sis_mii_sync(sc);
477
478	/*
479	 * Send command/address info.
480	 */
481	sis_mii_send(sc, frame->mii_stdelim, 2);
482	sis_mii_send(sc, frame->mii_opcode, 2);
483	sis_mii_send(sc, frame->mii_phyaddr, 5);
484	sis_mii_send(sc, frame->mii_regaddr, 5);
485
486	/* Idle bit */
487	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
488	DELAY(1);
489	SIO_SET(SIS_MII_CLK);
490	DELAY(1);
491
492	/* Turn off xmit. */
493	SIO_CLR(SIS_MII_DIR);
494
495	/* Check for ack */
496	SIO_CLR(SIS_MII_CLK);
497	DELAY(1);
498	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
499	SIO_SET(SIS_MII_CLK);
500	DELAY(1);
501
502	/*
503	 * Now try reading data bits. If the ack failed, we still
504	 * need to clock through 16 cycles to keep the PHY(s) in sync.
505	 */
506	if (ack) {
507		for (i = 0; i < 16; i++) {
508			SIO_CLR(SIS_MII_CLK);
509			DELAY(1);
510			SIO_SET(SIS_MII_CLK);
511			DELAY(1);
512		}
513		goto fail;
514	}
515
516	for (i = 0x8000; i; i >>= 1) {
517		SIO_CLR(SIS_MII_CLK);
518		DELAY(1);
519		if (!ack) {
520			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
521				frame->mii_data |= i;
522			DELAY(1);
523		}
524		SIO_SET(SIS_MII_CLK);
525		DELAY(1);
526	}
527
528fail:
529
530	SIO_CLR(SIS_MII_CLK);
531	DELAY(1);
532	SIO_SET(SIS_MII_CLK);
533	DELAY(1);
534
535	if (ack)
536		return (1);
537	return (0);
538}
539
540/*
541 * Write to a PHY register through the MII.
542 */
543static int
544sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
545{
546
547 	/*
548 	 * Set up frame for TX.
549 	 */
550
551 	frame->mii_stdelim = SIS_MII_STARTDELIM;
552 	frame->mii_opcode = SIS_MII_WRITEOP;
553 	frame->mii_turnaround = SIS_MII_TURNAROUND;
554
555 	/*
556  	 * Turn on data output.
557 	 */
558 	SIO_SET(SIS_MII_DIR);
559
560 	sis_mii_sync(sc);
561
562 	sis_mii_send(sc, frame->mii_stdelim, 2);
563 	sis_mii_send(sc, frame->mii_opcode, 2);
564 	sis_mii_send(sc, frame->mii_phyaddr, 5);
565 	sis_mii_send(sc, frame->mii_regaddr, 5);
566 	sis_mii_send(sc, frame->mii_turnaround, 2);
567 	sis_mii_send(sc, frame->mii_data, 16);
568
569 	/* Idle bit. */
570 	SIO_SET(SIS_MII_CLK);
571 	DELAY(1);
572 	SIO_CLR(SIS_MII_CLK);
573 	DELAY(1);
574
575 	/*
576 	 * Turn off xmit.
577 	 */
578 	SIO_CLR(SIS_MII_DIR);
579
580 	return (0);
581}
582
583static int
584sis_miibus_readreg(device_t dev, int phy, int reg)
585{
586	struct sis_softc	*sc;
587	struct sis_mii_frame    frame;
588
589	sc = device_get_softc(dev);
590
591	if (sc->sis_type == SIS_TYPE_83815) {
592		if (phy != 0)
593			return (0);
594		/*
595		 * The NatSemi chip can take a while after
596		 * a reset to come ready, during which the BMSR
597		 * returns a value of 0. This is *never* supposed
598		 * to happen: some of the BMSR bits are meant to
599		 * be hardwired in the on position, and this can
600		 * confuse the miibus code a bit during the probe
601		 * and attach phase. So we make an effort to check
602		 * for this condition and wait for it to clear.
603		 */
604		if (!CSR_READ_4(sc, NS_BMSR))
605			DELAY(1000);
606		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
607	}
608
609	/*
610	 * Chipsets < SIS_635 seem unable to read/write through
611	 * MDIO, so use the enhanced PHY access register for them
612	 * instead.
613	 */
614	if (sc->sis_type == SIS_TYPE_900 &&
615	    sc->sis_rev < SIS_REV_635) {
616		int i, val = 0;
617
618		if (phy != 0)
619			return (0);
620
621		CSR_WRITE_4(sc, SIS_PHYCTL,
622		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
623		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
624
625		for (i = 0; i < SIS_TIMEOUT; i++) {
626			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
627				break;
628		}
629
630		if (i == SIS_TIMEOUT) {
631			device_printf(sc->sis_dev, "PHY failed to come ready\n");
632			return (0);
633		}
634
635		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
636
637		if (val == 0xFFFF)
638			return (0);
639
640		return (val);
641	} else {
642		bzero((char *)&frame, sizeof(frame));
643
644		frame.mii_phyaddr = phy;
645		frame.mii_regaddr = reg;
646		sis_mii_readreg(sc, &frame);
647
648		return (frame.mii_data);
649	}
650}
651
652static int
653sis_miibus_writereg(device_t dev, int phy, int reg, int data)
654{
655	struct sis_softc	*sc;
656	struct sis_mii_frame	frame;
657
658	sc = device_get_softc(dev);
659
660	if (sc->sis_type == SIS_TYPE_83815) {
661		if (phy != 0)
662			return (0);
663		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
664		return (0);
665	}
666
667	/*
668	 * Chipsets < SIS_635 seem unable to read/write through
669	 * MDIO, so use the enhanced PHY access register for them
670	 * instead.
671	 */
672	if (sc->sis_type == SIS_TYPE_900 &&
673	    sc->sis_rev < SIS_REV_635) {
674		int i;
675
676		if (phy != 0)
677			return (0);
678
679		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
680		    (reg << 6) | SIS_PHYOP_WRITE);
681		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
682
683		for (i = 0; i < SIS_TIMEOUT; i++) {
684			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
685				break;
686		}
687
688		if (i == SIS_TIMEOUT)
689			device_printf(sc->sis_dev, "PHY failed to come ready\n");
690	} else {
691		bzero((char *)&frame, sizeof(frame));
692
693		frame.mii_phyaddr = phy;
694		frame.mii_regaddr = reg;
695		frame.mii_data = data;
696		sis_mii_writereg(sc, &frame);
697	}
698	return (0);
699}
700
701static void
702sis_miibus_statchg(device_t dev)
703{
704	struct sis_softc	*sc;
705	struct mii_data		*mii;
706	struct ifnet		*ifp;
707	uint32_t		reg;
708
709	sc = device_get_softc(dev);
710	SIS_LOCK_ASSERT(sc);
711
712	mii = device_get_softc(sc->sis_miibus);
713	ifp = sc->sis_ifp;
714	if (mii == NULL || ifp == NULL ||
715	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
716		return;
717
718	sc->sis_flags &= ~SIS_FLAG_LINK;
719	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
720	    (IFM_ACTIVE | IFM_AVALID)) {
721		switch (IFM_SUBTYPE(mii->mii_media_active)) {
722		case IFM_10_T:
723			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
724			sc->sis_flags |= SIS_FLAG_LINK;
725			break;
726		case IFM_100_TX:
727			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
728			sc->sis_flags |= SIS_FLAG_LINK;
729			break;
730		default:
731			break;
732		}
733	}
734
735	if ((sc->sis_flags & SIS_FLAG_LINK) == 0) {
736		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
737		 * SIS_RX_LISTPTR, which in turn requires resetting the
738		 * TX/RX buffers.  So just don't do anything when the
739		 * link is lost.
740		 * lost link.
741		 */
742		return;
743	}
744
745	/* Set full/half duplex mode. */
746	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
747		SIS_SETBIT(sc, SIS_TX_CFG,
748		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
749		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
750	} else {
751		SIS_CLRBIT(sc, SIS_TX_CFG,
752		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
753		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
754	}
755
756	if (sc->sis_type == SIS_TYPE_83816) {
757		/*
758		 * MPII03.D: Half Duplex Excessive Collisions.
759		 * Also page 49 in 83816 manual
760		 */
761		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
762	}
763
764	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
765	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
766		/*
767		 * Short Cable Receive Errors (MP21.E)
768		 */
769		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
770		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
771		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
772		DELAY(100);
773		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
774		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
775			device_printf(sc->sis_dev,
776			    "Applying short cable fix (reg=%x)\n", reg);
777			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
778			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
779		}
780		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
781	}
782	/* Enable TX/RX MACs. */
783	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
784	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
785}
786
787static uint32_t
788sis_mchash(struct sis_softc *sc, const uint8_t *addr)
789{
790	uint32_t		crc;
791
792	/* Compute CRC for the address value. */
793	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
794
795	/*
796	 * return the filter bit position
797	 *
798	 * The NatSemi chip has a 512-bit filter, which is
799	 * different from the SiS, so we special-case it.
800	 */
801	if (sc->sis_type == SIS_TYPE_83815)
802		return (crc >> 23);
803	else if (sc->sis_rev >= SIS_REV_635 ||
804	    sc->sis_rev == SIS_REV_900B)
805		return (crc >> 24);
806	else
807		return (crc >> 25);
808}
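/*
 * Example: on the NatSemi parts the hash is the top nine bits of the CRC,
 * i.e. a bit position within the 512-bit filter.  sis_setmulti_ns() below
 * derives a filter-memory offset (h >> 3) and a bit number (h & 0x1f) from
 * it, while sis_setmulti_sis() uses (h >> 4) and (h & 0xf) to index its
 * 16 (or 8) 16-bit hash words.
 */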
809
810static void
811sis_setmulti_ns(struct sis_softc *sc)
812{
813	struct ifnet		*ifp;
814	struct ifmultiaddr	*ifma;
815	uint32_t		h = 0, i, filtsave;
816	int			bit, index;
817
818	ifp = sc->sis_ifp;
819
820	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
821		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
822		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
823		return;
824	}
825
826	/*
827	 * We have to explicitly enable the multicast hash table
828	 * on the NatSemi chip if we want to use it, which we do.
829	 */
830	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
831	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
832
833	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
834
835	/* first, zot all the existing hash bits */
836	for (i = 0; i < 32; i++) {
837		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
838		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
839	}
840
841	if_maddr_rlock(ifp);
842	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
843		if (ifma->ifma_addr->sa_family != AF_LINK)
844			continue;
845		h = sis_mchash(sc,
846		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
847		index = h >> 3;
848		bit = h & 0x1F;
849		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
850		if (bit > 0xF)
851			bit -= 0x10;
852		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
853	}
854	if_maddr_runlock(ifp);
855
856	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
857}
858
859static void
860sis_setmulti_sis(struct sis_softc *sc)
861{
862	struct ifnet		*ifp;
863	struct ifmultiaddr	*ifma;
864	uint32_t		h, i, n, ctl;
865	uint16_t		hashes[16];
866
867	ifp = sc->sis_ifp;
868
869	/* hash table size */
870	if (sc->sis_rev >= SIS_REV_635 ||
871	    sc->sis_rev == SIS_REV_900B)
872		n = 16;
873	else
874		n = 8;
875
876	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
877
878	if (ifp->if_flags & IFF_BROADCAST)
879		ctl |= SIS_RXFILTCTL_BROAD;
880
881	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
882		ctl |= SIS_RXFILTCTL_ALLMULTI;
883		if (ifp->if_flags & IFF_PROMISC)
884			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
885		for (i = 0; i < n; i++)
886			hashes[i] = ~0;
887	} else {
888		for (i = 0; i < n; i++)
889			hashes[i] = 0;
890		i = 0;
891		if_maddr_rlock(ifp);
892		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
893			if (ifma->ifma_addr->sa_family != AF_LINK)
894				continue;
895			h = sis_mchash(sc,
896			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
897			hashes[h >> 4] |= 1 << (h & 0xf);
898			i++;
899		}
900		if_maddr_runlock(ifp);
901		if (i > n) {
902			ctl |= SIS_RXFILTCTL_ALLMULTI;
903			for (i = 0; i < n; i++)
904				hashes[i] = ~0;
905		}
906	}
907
908	for (i = 0; i < n; i++) {
909		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
910		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
911	}
912
913	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
914}
915
916static void
917sis_reset(struct sis_softc *sc)
918{
919	int		i;
920
921	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
922
923	for (i = 0; i < SIS_TIMEOUT; i++) {
924		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
925			break;
926	}
927
928	if (i == SIS_TIMEOUT)
929		device_printf(sc->sis_dev, "reset never completed\n");
930
931	/* Wait a little while for the chip to get its brains in order. */
932	DELAY(1000);
933
934	/*
935	 * If this is a NatSemi chip, make sure to clear
936	 * PME mode.
937	 */
938	if (sc->sis_type == SIS_TYPE_83815) {
939		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
940		CSR_WRITE_4(sc, NS_CLKRUN, 0);
941	} else {
942		/* Disable WOL functions. */
943		CSR_WRITE_4(sc, SIS_PWRMAN_CTL, 0);
944	}
945}
946
947/*
948 * Probe for an SiS chip. Check the PCI vendor and device
949 * IDs against our list and return a device name if we find a match.
950 */
951static int
952sis_probe(device_t dev)
953{
954	struct sis_type		*t;
955
956	t = sis_devs;
957
958	while (t->sis_name != NULL) {
959		if ((pci_get_vendor(dev) == t->sis_vid) &&
960		    (pci_get_device(dev) == t->sis_did)) {
961			device_set_desc(dev, t->sis_name);
962			return (BUS_PROBE_DEFAULT);
963		}
964		t++;
965	}
966
967	return (ENXIO);
968}
969
970/*
971 * Attach the interface. Allocate softc structures, do ifmedia
972 * setup and ethernet/BPF attach.
973 */
974static int
975sis_attach(device_t dev)
976{
977	u_char			eaddr[ETHER_ADDR_LEN];
978	struct sis_softc	*sc;
979	struct ifnet		*ifp;
980	int			error = 0, pmc, waittime = 0;
981
982	waittime = 0;
983	sc = device_get_softc(dev);
984
985	sc->sis_dev = dev;
986
987	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
988	    MTX_DEF);
989	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);
990
991	if (pci_get_device(dev) == SIS_DEVICEID_900)
992		sc->sis_type = SIS_TYPE_900;
993	if (pci_get_device(dev) == SIS_DEVICEID_7016)
994		sc->sis_type = SIS_TYPE_7016;
995	if (pci_get_vendor(dev) == NS_VENDORID)
996		sc->sis_type = SIS_TYPE_83815;
997
998	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
999	/*
1000	 * Map control/status registers.
1001	 */
1002	pci_enable_busmaster(dev);
1003
1004	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
1005	if (error) {
1006		device_printf(dev, "couldn't allocate resources\n");
1007		goto fail;
1008	}
1009
1010	/* Reset the adapter. */
1011	sis_reset(sc);
1012
1013	if (sc->sis_type == SIS_TYPE_900 &&
1014	    (sc->sis_rev == SIS_REV_635 ||
1015	    sc->sis_rev == SIS_REV_900B)) {
1016		SIO_SET(SIS_CFG_RND_CNT);
1017		SIO_SET(SIS_CFG_PERR_DETECT);
1018	}
1019
1020	/*
1021	 * Get station address from the EEPROM.
1022	 */
1023	switch (pci_get_vendor(dev)) {
1024	case NS_VENDORID:
1025		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1026
1027		/* We can't update the device description, so spew */
1028		if (sc->sis_srr == NS_SRR_15C)
1029			device_printf(dev, "Silicon Revision: DP83815C\n");
1030		else if (sc->sis_srr == NS_SRR_15D)
1031			device_printf(dev, "Silicon Revision: DP83815D\n");
1032		else if (sc->sis_srr == NS_SRR_16A)
1033			device_printf(dev, "Silicon Revision: DP83816A\n");
1034		else
1035			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
1036
1037		/*
1038		 * Reading the MAC address out of the EEPROM on
1039		 * the NatSemi chip takes a bit more work than
1040		 * you'd expect. The address spans 4 16-bit words,
1041		 * with the first word containing only a single bit.
1042		 * You have to shift everything over one bit to
1043		 * get it aligned properly. Also, the bits are
1044		 * stored backwards (the LSB is really the MSB,
1045		 * and so on) so you have to reverse them in order
1046		 * to get the MAC address into the form we want.
1047		 * Why? Who the hell knows.
1048		 */
1049		{
1050			uint16_t		tmp[4];
1051
1052			sis_read_eeprom(sc, (caddr_t)&tmp,
1053			    NS_EE_NODEADDR, 4, 0);
1054
1055			/* Shift everything over one bit. */
1056			tmp[3] = tmp[3] >> 1;
1057			tmp[3] |= tmp[2] << 15;
1058			tmp[2] = tmp[2] >> 1;
1059			tmp[2] |= tmp[1] << 15;
1060			tmp[1] = tmp[1] >> 1;
1061			tmp[1] |= tmp[0] << 15;
1062
1063			/* Now reverse all the bits. */
1064			tmp[3] = sis_reverse(tmp[3]);
1065			tmp[2] = sis_reverse(tmp[2]);
1066			tmp[1] = sis_reverse(tmp[1]);
1067
1068			eaddr[0] = (tmp[1] >> 0) & 0xFF;
1069			eaddr[1] = (tmp[1] >> 8) & 0xFF;
1070			eaddr[2] = (tmp[2] >> 0) & 0xFF;
1071			eaddr[3] = (tmp[2] >> 8) & 0xFF;
1072			eaddr[4] = (tmp[3] >> 0) & 0xFF;
1073			eaddr[5] = (tmp[3] >> 8) & 0xFF;
1074		}
1075		break;
1076	case SIS_VENDORID:
1077	default:
1078#if defined(__i386__) || defined(__amd64__)
1079		/*
1080		 * If this is a SiS 630E chipset with an embedded
1081		 * SiS 900 controller, we have to read the MAC address
1082		 * from the APC CMOS RAM. Our method for doing this
1083		 * is very ugly since we have to reach out and grab
1084		 * ahold of hardware for which we cannot properly
1085		 * allocate resources. This code is only compiled on
1086		 * the x86 architectures since the SiS 630E chipset
1087		 * is for x86 motherboards only. Note that there are
1088		 * a lot of magic numbers in this hack. These are
1089		 * taken from SiS's Linux driver. I'd like to replace
1090		 * them with proper symbolic definitions, but that
1091		 * requires some datasheets that I don't have access
1092		 * to at the moment.
1093		 */
1094		if (sc->sis_rev == SIS_REV_630S ||
1095		    sc->sis_rev == SIS_REV_630E ||
1096		    sc->sis_rev == SIS_REV_630EA1)
1097			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1098
1099		else if (sc->sis_rev == SIS_REV_635 ||
1100			 sc->sis_rev == SIS_REV_630ET)
1101			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1102		else if (sc->sis_rev == SIS_REV_96x) {
1103			/* Allow the LAN side to read the EEPROM. It is shared
1104			 * between a 1394 controller and the NIC, and each
1105			 * time we access it we need to set SIS_EECMD_REQ.
1106			 */
1107			SIO_SET(SIS_EECMD_REQ);
1108			for (waittime = 0; waittime < SIS_TIMEOUT;
1109			    waittime++) {
1110				/* Force EEPROM to idle state. */
1111				sis_eeprom_idle(sc);
1112				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1113					sis_read_eeprom(sc, (caddr_t)&eaddr,
1114					    SIS_EE_NODEADDR, 3, 0);
1115					break;
1116				}
1117				DELAY(1);
1118			}
1119			/*
1120			 * Set SIS_EECTL_CLK high, so another master
1121			 * can operate on the i2c bus.
1122			 */
1123			SIO_SET(SIS_EECTL_CLK);
1124			/* Give up EEPROM access from the LAN side. */
1125			SIO_SET(SIS_EECMD_DONE);
1126		} else
1127#endif
1128			sis_read_eeprom(sc, (caddr_t)&eaddr,
1129			    SIS_EE_NODEADDR, 3, 0);
1130		break;
1131	}
1132
1133	sis_add_sysctls(sc);
1134
1135	/* Allocate DMA'able memory. */
1136	if ((error = sis_dma_alloc(sc)) != 0)
1137		goto fail;
1138
1139	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
1140	if (ifp == NULL) {
1141		device_printf(dev, "can not if_alloc()\n");
1142		error = ENOSPC;
1143		goto fail;
1144	}
1145	ifp->if_softc = sc;
1146	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1147	ifp->if_mtu = ETHERMTU;
1148	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1149	ifp->if_ioctl = sis_ioctl;
1150	ifp->if_start = sis_start;
1151	ifp->if_init = sis_init;
1152	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1153	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
1154	IFQ_SET_READY(&ifp->if_snd);
1155
1156	if (pci_find_extcap(sc->sis_dev, PCIY_PMG, &pmc) == 0) {
1157		if (sc->sis_type == SIS_TYPE_83815)
1158			ifp->if_capabilities |= IFCAP_WOL;
1159		else
1160			ifp->if_capabilities |= IFCAP_WOL_MAGIC;
1161		ifp->if_capenable = ifp->if_capabilities;
1162	}
1163
1164	/*
1165	 * Do MII setup.
1166	 */
1167	error = mii_attach(dev, &sc->sis_miibus, ifp, sis_ifmedia_upd,
1168	    sis_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1169	if (error != 0) {
1170		device_printf(dev, "attaching PHYs failed\n");
1171		goto fail;
1172	}
1173
1174	/*
1175	 * Call MI attach routine.
1176	 */
1177	ether_ifattach(ifp, eaddr);
1178
1179	/*
1180	 * Tell the upper layer(s) we support long frames.
1181	 */
1182	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1183	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1184	ifp->if_capenable = ifp->if_capabilities;
1185#ifdef DEVICE_POLLING
1186	ifp->if_capabilities |= IFCAP_POLLING;
1187#endif
1188
1189	/* Hook interrupt last to avoid having to lock softc */
1190	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1191	    NULL, sis_intr, sc, &sc->sis_intrhand);
1192
1193	if (error) {
1194		device_printf(dev, "couldn't set up irq\n");
1195		ether_ifdetach(ifp);
1196		goto fail;
1197	}
1198
1199fail:
1200	if (error)
1201		sis_detach(dev);
1202
1203	return (error);
1204}
1205
1206/*
1207 * Shutdown hardware and free up resources. This can be called any
1208 * time after the mutex has been initialized. It is called in both
1209 * the error case in attach and the normal detach case so it needs
1210 * to be careful about only freeing resources that have actually been
1211 * allocated.
1212 */
1213static int
1214sis_detach(device_t dev)
1215{
1216	struct sis_softc	*sc;
1217	struct ifnet		*ifp;
1218
1219	sc = device_get_softc(dev);
1220	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
1221	ifp = sc->sis_ifp;
1222
1223#ifdef DEVICE_POLLING
1224	if (ifp->if_capenable & IFCAP_POLLING)
1225		ether_poll_deregister(ifp);
1226#endif
1227
1228	/* These should only be active if attach succeeded. */
1229	if (device_is_attached(dev)) {
1230		SIS_LOCK(sc);
1231		sis_stop(sc);
1232		SIS_UNLOCK(sc);
1233		callout_drain(&sc->sis_stat_ch);
1234		ether_ifdetach(ifp);
1235	}
1236	if (sc->sis_miibus)
1237		device_delete_child(dev, sc->sis_miibus);
1238	bus_generic_detach(dev);
1239
1240	if (sc->sis_intrhand)
1241		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
1242	bus_release_resources(dev, sis_res_spec, sc->sis_res);
1243
1244	if (ifp)
1245		if_free(ifp);
1246
1247	sis_dma_free(sc);
1248
1249	mtx_destroy(&sc->sis_mtx);
1250
1251	return (0);
1252}
1253
1254struct sis_dmamap_arg {
1255	bus_addr_t	sis_busaddr;
1256};
1257
1258static void
1259sis_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1260{
1261	struct sis_dmamap_arg	*ctx;
1262
1263	if (error != 0)
1264		return;
1265
1266	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1267
1268	ctx = (struct sis_dmamap_arg *)arg;
1269	ctx->sis_busaddr = segs[0].ds_addr;
1270}
1271
1272static int
1273sis_dma_ring_alloc(struct sis_softc *sc, bus_size_t alignment,
1274    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
1275    bus_addr_t *paddr, const char *msg)
1276{
1277	struct sis_dmamap_arg	ctx;
1278	int			error;
1279
1280	error = bus_dma_tag_create(sc->sis_parent_tag, alignment, 0,
1281	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1,
1282	    maxsize, 0, NULL, NULL, tag);
1283	if (error != 0) {
1284		device_printf(sc->sis_dev,
1285		    "could not create %s dma tag\n", msg);
1286		return (ENOMEM);
1287	}
1288	/* Allocate DMA'able memory for ring. */
1289	error = bus_dmamem_alloc(*tag, (void **)ring,
1290	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
1291	if (error != 0) {
1292		device_printf(sc->sis_dev,
1293		    "could not allocate DMA'able memory for %s\n", msg);
1294		return (ENOMEM);
1295	}
1296	/* Load the address of the ring. */
1297	ctx.sis_busaddr = 0;
1298	error = bus_dmamap_load(*tag, *map, *ring, maxsize, sis_dmamap_cb,
1299	    &ctx, BUS_DMA_NOWAIT);
1300	if (error != 0) {
1301		device_printf(sc->sis_dev,
1302		    "could not load DMA'able memory for %s\n", msg);
1303		return (ENOMEM);
1304	}
1305	*paddr = ctx.sis_busaddr;
1306	return (0);
1307}
1308
1309static int
1310sis_dma_alloc(struct sis_softc *sc)
1311{
1312	struct sis_rxdesc	*rxd;
1313	struct sis_txdesc	*txd;
1314	int			error, i;
1315
1316	/* Allocate the parent bus DMA tag appropriate for PCI. */
1317	error = bus_dma_tag_create(bus_get_dma_tag(sc->sis_dev),
1318	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1319	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
1320	    0, NULL, NULL, &sc->sis_parent_tag);
1321	if (error != 0) {
1322		device_printf(sc->sis_dev,
1323		    "could not allocate parent dma tag\n");
1324		return (ENOMEM);
1325	}
1326
1327	/* Create RX ring. */
1328	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_RX_LIST_SZ,
1329	    &sc->sis_rx_list_tag, (uint8_t **)&sc->sis_rx_list,
1330	    &sc->sis_rx_list_map, &sc->sis_rx_paddr, "RX ring");
1331	if (error)
1332		return (error);
1333
1334	/* Create TX ring. */
1335	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_TX_LIST_SZ,
1336	    &sc->sis_tx_list_tag, (uint8_t **)&sc->sis_tx_list,
1337	    &sc->sis_tx_list_map, &sc->sis_tx_paddr, "TX ring");
1338	if (error)
1339		return (error);
1340
1341	/* Create tag for RX mbufs. */
1342	error = bus_dma_tag_create(sc->sis_parent_tag, SIS_RX_BUF_ALIGN, 0,
1343	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
1344	    MCLBYTES, 0, NULL, NULL, &sc->sis_rx_tag);
1345	if (error) {
1346		device_printf(sc->sis_dev, "could not allocate RX dma tag\n");
1347		return (error);
1348	}
1349
1350	/* Create tag for TX mbufs. */
1351	error = bus_dma_tag_create(sc->sis_parent_tag, 1, 0,
1352	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1353	    MCLBYTES * SIS_MAXTXSEGS, SIS_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
1354	    &sc->sis_tx_tag);
1355	if (error) {
1356		device_printf(sc->sis_dev, "could not allocate TX dma tag\n");
1357		return (error);
1358	}
1359
1360	/* Create DMA maps for RX buffers. */
1361	error = bus_dmamap_create(sc->sis_rx_tag, 0, &sc->sis_rx_sparemap);
1362	if (error) {
1363		device_printf(sc->sis_dev,
1364		    "can't create spare DMA map for RX\n");
1365		return (error);
1366	}
1367	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1368		rxd = &sc->sis_rxdesc[i];
1369		rxd->rx_m = NULL;
1370		error = bus_dmamap_create(sc->sis_rx_tag, 0, &rxd->rx_dmamap);
1371		if (error) {
1372			device_printf(sc->sis_dev,
1373			    "can't create DMA map for RX\n");
1374			return (error);
1375		}
1376	}
1377
1378	/* Create DMA maps for TX buffers. */
1379	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1380		txd = &sc->sis_txdesc[i];
1381		txd->tx_m = NULL;
1382		error = bus_dmamap_create(sc->sis_tx_tag, 0, &txd->tx_dmamap);
1383		if (error) {
1384			device_printf(sc->sis_dev,
1385			    "can't create DMA map for TX\n");
1386			return (error);
1387		}
1388	}
1389
1390	return (0);
1391}
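/*
 * To recap the busdma layout built above: a single parent tag constrains all
 * allocations to 32-bit bus addresses, sis_dma_ring_alloc() provides one
 * tag/map pair per descriptor ring, and separate RX/TX mbuf tags supply a
 * map for every ring slot plus one spare RX map consumed by sis_newbuf().
 */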
1392
1393static void
1394sis_dma_free(struct sis_softc *sc)
1395{
1396	struct sis_rxdesc	*rxd;
1397	struct sis_txdesc	*txd;
1398	int			i;
1399
1400	/* Destroy DMA maps for RX buffers. */
1401	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1402		rxd = &sc->sis_rxdesc[i];
1403		if (rxd->rx_dmamap)
1404			bus_dmamap_destroy(sc->sis_rx_tag, rxd->rx_dmamap);
1405	}
1406	if (sc->sis_rx_sparemap)
1407		bus_dmamap_destroy(sc->sis_rx_tag, sc->sis_rx_sparemap);
1408
1409	/* Destroy DMA maps for TX buffers. */
1410	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1411		txd = &sc->sis_txdesc[i];
1412		if (txd->tx_dmamap)
1413			bus_dmamap_destroy(sc->sis_tx_tag, txd->tx_dmamap);
1414	}
1415
1416	if (sc->sis_rx_tag)
1417		bus_dma_tag_destroy(sc->sis_rx_tag);
1418	if (sc->sis_tx_tag)
1419		bus_dma_tag_destroy(sc->sis_tx_tag);
1420
1421	/* Destroy RX ring. */
1422	if (sc->sis_rx_list_map)
1423		bus_dmamap_unload(sc->sis_rx_list_tag, sc->sis_rx_list_map);
1424	if (sc->sis_rx_list_map && sc->sis_rx_list)
1425		bus_dmamem_free(sc->sis_rx_list_tag, sc->sis_rx_list,
1426		    sc->sis_rx_list_map);
1427
1428	if (sc->sis_rx_list_tag)
1429		bus_dma_tag_destroy(sc->sis_rx_list_tag);
1430
1431	/* Destroy TX ring. */
1432	if (sc->sis_tx_list_map)
1433		bus_dmamap_unload(sc->sis_tx_list_tag, sc->sis_tx_list_map);
1434
1435	if (sc->sis_tx_list_map && sc->sis_tx_list)
1436		bus_dmamem_free(sc->sis_tx_list_tag, sc->sis_tx_list,
1437		    sc->sis_tx_list_map);
1438
1439	if (sc->sis_tx_list_tag)
1440		bus_dma_tag_destroy(sc->sis_tx_list_tag);
1441
1442	/* Destroy the parent tag. */
1443	if (sc->sis_parent_tag)
1444		bus_dma_tag_destroy(sc->sis_parent_tag);
1445}
1446
1447/*
1448 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1449 * we arrange the descriptors in a closed ring, so that the last descriptor
1450 * points back to the first.
1451 */
1452static int
1453sis_ring_init(struct sis_softc *sc)
1454{
1455	struct sis_rxdesc	*rxd;
1456	struct sis_txdesc	*txd;
1457	bus_addr_t		next;
1458	int			error, i;
1459
1460	bzero(&sc->sis_tx_list[0], SIS_TX_LIST_SZ);
1461	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1462		txd = &sc->sis_txdesc[i];
1463		txd->tx_m = NULL;
1464		if (i == SIS_TX_LIST_CNT - 1)
1465			next = SIS_TX_RING_ADDR(sc, 0);
1466		else
1467			next = SIS_TX_RING_ADDR(sc, i + 1);
1468		sc->sis_tx_list[i].sis_next = htole32(SIS_ADDR_LO(next));
1469	}
1470	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
1471	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1472	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1473
1474	sc->sis_rx_cons = 0;
1475	bzero(&sc->sis_rx_list[0], SIS_RX_LIST_SZ);
1476	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1477		rxd = &sc->sis_rxdesc[i];
1478		rxd->rx_desc = &sc->sis_rx_list[i];
1479		if (i == SIS_RX_LIST_CNT - 1)
1480			next = SIS_RX_RING_ADDR(sc, 0);
1481		else
1482			next = SIS_RX_RING_ADDR(sc, i + 1);
1483		rxd->rx_desc->sis_next = htole32(SIS_ADDR_LO(next));
1484		error = sis_newbuf(sc, rxd);
1485		if (error)
1486			return (error);
1487	}
1488	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1489	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1490
1491	return (0);
1492}
1493
1494/*
1495 * Initialize an RX descriptor and attach an MBUF cluster.
1496 */
1497static int
1498sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
1499{
1500	struct mbuf		*m;
1501	bus_dma_segment_t	segs[1];
1502	bus_dmamap_t		map;
1503	int nsegs;
1504
1505	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1506	if (m == NULL)
1507		return (ENOBUFS);
1508	m->m_len = m->m_pkthdr.len = SIS_RXLEN;
1509#ifndef __NO_STRICT_ALIGNMENT
1510	m_adj(m, SIS_RX_BUF_ALIGN);
1511#endif
1512
1513	if (bus_dmamap_load_mbuf_sg(sc->sis_rx_tag, sc->sis_rx_sparemap, m,
1514	    segs, &nsegs, 0) != 0) {
1515		m_freem(m);
1516		return (ENOBUFS);
1517	}
1518	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1519
1520	if (rxd->rx_m != NULL) {
1521		bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
1522		    BUS_DMASYNC_POSTREAD);
1523		bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
1524	}
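	/*
	 * Swap the freshly loaded spare map into this ring slot and keep the
	 * old map as the new spare; because the load above went through the
	 * spare map, a failure never disturbs the mbuf already in the ring.
	 */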
1525	map = rxd->rx_dmamap;
1526	rxd->rx_dmamap = sc->sis_rx_sparemap;
1527	sc->sis_rx_sparemap = map;
1528	bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
1529	rxd->rx_m = m;
1530	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1531	rxd->rx_desc->sis_ptr = htole32(SIS_ADDR_LO(segs[0].ds_addr));
1532	return (0);
1533}
1534
1535static __inline void
1536sis_discard_rxbuf(struct sis_rxdesc *rxd)
1537{
1538
1539	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1540}
1541
1542#ifndef __NO_STRICT_ALIGNMENT
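/*
 * The chip wants RX buffers aligned to SIS_RX_BUF_ALIGN, which on
 * strict-alignment machines leaves the IP header misaligned once the
 * Ethernet header is consumed.  Slide the received data back by
 * (SIS_RX_BUF_ALIGN - ETHER_ALIGN) bytes, one 16-bit word at a time,
 * so the payload ends up properly aligned for the stack; the headroom
 * for this was reserved by the m_adj() in sis_newbuf().
 */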
1543static __inline void
1544sis_fixup_rx(struct mbuf *m)
1545{
1546	uint16_t		*src, *dst;
1547	int			i;
1548
1549	src = mtod(m, uint16_t *);
1550	dst = src - (SIS_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);
1551
1552	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1553		*dst++ = *src++;
1554
1555	m->m_data -= SIS_RX_BUF_ALIGN - ETHER_ALIGN;
1556}
1557#endif
1558
1559/*
1560 * A frame has been uploaded: pass the resulting mbuf chain up to
1561 * the higher level protocols.
1562 */
1563static int
1564sis_rxeof(struct sis_softc *sc)
1565{
1566	struct mbuf		*m;
1567	struct ifnet		*ifp;
1568	struct sis_rxdesc	*rxd;
1569	struct sis_desc		*cur_rx;
1570	int			prog, rx_cons, rx_npkts = 0, total_len;
1571	uint32_t		rxstat;
1572
1573	SIS_LOCK_ASSERT(sc);
1574
1575	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1576	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1577
1578	rx_cons = sc->sis_rx_cons;
1579	ifp = sc->sis_ifp;
1580
1581	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1582	    SIS_INC(rx_cons, SIS_RX_LIST_CNT), prog++) {
1583#ifdef DEVICE_POLLING
1584		if (ifp->if_capenable & IFCAP_POLLING) {
1585			if (sc->rxcycles <= 0)
1586				break;
1587			sc->rxcycles--;
1588		}
1589#endif
1590		cur_rx = &sc->sis_rx_list[rx_cons];
1591		rxstat = le32toh(cur_rx->sis_cmdsts);
1592		if ((rxstat & SIS_CMDSTS_OWN) == 0)
1593			break;
1594		rxd = &sc->sis_rxdesc[rx_cons];
1595
1596		total_len = (rxstat & SIS_CMDSTS_BUFLEN) - ETHER_CRC_LEN;
1597		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
1598		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
1599		    ETHER_CRC_LEN))
1600			rxstat &= ~SIS_RXSTAT_GIANT;
1601		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
1602			ifp->if_ierrors++;
1603			if (rxstat & SIS_RXSTAT_COLL)
1604				ifp->if_collisions++;
1605			sis_discard_rxbuf(rxd);
1606			continue;
1607		}
1608
1609		/* Add a new receive buffer to the ring. */
1610		m = rxd->rx_m;
1611		if (sis_newbuf(sc, rxd) != 0) {
1612			ifp->if_iqdrops++;
1613			sis_discard_rxbuf(rxd);
1614			continue;
1615		}
1616
1617		/* No errors; receive the packet. */
1618		m->m_pkthdr.len = m->m_len = total_len;
1619#ifndef __NO_STRICT_ALIGNMENT
1620		/*
1621		 * The chip requires longword-aligned RX buffers, which leaves
1622		 * the IP header misaligned.  On strict-alignment architectures
1623		 * we copy the payload back into place here; elsewhere the
1624		 * buffer is left alone and the expensive copy is avoided.
1625		 */
1626		sis_fixup_rx(m);
1627#endif
1628		ifp->if_ipackets++;
1629		m->m_pkthdr.rcvif = ifp;
1630
1631		SIS_UNLOCK(sc);
1632		(*ifp->if_input)(ifp, m);
1633		SIS_LOCK(sc);
1634		rx_npkts++;
1635	}
1636
1637	if (prog > 0) {
1638		sc->sis_rx_cons = rx_cons;
1639		bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1640		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1641	}
1642
1643	return (rx_npkts);
1644}
1645
1646/*
1647 * A frame was downloaded to the chip. It's safe for us to clean up
1648 * the list buffers.
1649 */
1650
1651static void
1652sis_txeof(struct sis_softc *sc)
1653{
1654	struct ifnet		*ifp;
1655	struct sis_desc		*cur_tx;
1656	struct sis_txdesc	*txd;
1657	uint32_t		cons, txstat;
1658
1659	SIS_LOCK_ASSERT(sc);
1660
1661	cons = sc->sis_tx_cons;
1662	if (cons == sc->sis_tx_prod)
1663		return;
1664
1665	ifp = sc->sis_ifp;
1666	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1667	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1668
1669	/*
1670	 * Go through our tx list and free mbufs for those
1671	 * frames that have been transmitted.
1672	 */
1673	for (; cons != sc->sis_tx_prod; SIS_INC(cons, SIS_TX_LIST_CNT)) {
1674		cur_tx = &sc->sis_tx_list[cons];
1675		txstat = le32toh(cur_tx->sis_cmdsts);
1676		if ((txstat & SIS_CMDSTS_OWN) != 0)
1677			break;
1678		txd = &sc->sis_txdesc[cons];
1679		if (txd->tx_m != NULL) {
1680			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
1681			    BUS_DMASYNC_POSTWRITE);
1682			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1683			m_freem(txd->tx_m);
1684			txd->tx_m = NULL;
1685			if ((txstat & SIS_CMDSTS_PKT_OK) != 0) {
1686				ifp->if_opackets++;
1687				ifp->if_collisions +=
1688				    (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1689			} else {
1690				ifp->if_oerrors++;
1691				if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1692					ifp->if_collisions++;
1693				if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1694					ifp->if_collisions++;
1695			}
1696		}
1697		sc->sis_tx_cnt--;
1698		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1699	}
1700	sc->sis_tx_cons = cons;
1701	if (sc->sis_tx_cnt == 0)
1702		sc->sis_watchdog_timer = 0;
1703}
1704
1705static void
1706sis_tick(void *xsc)
1707{
1708	struct sis_softc	*sc;
1709	struct mii_data		*mii;
1710	struct ifnet		*ifp;
1711
1712	sc = xsc;
1713	SIS_LOCK_ASSERT(sc);
1714	ifp = sc->sis_ifp;
1715
1716	mii = device_get_softc(sc->sis_miibus);
1717	mii_tick(mii);
1718	sis_watchdog(sc);
1719	if ((sc->sis_flags & SIS_FLAG_LINK) == 0)
1720		sis_miibus_statchg(sc->sis_dev);
1721	callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
1722}
1723
1724#ifdef DEVICE_POLLING
1725static poll_handler_t sis_poll;
1726
1727static int
1728sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1729{
1730	struct	sis_softc *sc = ifp->if_softc;
1731	int rx_npkts = 0;
1732
1733	SIS_LOCK(sc);
1734	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1735		SIS_UNLOCK(sc);
1736		return (rx_npkts);
1737	}
1738
1739	/*
1740	 * On the sis, reading the status register also clears it.
1741	 * So before returning to intr mode we must make sure that all
1742	 * possible pending sources of interrupts have been served.
1743	 * In practice this means running the *eof routines to completion,
1744	 * and then handling any remaining interrupt sources below.
1745	 */
1746	sc->rxcycles = count;
1747	rx_npkts = sis_rxeof(sc);
1748	sis_txeof(sc);
1749	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1750		sis_startl(ifp);
1751
1752	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1753		uint32_t	status;
1754
1755		/* Reading the ISR register clears all interrupts. */
1756		status = CSR_READ_4(sc, SIS_ISR);
1757
1758		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1759			ifp->if_ierrors++;
1760
1761		if (status & (SIS_ISR_RX_IDLE))
1762			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1763
1764		if (status & SIS_ISR_SYSERR) {
1765			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1766			sis_initl(sc);
1767		}
1768	}
1769
1770	SIS_UNLOCK(sc);
1771	return (rx_npkts);
1772}
1773#endif /* DEVICE_POLLING */
1774
1775static void
1776sis_intr(void *arg)
1777{
1778	struct sis_softc	*sc;
1779	struct ifnet		*ifp;
1780	uint32_t		status;
1781
1782	sc = arg;
1783	ifp = sc->sis_ifp;
1784
1785	SIS_LOCK(sc);
1786#ifdef DEVICE_POLLING
1787	if (ifp->if_capenable & IFCAP_POLLING) {
1788		SIS_UNLOCK(sc);
1789		return;
1790	}
1791#endif
1792
1793	/* Reading the ISR register clears all interrupts. */
1794	status = CSR_READ_4(sc, SIS_ISR);
1795	if ((status & SIS_INTRS) == 0) {
1796		/* Not ours. */
1797		SIS_UNLOCK(sc);
1798		return;
1799	}
1800
1801	/* Disable interrupts. */
1802	CSR_WRITE_4(sc, SIS_IER, 0);
1803
1804	for (;(status & SIS_INTRS) != 0;) {
1805		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1806			break;
1807		if (status &
1808		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1809		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
1810			sis_txeof(sc);
1811
1812		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1813		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1814			sis_rxeof(sc);
1815
1816		if (status & SIS_ISR_RX_OFLOW)
1817			ifp->if_ierrors++;
1818
1819		if (status & (SIS_ISR_RX_IDLE))
1820			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1821
1822		if (status & SIS_ISR_SYSERR) {
1823			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1824			sis_initl(sc);
1825			SIS_UNLOCK(sc);
1826			return;
1827		}
1828		status = CSR_READ_4(sc, SIS_ISR);
1829	}
1830
1831	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1832		/* Re-enable interrupts. */
1833		CSR_WRITE_4(sc, SIS_IER, 1);
1834
1835		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1836			sis_startl(ifp);
1837	}
1838
1839	SIS_UNLOCK(sc);
1840}
1841
1842/*
1843 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1844 * pointers to the fragment pointers.
1845 */
1846static int
1847sis_encap(struct sis_softc *sc, struct mbuf **m_head)
1848{
1849	struct mbuf		*m;
1850	struct sis_txdesc	*txd;
1851	struct sis_desc		*f;
1852	bus_dma_segment_t	segs[SIS_MAXTXSEGS];
1853	bus_dmamap_t		map;
1854	int			error, i, frag, nsegs, prod;
1855	int			padlen;
1856
1857	prod = sc->sis_tx_prod;
1858	txd = &sc->sis_txdesc[prod];
1859	if ((sc->sis_flags & SIS_FLAG_MANUAL_PAD) != 0 &&
1860	    (*m_head)->m_pkthdr.len < SIS_MIN_FRAMELEN) {
1861		m = *m_head;
1862		padlen = SIS_MIN_FRAMELEN - m->m_pkthdr.len;
1863		if (M_WRITABLE(m) == 0) {
1864			/* Get a writable copy. */
1865			m = m_dup(*m_head, M_DONTWAIT);
1866			m_freem(*m_head);
1867			if (m == NULL) {
1868				*m_head = NULL;
1869				return (ENOBUFS);
1870			}
1871			*m_head = m;
1872		}
1873		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1874			m = m_defrag(m, M_DONTWAIT);
1875			if (m == NULL) {
1876				m_freem(*m_head);
1877				*m_head = NULL;
1878				return (ENOBUFS);
1879			}
1880		}
1881		/*
1882		 * Manually pad short frames, and zero the pad space
1883		 * to avoid leaking data.
1884		 */
1885		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1886		m->m_pkthdr.len += padlen;
1887		m->m_len = m->m_pkthdr.len;
1888		*m_head = m;
1889	}
1890	error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1891	    *m_head, segs, &nsegs, 0);
1892	if (error == EFBIG) {
1893		m = m_collapse(*m_head, M_DONTWAIT, SIS_MAXTXSEGS);
1894		if (m == NULL) {
1895			m_freem(*m_head);
1896			*m_head = NULL;
1897			return (ENOBUFS);
1898		}
1899		*m_head = m;
1900		error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1901		    *m_head, segs, &nsegs, 0);
1902		if (error != 0) {
1903			m_freem(*m_head);
1904			*m_head = NULL;
1905			return (error);
1906		}
1907	} else if (error != 0)
1908		return (error);
1909
1910	/* Check for descriptor overruns. */
1911	if (sc->sis_tx_cnt + nsegs > SIS_TX_LIST_CNT - 1) {
1912		bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1913		return (ENOBUFS);
1914	}
1915
1916	bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE);
1917
1918	frag = prod;
1919	for (i = 0; i < nsegs; i++) {
1920		f = &sc->sis_tx_list[prod];
1921		if (i == 0)
1922			f->sis_cmdsts = htole32(segs[i].ds_len |
1923			    SIS_CMDSTS_MORE);
1924		else
1925			f->sis_cmdsts = htole32(segs[i].ds_len |
1926			    SIS_CMDSTS_OWN | SIS_CMDSTS_MORE);
1927		f->sis_ptr = htole32(SIS_ADDR_LO(segs[i].ds_addr));
1928		SIS_INC(prod, SIS_TX_LIST_CNT);
1929		sc->sis_tx_cnt++;
1930	}
1931
1932	/* Update producer index. */
1933	sc->sis_tx_prod = prod;
1934
1935	/* Remove MORE flag on the last descriptor. */
1936	prod = (prod - 1) & (SIS_TX_LIST_CNT - 1);
1937	f = &sc->sis_tx_list[prod];
1938	f->sis_cmdsts &= ~htole32(SIS_CMDSTS_MORE);
1939
1940	/* Lastly transfer ownership of packet to the controller. */
1941	f = &sc->sis_tx_list[frag];
1942	f->sis_cmdsts |= htole32(SIS_CMDSTS_OWN);
1943
1944	/* Swap the last and the first dmamaps. */
1945	map = txd->tx_dmamap;
1946	txd->tx_dmamap = sc->sis_txdesc[prod].tx_dmamap;
1947	sc->sis_txdesc[prod].tx_dmamap = map;
1948	sc->sis_txdesc[prod].tx_m = *m_head;
1949
1950	return (0);
1951}
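/*
 * Note the handoff order above: every descriptor except the first is marked
 * OWN as it is filled, MORE is cleared on the last one, and only then is OWN
 * set on the first descriptor, so the chip can never start on a chain that
 * is still being built.
 */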
1952
1953static void
1954sis_start(struct ifnet *ifp)
1955{
1956	struct sis_softc	*sc;
1957
1958	sc = ifp->if_softc;
1959	SIS_LOCK(sc);
1960	sis_startl(ifp);
1961	SIS_UNLOCK(sc);
1962}
1963
1964static void
1965sis_startl(struct ifnet *ifp)
1966{
1967	struct sis_softc	*sc;
1968	struct mbuf		*m_head;
1969	int			queued;
1970
1971	sc = ifp->if_softc;
1972
1973	SIS_LOCK_ASSERT(sc);
1974
1975	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1976	    IFF_DRV_RUNNING || (sc->sis_flags & SIS_FLAG_LINK) == 0)
1977		return;
1978
1979	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1980	    sc->sis_tx_cnt < SIS_TX_LIST_CNT - 4;) {
1981		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1982		if (m_head == NULL)
1983			break;
1984
1985		if (sis_encap(sc, &m_head) != 0) {
1986			if (m_head == NULL)
1987				break;
1988			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1989			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1990			break;
1991		}
1992
1993		queued++;
1994
1995		/*
1996		 * If there's a BPF listener, bounce a copy of this frame
1997		 * to him.
1998		 */
1999		BPF_MTAP(ifp, m_head);
2000	}
2001
2002	if (queued) {
2003		/* Transmit */
2004		bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
2005		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2006		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
2007
2008		/*
2009		 * Set a timeout in case the chip goes out to lunch.
2010		 */
2011		sc->sis_watchdog_timer = 5;
2012	}
2013}
2014
2015static void
2016sis_init(void *xsc)
2017{
2018	struct sis_softc	*sc = xsc;
2019
2020	SIS_LOCK(sc);
2021	sis_initl(sc);
2022	SIS_UNLOCK(sc);
2023}
2024
2025static void
2026sis_initl(struct sis_softc *sc)
2027{
2028	struct ifnet		*ifp = sc->sis_ifp;
2029	struct mii_data		*mii;
2030	uint8_t			*eaddr;
2031
2032	SIS_LOCK_ASSERT(sc);
2033
2034	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2035		return;
2036
2037	/*
2038	 * Cancel pending I/O and free all RX/TX buffers.
2039	 */
2040	sis_stop(sc);
2041	/*
2042	 * Reset the chip to a known state.
2043	 */
2044	sis_reset(sc);
2045#ifdef notyet
2046	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
2047		/*
2048		 * Configure 400usec of interrupt holdoff.  This is based
2049		 * on empirical tests on a Soekris 4801.
2050		 */
2051		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
2052	}
2053#endif
2054
2055	mii = device_get_softc(sc->sis_miibus);
2056
2057	/* Set MAC address */
2058	eaddr = IF_LLADDR(sc->sis_ifp);
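	/*
	 * The station address is loaded into the perfect-match filter
	 * 16 bits at a time: select a filter address register through
	 * SIS_RXFILT_CTL, then write two address bytes via SIS_RXFILT_DATA.
	 */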
2059	if (sc->sis_type == SIS_TYPE_83815) {
2060		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
2061		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
2062		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
2063		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
2064		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
2065		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
2066	} else {
2067		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
2068		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
2069		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
2070		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
2071		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
2072		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
2073	}
2074
2075	/* Init circular TX/RX lists. */
2076	if (sis_ring_init(sc) != 0) {
2077		device_printf(sc->sis_dev,
2078		    "initialization failed: no memory for rx buffers\n");
2079		sis_stop(sc);
2080		return;
2081	}
2082
2083	if (sc->sis_type == SIS_TYPE_83815 || sc->sis_type == SIS_TYPE_83816) {
2084		if (sc->sis_manual_pad != 0)
2085			sc->sis_flags |= SIS_FLAG_MANUAL_PAD;
2086		else
2087			sc->sis_flags &= ~SIS_FLAG_MANUAL_PAD;
2088	}
2089
2090	/*
2091	 * Short Cable Receive Errors (MP21.E)
2092	 * also: Page 78 of the DP83815 data sheet (September 2002 version)
2093	 * recommends the following register settings "for optimum
2094	 * performance" for rev 15C parts.  Set them for 15D parts as
2095	 * well, since they require it in practice.
2096	 */
2097	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
2098		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2099		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
2100		/* set val for c2 */
2101		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
2102		/* load/kill c2 */
2103		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
2104		/* raise SD off, from 4 to c */
2105		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
2106		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
2107	}
2108
2109	/*
2110	 * For the NatSemi chip, we have to explicitly enable the
2111	 * reception of ARP frames, as well as turn on the 'perfect
2112	 * match' filter where we store the station address, otherwise
2113	 * we won't receive unicasts meant for this host.
2114	 */
2115	if (sc->sis_type == SIS_TYPE_83815) {
2116		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
2117		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
2118	}
2119
2120	/* If we want promiscuous mode, set the allframes bit. */
2121	if (ifp->if_flags & IFF_PROMISC) {
2122		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2123	} else {
2124		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2125	}
2126
2127	/*
2128	 * Set the capture broadcast bit to capture broadcast frames.
2129	 */
2130	if (ifp->if_flags & IFF_BROADCAST) {
2131		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2132	} else {
2133		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2134	}
2135
2136	/*
2137	 * Load the multicast filter.
2138	 */
2139	if (sc->sis_type == SIS_TYPE_83815)
2140		sis_setmulti_ns(sc);
2141	else
2142		sis_setmulti_sis(sc);
2143
2144	/* Turn the receive filter on */
2145	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
2146
2147	/*
2148	 * Load the address of the RX and TX lists.
2149	 */
2150	CSR_WRITE_4(sc, SIS_RX_LISTPTR, SIS_ADDR_LO(sc->sis_rx_paddr));
2151	CSR_WRITE_4(sc, SIS_TX_LISTPTR, SIS_ADDR_LO(sc->sis_tx_paddr));
2152
2153	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
2154	 * the PCI bus. When this bit is set, the Max DMA Burst Size
2155	 * for TX/RX DMA should be no larger than 16 double words.
2156	 */
2157	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
2158		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
2159	} else {
2160		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
2161	}
2162
2163	/* Accept Long Packets for VLAN support */
2164	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
2165
2166	/*
2167	 * Assume 100Mbps link, actual MAC configuration is done
2168	 * after getting a valid link.
2169	 */
2170	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2171
2172	/*
2173	 * Enable interrupts.
2174	 */
2175	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
2176#ifdef DEVICE_POLLING
2177	/*
2178	 * ... only enable interrupts if we are not polling, make sure
2179	 * they are off otherwise.
2180	 */
2181	if (ifp->if_capenable & IFCAP_POLLING)
2182		CSR_WRITE_4(sc, SIS_IER, 0);
2183	else
2184#endif
2185	CSR_WRITE_4(sc, SIS_IER, 1);
2186
2187	/* Clear MAC disable. */
2188	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2189
2190	sc->sis_flags &= ~SIS_FLAG_LINK;
2191	mii_mediachg(mii);
2192
2193	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2194	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2195
2196	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
2197}
2198
2199/*
2200 * Set media options.
2201 */
2202static int
2203sis_ifmedia_upd(struct ifnet *ifp)
2204{
2205	struct sis_softc	*sc;
2206	struct mii_data		*mii;
2207	int			error;
2208
2209	sc = ifp->if_softc;
2210
2211	SIS_LOCK(sc);
2212	mii = device_get_softc(sc->sis_miibus);
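	/*
	 * Reset the attached PHY(s) so media negotiation starts from a
	 * known state.
	 */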
2213	if (mii->mii_instance) {
2214		struct mii_softc	*miisc;
2215		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2216			mii_phy_reset(miisc);
2217	}
2218	error = mii_mediachg(mii);
2219	SIS_UNLOCK(sc);
2220
2221	return (error);
2222}
2223
2224/*
2225 * Report current media status.
2226 */
2227static void
2228sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2229{
2230	struct sis_softc	*sc;
2231	struct mii_data		*mii;
2232
2233	sc = ifp->if_softc;
2234
2235	SIS_LOCK(sc);
2236	mii = device_get_softc(sc->sis_miibus);
2237	mii_pollstat(mii);
2238	SIS_UNLOCK(sc);
2239	ifmr->ifm_active = mii->mii_media_active;
2240	ifmr->ifm_status = mii->mii_media_status;
2241}
2242
2243static int
2244sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2245{
2246	struct sis_softc	*sc = ifp->if_softc;
2247	struct ifreq		*ifr = (struct ifreq *) data;
2248	struct mii_data		*mii;
2249	int			error = 0, mask;
2250
2251	switch (command) {
2252	case SIOCSIFFLAGS:
2253		SIS_LOCK(sc);
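		/*
		 * When the interface is up and running and only the
		 * PROMISC or ALLMULTI flags have changed, just reprogram
		 * the RX filter; otherwise run a (re)initialization or,
		 * when the interface is being taken down, stop the chip.
		 */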
2254		if (ifp->if_flags & IFF_UP) {
2255			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2256			    ((ifp->if_flags ^ sc->sis_if_flags) &
2257			    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2258				if (sc->sis_type == SIS_TYPE_83815)
2259					sis_setmulti_ns(sc);
2260				else
2261					sis_setmulti_sis(sc);
2262			} else
2263				sis_initl(sc);
2264		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2265			sis_stop(sc);
2266		}
2267		sc->sis_if_flags = ifp->if_flags;
2268		SIS_UNLOCK(sc);
2269		error = 0;
2270		break;
2271	case SIOCADDMULTI:
2272	case SIOCDELMULTI:
2273		SIS_LOCK(sc);
2274		if (sc->sis_type == SIS_TYPE_83815)
2275			sis_setmulti_ns(sc);
2276		else
2277			sis_setmulti_sis(sc);
2278		SIS_UNLOCK(sc);
2279		break;
2280	case SIOCGIFMEDIA:
2281	case SIOCSIFMEDIA:
2282		mii = device_get_softc(sc->sis_miibus);
2283		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2284		break;
2285	case SIOCSIFCAP:
2286		SIS_LOCK(sc);
2287		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2288#ifdef DEVICE_POLLING
2289		if ((mask & IFCAP_POLLING) != 0 &&
2290		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
2291			ifp->if_capenable ^= IFCAP_POLLING;
2292			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
2293				error = ether_poll_register(sis_poll, ifp);
2294				if (error != 0) {
2295					SIS_UNLOCK(sc);
2296					break;
2297				}
2298				/* Disable interrupts. */
2299				CSR_WRITE_4(sc, SIS_IER, 0);
2300			} else {
2301				error = ether_poll_deregister(ifp);
2302				/* Enable interrupts. */
2303				CSR_WRITE_4(sc, SIS_IER, 1);
2304			}
2305		}
2306#endif /* DEVICE_POLLING */
2307		if ((mask & IFCAP_WOL) != 0 &&
2308		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
2309			if ((mask & IFCAP_WOL_UCAST) != 0)
2310				ifp->if_capenable ^= IFCAP_WOL_UCAST;
2311			if ((mask & IFCAP_WOL_MCAST) != 0)
2312				ifp->if_capenable ^= IFCAP_WOL_MCAST;
2313			if ((mask & IFCAP_WOL_MAGIC) != 0)
2314				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2315		}
2316		SIS_UNLOCK(sc);
2317		break;
2318	default:
2319		error = ether_ioctl(ifp, command, data);
2320		break;
2321	}
2322
2323	return (error);
2324}
2325
2326static void
2327sis_watchdog(struct sis_softc *sc)
2328{
2329
2330	SIS_LOCK_ASSERT(sc);
2331
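	/*
	 * The timer is armed (set to a non-zero count) by sis_startl() and
	 * cleared in sis_stop(); only report a timeout once an armed timer
	 * has counted all the way down to zero.
	 */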
2332	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
2333		return;
2334
2335	device_printf(sc->sis_dev, "watchdog timeout\n");
2336	sc->sis_ifp->if_oerrors++;
2337
2338	sc->sis_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2339	sis_initl(sc);
2340
2341	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
2342		sis_startl(sc->sis_ifp);
2343}
2344
2345/*
2346 * Stop the adapter and free any mbufs allocated to the
2347 * RX and TX lists.
2348 */
2349static void
2350sis_stop(struct sis_softc *sc)
2351{
2352	struct ifnet *ifp;
2353	struct sis_rxdesc *rxd;
2354	struct sis_txdesc *txd;
2355	int i;
2356
2357	SIS_LOCK_ASSERT(sc);
2358
2359	ifp = sc->sis_ifp;
2360	sc->sis_watchdog_timer = 0;
2361
2362	callout_stop(&sc->sis_stat_ch);
2363
2364	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2365	CSR_WRITE_4(sc, SIS_IER, 0);
2366	CSR_WRITE_4(sc, SIS_IMR, 0);
2367	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2368	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
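	/*
	 * Give the TX/RX state machines a moment to come to a stop before
	 * the descriptor list pointers are cleared.
	 */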
2369	DELAY(1000);
2370	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2371	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2372
2373	sc->sis_flags &= ~SIS_FLAG_LINK;
2374
2375	/*
2376	 * Free data in the RX lists.
2377	 */
2378	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2379		rxd = &sc->sis_rxdesc[i];
2380		if (rxd->rx_m != NULL) {
2381			bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
2382			    BUS_DMASYNC_POSTREAD);
2383			bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
2384			m_freem(rxd->rx_m);
2385			rxd->rx_m = NULL;
2386		}
2387	}
2388
2389	/*
2390	 * Free the TX list buffers.
2391	 */
2392	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2393		txd = &sc->sis_txdesc[i];
2394		if (txd->tx_m != NULL) {
2395			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
2396			    BUS_DMASYNC_POSTWRITE);
2397			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
2398			m_freem(txd->tx_m);
2399			txd->tx_m = NULL;
2400		}
2401	}
2402}
2403
2404/*
2405 * Stop all chip I/O so that the kernel's probe routines don't
2406 * get confused by errant DMAs when rebooting.
2407 */
2408static int
2409sis_shutdown(device_t dev)
2410{
2411
2412	return (sis_suspend(dev));
2413}
2414
2415static int
2416sis_suspend(device_t dev)
2417{
2418	struct sis_softc	*sc;
2419
2420	sc = device_get_softc(dev);
2421	SIS_LOCK(sc);
2422	sis_stop(sc);
2423	sis_wol(sc);
2424	SIS_UNLOCK(sc);
2425	return (0);
2426}
2427
2428static int
2429sis_resume(device_t dev)
2430{
2431	struct sis_softc	*sc;
2432	struct ifnet		*ifp;
2433
2434	sc = device_get_softc(dev);
2435	SIS_LOCK(sc);
2436	ifp = sc->sis_ifp;
2437	if ((ifp->if_flags & IFF_UP) != 0) {
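		/*
		 * Clear IFF_DRV_RUNNING so sis_initl() performs a full
		 * reinitialization instead of returning early.
		 */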
2438		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2439		sis_initl(sc);
2440	}
2441	SIS_UNLOCK(sc);
2442	return (0);
2443}
2444
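/*
 * Program wake-on-LAN.  The NatSemi DP83815 has on-chip wake event logic
 * (NS_WCSR/NS_CLKRUN), while the SiS parts rely on the PCI power management
 * capability together with the SIS_PWRMAN_CTL register.
 */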
2445static void
2446sis_wol(struct sis_softc *sc)
2447{
2448	struct ifnet		*ifp;
2449	uint32_t		val;
2450	uint16_t		pmstat;
2451	int			pmc;
2452
2453	ifp = sc->sis_ifp;
2454	if ((ifp->if_capenable & IFCAP_WOL) == 0)
2455		return;
2456
2457	if (sc->sis_type == SIS_TYPE_83815) {
2458		/* Reset RXDP. */
2459		CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2460
2461		/* Configure WOL events. */
2462		CSR_READ_4(sc, NS_WCSR);
2463		val = 0;
2464		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2465			val |= NS_WCSR_WAKE_UCAST;
2466		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
2467			val |= NS_WCSR_WAKE_MCAST;
2468		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2469			val |= NS_WCSR_WAKE_MAGIC;
2470		CSR_WRITE_4(sc, NS_WCSR, val);
2471		/* Enable PME and clear PMESTS. */
2472		val = CSR_READ_4(sc, NS_CLKRUN);
2473		val |= NS_CLKRUN_PMEENB | NS_CLKRUN_PMESTS;
2474		CSR_WRITE_4(sc, NS_CLKRUN, val);
2475		/* Enable silent RX mode. */
2476		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
2477	} else {
2478		if (pci_find_extcap(sc->sis_dev, PCIY_PMG, &pmc) != 0)
2479			return;
2480		val = 0;
2481		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2482			val |= SIS_PWRMAN_WOL_MAGIC;
2483		CSR_WRITE_4(sc, SIS_PWRMAN_CTL, val);
2484		/* Request PME. */
2485		pmstat = pci_read_config(sc->sis_dev,
2486		    pmc + PCIR_POWER_STATUS, 2);
2487		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2488		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2489			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2490		pci_write_config(sc->sis_dev,
2491		    pmc + PCIR_POWER_STATUS, pmstat, 2);
2492	}
2493}
2494
2495static void
2496sis_add_sysctls(struct sis_softc *sc)
2497{
2498	struct sysctl_ctx_list *ctx;
2499	struct sysctl_oid_list *children;
2500	char tn[32];
2501	int unit;
2502
2503	ctx = device_get_sysctl_ctx(sc->sis_dev);
2504	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sis_dev));
2505
2506	unit = device_get_unit(sc->sis_dev);
2507	/*
2508	 * Unlike most other controllers, NS DP83815/DP83816 controllers
2509	 * seem to pad with 0xFF when they encounter short frames.  According
2510	 * to RFC 1042 the pad bytes should be 0x00.  Turning this tunable
2511	 * on makes the driver pad manually, but it is disabled by default
2512	 * because it consumes extra CPU cycles for short frames.
2513	 */
2514	sc->sis_manual_pad = 0;
2515	snprintf(tn, sizeof(tn), "dev.sis.%d.manual_pad", unit);
2516	TUNABLE_INT_FETCH(tn, &sc->sis_manual_pad);
2517	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "manual_pad",
2518	    CTLFLAG_RW, &sc->sis_manual_pad, 0, "Manually pad short frames");
2519}
2520
2521static device_method_t sis_methods[] = {
2522	/* Device interface */
2523	DEVMETHOD(device_probe,		sis_probe),
2524	DEVMETHOD(device_attach,	sis_attach),
2525	DEVMETHOD(device_detach,	sis_detach),
2526	DEVMETHOD(device_shutdown,	sis_shutdown),
2527	DEVMETHOD(device_suspend,	sis_suspend),
2528	DEVMETHOD(device_resume,	sis_resume),
2529
2530	/* bus interface */
2531	DEVMETHOD(bus_print_child,	bus_generic_print_child),
2532	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
2533
2534	/* MII interface */
2535	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
2536	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
2537	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
2538
2539	{ 0, 0 }
2540};
2541
2542static driver_t sis_driver = {
2543	"sis",
2544	sis_methods,
2545	sizeof(struct sis_softc)
2546};
2547
2548static devclass_t sis_devclass;
2549
2550DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
2551DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
2552