1/*-
2 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/sis/if_sis.c 212166 2010-09-02 22:37:13Z yongari $");
36
37/*
38 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
39 * available from http://www.sis.com.tw.
40 *
41 * This driver also supports the NatSemi DP83815. Datasheets are
42 * available from http://www.national.com.
43 *
44 * Written by Bill Paul <wpaul@ee.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
60
61#ifdef HAVE_KERNEL_OPTION_HEADERS
62#include "opt_device_polling.h"
63#endif
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/bus.h>
68#include <sys/endian.h>
69#include <sys/kernel.h>
70#include <sys/lock.h>
71#include <sys/malloc.h>
72#include <sys/mbuf.h>
73#include <sys/module.h>
74#include <sys/socket.h>
75#include <sys/sockio.h>
76#include <sys/sysctl.h>
77
78#include <net/if.h>
79#include <net/if_arp.h>
80#include <net/ethernet.h>
81#include <net/if_dl.h>
82#include <net/if_media.h>
83#include <net/if_types.h>
84#include <net/if_vlan_var.h>
85
86#include <net/bpf.h>
87
88#include <machine/bus.h>
89#include <machine/resource.h>
90#include <sys/bus.h>
91#include <sys/rman.h>
92
93#include <dev/mii/mii.h>
94#include <dev/mii/miivar.h>
95
96#include <dev/pci/pcireg.h>
97#include <dev/pci/pcivar.h>
98
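/*
 * When SIS_USEIOSPACE is defined, sis_res_spec below maps the chip's
 * registers through the PCI I/O BAR (SIS_PCI_LOIO) instead of the
 * memory-mapped BAR (SIS_PCI_LOMEM).
 */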
99#define SIS_USEIOSPACE
100
101#include <dev/sis/if_sisreg.h>
102
103MODULE_DEPEND(sis, pci, 1, 1, 1);
104MODULE_DEPEND(sis, ether, 1, 1, 1);
105MODULE_DEPEND(sis, miibus, 1, 1, 1);
106
107/* "device miibus" required.  See GENERIC if you get errors here. */
108#include "miibus_if.h"
109
110#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
111#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
112#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)
113
114/*
115 * register space access macros
116 */
117#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)
118
119#define CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)
120
121#define CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)
122
123/*
124 * Various supported device vendors/types and their names.
125 */
126static struct sis_type sis_devs[] = {
127	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
128	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
129	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
130	{ 0, 0, NULL }
131};
132
133static int sis_detach(device_t);
134static __inline void sis_discard_rxbuf(struct sis_rxdesc *);
135static int sis_dma_alloc(struct sis_softc *);
136static void sis_dma_free(struct sis_softc *);
137static int sis_dma_ring_alloc(struct sis_softc *, bus_size_t, bus_size_t,
138    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
139static void sis_dmamap_cb(void *, bus_dma_segment_t *, int, int);
140#ifndef __NO_STRICT_ALIGNMENT
141static __inline void sis_fixup_rx(struct mbuf *);
142#endif
143static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
144static int sis_ifmedia_upd(struct ifnet *);
145static void sis_init(void *);
146static void sis_initl(struct sis_softc *);
147static void sis_intr(void *);
148static int sis_ioctl(struct ifnet *, u_long, caddr_t);
149static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
150static int sis_rxeof(struct sis_softc *);
151static void sis_start(struct ifnet *);
152static void sis_startl(struct ifnet *);
153static void sis_stop(struct sis_softc *);
154static void sis_add_sysctls(struct sis_softc *);
155static void sis_watchdog(struct sis_softc *);
156
157
158static struct resource_spec sis_res_spec[] = {
159#ifdef SIS_USEIOSPACE
160	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
161#else
162	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
163#endif
164	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
165	{ -1, 0 }
166};
167
168#define SIS_SETBIT(sc, reg, x)				\
169	CSR_WRITE_4(sc, reg,				\
170		CSR_READ_4(sc, reg) | (x))
171
172#define SIS_CLRBIT(sc, reg, x)				\
173	CSR_WRITE_4(sc, reg,				\
174		CSR_READ_4(sc, reg) & ~(x))
175
176#define SIO_SET(x)					\
177	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
178
179#define SIO_CLR(x)					\
180	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
181
182/*
183 * Routine to reverse the bits in a word. Stolen almost
184 * verbatim from /usr/games/fortune.
185 */
186static uint16_t
187sis_reverse(uint16_t n)
188{
189	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
190	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
191	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
192	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
193
194	return (n);
195}
196
197static void
198sis_delay(struct sis_softc *sc)
199{
200	int			idx;
201
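	/*
	 * Burn time with dummy register reads; (300 / 33) + 1 reads is
	 * presumably meant to give a delay on the order of a few hundred
	 * nanoseconds for the EEPROM/MII bit-bang timing.
	 */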
202	for (idx = (300 / 33) + 1; idx > 0; idx--)
203		CSR_READ_4(sc, SIS_CSR);
204}
205
206static void
207sis_eeprom_idle(struct sis_softc *sc)
208{
209	int		i;
210
211	SIO_SET(SIS_EECTL_CSEL);
212	sis_delay(sc);
213	SIO_SET(SIS_EECTL_CLK);
214	sis_delay(sc);
215
216	for (i = 0; i < 25; i++) {
217		SIO_CLR(SIS_EECTL_CLK);
218		sis_delay(sc);
219		SIO_SET(SIS_EECTL_CLK);
220		sis_delay(sc);
221	}
222
223	SIO_CLR(SIS_EECTL_CLK);
224	sis_delay(sc);
225	SIO_CLR(SIS_EECTL_CSEL);
226	sis_delay(sc);
227	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
228}
229
230/*
231 * Send a read command and address to the EEPROM, check for ACK.
232 */
233static void
234sis_eeprom_putbyte(struct sis_softc *sc, int addr)
235{
236	int		d, i;
237
238	d = addr | SIS_EECMD_READ;
239
240	/*
241	 * Feed in each bit and strobe the clock.
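	 * The command word (read opcode plus address) is shifted out
	 * MSB first, starting from bit 10 (11 bits in all).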
242	 */
243	for (i = 0x400; i; i >>= 1) {
244		if (d & i) {
245			SIO_SET(SIS_EECTL_DIN);
246		} else {
247			SIO_CLR(SIS_EECTL_DIN);
248		}
249		sis_delay(sc);
250		SIO_SET(SIS_EECTL_CLK);
251		sis_delay(sc);
252		SIO_CLR(SIS_EECTL_CLK);
253		sis_delay(sc);
254	}
255}
256
257/*
258 * Read a word of data stored in the EEPROM at address 'addr.'
259 */
260static void
261sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
262{
263	int		i;
264	uint16_t	word = 0;
265
266	/* Force EEPROM to idle state. */
267	sis_eeprom_idle(sc);
268
269	/* Enter EEPROM access mode. */
270	sis_delay(sc);
271	SIO_CLR(SIS_EECTL_CLK);
272	sis_delay(sc);
273	SIO_SET(SIS_EECTL_CSEL);
274	sis_delay(sc);
275
276	/*
277	 * Send address of word we want to read.
278	 */
279	sis_eeprom_putbyte(sc, addr);
280
281	/*
282	 * Start reading bits from EEPROM.
283	 */
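	/*
	 * Sixteen data bits are clocked in MSB first; each bit is sampled
	 * from the EEPROM data-out line while the clock is high.
	 */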
284	for (i = 0x8000; i; i >>= 1) {
285		SIO_SET(SIS_EECTL_CLK);
286		sis_delay(sc);
287		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
288			word |= i;
289		sis_delay(sc);
290		SIO_CLR(SIS_EECTL_CLK);
291		sis_delay(sc);
292	}
293
294	/* Turn off EEPROM access mode. */
295	sis_eeprom_idle(sc);
296
297	*dest = word;
298}
299
300/*
301 * Read a sequence of words from the EEPROM.
302 */
303static void
304sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
305{
306	int			i;
307	uint16_t		word = 0, *ptr;
308
309	for (i = 0; i < cnt; i++) {
310		sis_eeprom_getword(sc, off + i, &word);
311		ptr = (uint16_t *)(dest + (i * 2));
312		if (swap)
313			*ptr = ntohs(word);
314		else
315			*ptr = word;
316	}
317}
318
319#if defined(__i386__) || defined(__amd64__)
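/*
 * Walk every PCI device in the system looking for the SiS bridge
 * (vendor 0x1039, device 0x0008, apparently the PCI-ISA bridge) through
 * which the APC CMOS RAM holding the MAC address is reached.
 */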
320static device_t
321sis_find_bridge(device_t dev)
322{
323	devclass_t		pci_devclass;
324	device_t		*pci_devices;
325	int			pci_count = 0;
326	device_t		*pci_children;
327	int			pci_childcount = 0;
328	device_t		*busp, *childp;
329	device_t		child = NULL;
330	int			i, j;
331
332	if ((pci_devclass = devclass_find("pci")) == NULL)
333		return (NULL);
334
335	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
336
337	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
338		if (device_get_children(*busp, &pci_children, &pci_childcount))
339			continue;
340		for (j = 0, childp = pci_children;
341		    j < pci_childcount; j++, childp++) {
342			if (pci_get_vendor(*childp) == SIS_VENDORID &&
343			    pci_get_device(*childp) == 0x0008) {
344				child = *childp;
345				free(pci_children, M_TEMP);
346				goto done;
347			}
348		}
349		free(pci_children, M_TEMP);
350	}
351
352done:
353	free(pci_devices, M_TEMP);
354	return (child);
355}
356
357static void
358sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
359{
360	device_t		bridge;
361	uint8_t			reg;
362	int			i;
363	bus_space_tag_t		btag;
364
365	bridge = sis_find_bridge(dev);
366	if (bridge == NULL)
367		return;
368	reg = pci_read_config(bridge, 0x48, 1);
369	pci_write_config(bridge, 0x48, reg|0x40, 1);
370
371	/* XXX */
372#if defined(__i386__)
373	btag = I386_BUS_SPACE_IO;
374#elif defined(__amd64__)
375	btag = AMD64_BUS_SPACE_IO;
376#endif
377
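	/*
	 * The MAC bytes sit in APC CMOS RAM and are read through the
	 * standard RTC index/data port pair (0x70/0x71); the bridge
	 * register 0x48 write above apparently exposes that RAM.
	 */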
378	for (i = 0; i < cnt; i++) {
379		bus_space_write_1(btag, 0x0, 0x70, i + off);
380		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
381	}
382
383	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
384}
385
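/*
 * On the integrated SiS 630ET/635 parts (see sis_attach()) the MAC
 * address is recovered by asking the chip to reload its receive filter
 * (SIS_CSR_RELOAD) and then reading the three 16-bit perfect-match
 * words (PAR0..PAR2) back out through the filter data register.
 */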
386static void
387sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
388{
389	uint32_t		filtsave, csrsave;
390
391	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
392	csrsave = CSR_READ_4(sc, SIS_CSR);
393
394	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
395	CSR_WRITE_4(sc, SIS_CSR, 0);
396
397	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
398
399	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
400	((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
401	CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1);
402	((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
403	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
404	((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
405
406	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
407	CSR_WRITE_4(sc, SIS_CSR, csrsave);
408}
409#endif
410
411/*
412 * Sync the PHYs by setting data bit and strobing the clock 32 times.
413 */
414static void
415sis_mii_sync(struct sis_softc *sc)
416{
417	int		i;
418
419 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
420
421 	for (i = 0; i < 32; i++) {
422 		SIO_SET(SIS_MII_CLK);
423 		DELAY(1);
424 		SIO_CLR(SIS_MII_CLK);
425 		DELAY(1);
426 	}
427}
428
429/*
430 * Clock a series of bits through the MII.
431 */
432static void
433sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
434{
435	int			i;
436
437	SIO_CLR(SIS_MII_CLK);
438
439	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
440		if (bits & i) {
441			SIO_SET(SIS_MII_DATA);
442		} else {
443			SIO_CLR(SIS_MII_DATA);
444		}
445		DELAY(1);
446		SIO_CLR(SIS_MII_CLK);
447		DELAY(1);
448		SIO_SET(SIS_MII_CLK);
449	}
450}
451
452/*
453 * Read a PHY register through the MII.
454 */
455static int
456sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
457{
458	int			i, ack;
459
460	/*
461	 * Set up frame for RX.
462	 */
463	frame->mii_stdelim = SIS_MII_STARTDELIM;
464	frame->mii_opcode = SIS_MII_READOP;
465	frame->mii_turnaround = 0;
466	frame->mii_data = 0;
467
468	/*
469 	 * Turn on data xmit.
470	 */
471	SIO_SET(SIS_MII_DIR);
472
473	sis_mii_sync(sc);
474
475	/*
476	 * Send command/address info.
477	 */
478	sis_mii_send(sc, frame->mii_stdelim, 2);
479	sis_mii_send(sc, frame->mii_opcode, 2);
480	sis_mii_send(sc, frame->mii_phyaddr, 5);
481	sis_mii_send(sc, frame->mii_regaddr, 5);
482
483	/* Idle bit */
484	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
485	DELAY(1);
486	SIO_SET(SIS_MII_CLK);
487	DELAY(1);
488
489	/* Turn off xmit. */
490	SIO_CLR(SIS_MII_DIR);
491
492	/* Check for ack */
493	SIO_CLR(SIS_MII_CLK);
494	DELAY(1);
495	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
496	SIO_SET(SIS_MII_CLK);
497	DELAY(1);
498
499	/*
500	 * Now try reading data bits. If the ack failed, we still
501	 * need to clock through 16 cycles to keep the PHY(s) in sync.
502	 */
503	if (ack) {
504		for (i = 0; i < 16; i++) {
505			SIO_CLR(SIS_MII_CLK);
506			DELAY(1);
507			SIO_SET(SIS_MII_CLK);
508			DELAY(1);
509		}
510		goto fail;
511	}
512
513	for (i = 0x8000; i; i >>= 1) {
514		SIO_CLR(SIS_MII_CLK);
515		DELAY(1);
516		if (!ack) {
517			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
518				frame->mii_data |= i;
519			DELAY(1);
520		}
521		SIO_SET(SIS_MII_CLK);
522		DELAY(1);
523	}
524
525fail:
526
527	SIO_CLR(SIS_MII_CLK);
528	DELAY(1);
529	SIO_SET(SIS_MII_CLK);
530	DELAY(1);
531
532	if (ack)
533		return (1);
534	return (0);
535}
536
537/*
538 * Write to a PHY register through the MII.
539 */
540static int
541sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
542{
543
544 	/*
545 	 * Set up frame for TX.
546 	 */
547
548 	frame->mii_stdelim = SIS_MII_STARTDELIM;
549 	frame->mii_opcode = SIS_MII_WRITEOP;
550 	frame->mii_turnaround = SIS_MII_TURNAROUND;
551
552 	/*
553  	 * Turn on data output.
554 	 */
555 	SIO_SET(SIS_MII_DIR);
556
557 	sis_mii_sync(sc);
558
559 	sis_mii_send(sc, frame->mii_stdelim, 2);
560 	sis_mii_send(sc, frame->mii_opcode, 2);
561 	sis_mii_send(sc, frame->mii_phyaddr, 5);
562 	sis_mii_send(sc, frame->mii_regaddr, 5);
563 	sis_mii_send(sc, frame->mii_turnaround, 2);
564 	sis_mii_send(sc, frame->mii_data, 16);
565
566 	/* Idle bit. */
567 	SIO_SET(SIS_MII_CLK);
568 	DELAY(1);
569 	SIO_CLR(SIS_MII_CLK);
570 	DELAY(1);
571
572 	/*
573 	 * Turn off xmit.
574 	 */
575 	SIO_CLR(SIS_MII_DIR);
576
577 	return (0);
578}
579
580static int
581sis_miibus_readreg(device_t dev, int phy, int reg)
582{
583	struct sis_softc	*sc;
584	struct sis_mii_frame    frame;
585
586	sc = device_get_softc(dev);
587
588	if (sc->sis_type == SIS_TYPE_83815) {
589		if (phy != 0)
590			return (0);
591		/*
592		 * The NatSemi chip can take a while after
593		 * a reset to come ready, during which the BMSR
594		 * returns a value of 0. This is *never* supposed
595		 * to happen: some of the BMSR bits are meant to
596		 * be hardwired in the on position, and this can
597		 * confuse the miibus code a bit during the probe
598		 * and attach phase. So we make an effort to check
599		 * for this condition and wait for it to clear.
600		 */
601		if (!CSR_READ_4(sc, NS_BMSR))
602			DELAY(1000);
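		/*
		 * The DP83815 maps its internal PHY registers directly into
		 * the MAC register space, one 32-bit slot per MII register
		 * starting at NS_BMCR.
		 */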
603		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
604	}
605
606	/*
607	 * Chipsets < SIS_635 seem not to be able to read/write
608	 * through mdio. Use the enhanced PHY access register
609	 * again for them.
610	 */
611	if (sc->sis_type == SIS_TYPE_900 &&
612	    sc->sis_rev < SIS_REV_635) {
613		int i, val = 0;
614
615		if (phy != 0)
616			return (0);
617
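		/*
		 * Enhanced PHY access: pack the PHY address, register number
		 * and opcode into SIS_PHYCTL, kick off the cycle with the
		 * ACCESS bit, and read the result from the upper 16 bits
		 * once ACCESS clears.
		 */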
618		CSR_WRITE_4(sc, SIS_PHYCTL,
619		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
620		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
621
622		for (i = 0; i < SIS_TIMEOUT; i++) {
623			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
624				break;
625		}
626
627		if (i == SIS_TIMEOUT) {
628			device_printf(sc->sis_dev, "PHY failed to come ready\n");
629			return (0);
630		}
631
632		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
633
634		if (val == 0xFFFF)
635			return (0);
636
637		return (val);
638	} else {
639		bzero((char *)&frame, sizeof(frame));
640
641		frame.mii_phyaddr = phy;
642		frame.mii_regaddr = reg;
643		sis_mii_readreg(sc, &frame);
644
645		return (frame.mii_data);
646	}
647}
648
649static int
650sis_miibus_writereg(device_t dev, int phy, int reg, int data)
651{
652	struct sis_softc	*sc;
653	struct sis_mii_frame	frame;
654
655	sc = device_get_softc(dev);
656
657	if (sc->sis_type == SIS_TYPE_83815) {
658		if (phy != 0)
659			return (0);
660		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
661		return (0);
662	}
663
664	/*
665	 * Chipsets < SIS_635 seem not to be able to read/write
666	 * through mdio. Use the enhanced PHY access register
667	 * again for them.
668	 */
669	if (sc->sis_type == SIS_TYPE_900 &&
670	    sc->sis_rev < SIS_REV_635) {
671		int i;
672
673		if (phy != 0)
674			return (0);
675
676		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
677		    (reg << 6) | SIS_PHYOP_WRITE);
678		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
679
680		for (i = 0; i < SIS_TIMEOUT; i++) {
681			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
682				break;
683		}
684
685		if (i == SIS_TIMEOUT)
686			device_printf(sc->sis_dev, "PHY failed to come ready\n");
687	} else {
688		bzero((char *)&frame, sizeof(frame));
689
690		frame.mii_phyaddr = phy;
691		frame.mii_regaddr = reg;
692		frame.mii_data = data;
693		sis_mii_writereg(sc, &frame);
694	}
695	return (0);
696}
697
698static void
699sis_miibus_statchg(device_t dev)
700{
701	struct sis_softc	*sc;
702	struct mii_data		*mii;
703	struct ifnet		*ifp;
704	uint32_t		reg;
705
706	sc = device_get_softc(dev);
707	SIS_LOCK_ASSERT(sc);
708
709	mii = device_get_softc(sc->sis_miibus);
710	ifp = sc->sis_ifp;
711	if (mii == NULL || ifp == NULL ||
712	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
713		return;
714
715	sc->sis_flags &= ~SIS_FLAG_LINK;
716	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
717	    (IFM_ACTIVE | IFM_AVALID)) {
718		switch (IFM_SUBTYPE(mii->mii_media_active)) {
719		case IFM_10_T:
720			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
721			sc->sis_flags |= SIS_FLAG_LINK;
722			break;
723		case IFM_100_TX:
724			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
725			sc->sis_flags |= SIS_FLAG_LINK;
726			break;
727		default:
728			break;
729		}
730	}
731
732	if ((sc->sis_flags & SIS_FLAG_LINK) == 0) {
733		/*
734		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
735		 * SIS_RX_LISTPTR, which in turn would require reinitializing
736		 * the TX/RX buffers.  So just don't do anything when the
737		 * link is lost.
738		 */
739		return;
740	}
741
742	/* Set full/half duplex mode. */
743	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
744		SIS_SETBIT(sc, SIS_TX_CFG,
745		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
746		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
747	} else {
748		SIS_CLRBIT(sc, SIS_TX_CFG,
749		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
750		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
751	}
752
753	if (sc->sis_type == SIS_TYPE_83816) {
754		/*
755		 * MPII03.D: Half Duplex Excessive Collisions.
756		 * Also page 49 in 83816 manual
757		 */
758		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
759	}
760
761	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
762	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
763		/*
764		 * Short Cable Receive Errors (MP21.E)
765		 */
766		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
767		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
768		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
769		DELAY(100);
770		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
771		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
772			device_printf(sc->sis_dev,
773			    "Applying short cable fix (reg=%x)\n", reg);
774			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
775			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
776		}
777		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
778	}
779	/* Enable TX/RX MACs. */
780	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
781	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
782}
783
784static uint32_t
785sis_mchash(struct sis_softc *sc, const uint8_t *addr)
786{
787	uint32_t		crc;
788
789	/* Compute CRC for the address value. */
790	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
791
792	/*
793	 * return the filter bit position
794	 *
795	 * The NatSemi chip has a 512-bit filter, which is
796	 * different than the SiS, so we special-case it.
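	 *
	 * A 512-bit table needs a 9-bit index (crc >> 23); the newer SiS
	 * parts use a 256-bit table (crc >> 24) and the older ones a
	 * 128-bit table (crc >> 25).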
797	 */
798	if (sc->sis_type == SIS_TYPE_83815)
799		return (crc >> 23);
800	else if (sc->sis_rev >= SIS_REV_635 ||
801	    sc->sis_rev == SIS_REV_900B)
802		return (crc >> 24);
803	else
804		return (crc >> 25);
805}
806
807static void
808sis_setmulti_ns(struct sis_softc *sc)
809{
810	struct ifnet		*ifp;
811	struct ifmultiaddr	*ifma;
812	uint32_t		h = 0, i, filtsave;
813	int			bit, index;
814
815	ifp = sc->sis_ifp;
816
817	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
818		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
819		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
820		return;
821	}
822
823	/*
824	 * We have to explicitly enable the multicast hash table
825	 * on the NatSemi chip if we want to use it, which we do.
826	 */
827	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
828	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
829
830	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
831
832	/* first, zot all the existing hash bits */
833	for (i = 0; i < 32; i++) {
834		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
835		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
836	}
837
838	if_maddr_rlock(ifp);
839	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
840		if (ifma->ifma_addr->sa_family != AF_LINK)
841			continue;
842		h = sis_mchash(sc,
843		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
844		index = h >> 3;
845		bit = h & 0x1F;
846		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
847		if (bit > 0xF)
848			bit -= 0x10;
849		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
850	}
851	if_maddr_runlock(ifp);
852
853	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
854}
855
856static void
857sis_setmulti_sis(struct sis_softc *sc)
858{
859	struct ifnet		*ifp;
860	struct ifmultiaddr	*ifma;
861	uint32_t		h, i, n, ctl;
862	uint16_t		hashes[16];
863
864	ifp = sc->sis_ifp;
865
866	/* hash table size */
867	if (sc->sis_rev >= SIS_REV_635 ||
868	    sc->sis_rev == SIS_REV_900B)
869		n = 16;
870	else
871		n = 8;
872
873	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
874
875	if (ifp->if_flags & IFF_BROADCAST)
876		ctl |= SIS_RXFILTCTL_BROAD;
877
878	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
879		ctl |= SIS_RXFILTCTL_ALLMULTI;
880		if (ifp->if_flags & IFF_PROMISC)
881			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
882		for (i = 0; i < n; i++)
883			hashes[i] = ~0;
884	} else {
885		for (i = 0; i < n; i++)
886			hashes[i] = 0;
887		i = 0;
888		if_maddr_rlock(ifp);
889		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
890			if (ifma->ifma_addr->sa_family != AF_LINK)
891				continue;
892			h = sis_mchash(sc,
893			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
894			hashes[h >> 4] |= 1 << (h & 0xf);
895			i++;
896		}
897		if_maddr_runlock(ifp);
898		if (i > n) {
899			ctl |= SIS_RXFILTCTL_ALLMULTI;
900			for (i = 0; i < n; i++)
901				hashes[i] = ~0;
902		}
903	}
904
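	/*
	 * The multicast hash is loaded into filter RAM words 4 and up: the
	 * word index (4 + i), shifted left 16 bits, selects the word via
	 * SIS_RXFILT_CTL and the 16-bit hash chunk is written through
	 * SIS_RXFILT_DATA.
	 */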
905	for (i = 0; i < n; i++) {
906		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
907		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
908	}
909
910	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
911}
912
913static void
914sis_reset(struct sis_softc *sc)
915{
916	int		i;
917
918	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
919
920	for (i = 0; i < SIS_TIMEOUT; i++) {
921		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
922			break;
923	}
924
925	if (i == SIS_TIMEOUT)
926		device_printf(sc->sis_dev, "reset never completed\n");
927
928	/* Wait a little while for the chip to get its brains in order. */
929	DELAY(1000);
930
931	/*
932	 * If this is a NatSemi chip, make sure to clear
933	 * PME mode.
934	 */
935	if (sc->sis_type == SIS_TYPE_83815) {
936		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
937		CSR_WRITE_4(sc, NS_CLKRUN, 0);
938	}
939}
940
941/*
942 * Probe for an SiS chip. Check the PCI vendor and device
943 * IDs against our list and return a device name if we find a match.
944 */
945static int
946sis_probe(device_t dev)
947{
948	struct sis_type		*t;
949
950	t = sis_devs;
951
952	while (t->sis_name != NULL) {
953		if ((pci_get_vendor(dev) == t->sis_vid) &&
954		    (pci_get_device(dev) == t->sis_did)) {
955			device_set_desc(dev, t->sis_name);
956			return (BUS_PROBE_DEFAULT);
957		}
958		t++;
959	}
960
961	return (ENXIO);
962}
963
964/*
965 * Attach the interface. Allocate softc structures, do ifmedia
966 * setup and ethernet/BPF attach.
967 */
968static int
969sis_attach(device_t dev)
970{
971	u_char			eaddr[ETHER_ADDR_LEN];
972	struct sis_softc	*sc;
973	struct ifnet		*ifp;
974	int			error = 0, waittime = 0;
975
976	waittime = 0;
977	sc = device_get_softc(dev);
978
979	sc->sis_dev = dev;
980
981	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
982	    MTX_DEF);
983	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);
984
985	if (pci_get_device(dev) == SIS_DEVICEID_900)
986		sc->sis_type = SIS_TYPE_900;
987	if (pci_get_device(dev) == SIS_DEVICEID_7016)
988		sc->sis_type = SIS_TYPE_7016;
989	if (pci_get_vendor(dev) == NS_VENDORID)
990		sc->sis_type = SIS_TYPE_83815;
991
992	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
993	/*
994	 * Map control/status registers.
995	 */
996	pci_enable_busmaster(dev);
997
998	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
999	if (error) {
1000		device_printf(dev, "couldn't allocate resources\n");
1001		goto fail;
1002	}
1003
1004	/* Reset the adapter. */
1005	sis_reset(sc);
1006
1007	if (sc->sis_type == SIS_TYPE_900 &&
1008	    (sc->sis_rev == SIS_REV_635 ||
1009	    sc->sis_rev == SIS_REV_900B)) {
1010		SIO_SET(SIS_CFG_RND_CNT);
1011		SIO_SET(SIS_CFG_PERR_DETECT);
1012	}
1013
1014	/*
1015	 * Get station address from the EEPROM.
1016	 */
1017	switch (pci_get_vendor(dev)) {
1018	case NS_VENDORID:
1019		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1020
1021		/* We can't update the device description, so spew */
1022		if (sc->sis_srr == NS_SRR_15C)
1023			device_printf(dev, "Silicon Revision: DP83815C\n");
1024		else if (sc->sis_srr == NS_SRR_15D)
1025			device_printf(dev, "Silicon Revision: DP83815D\n");
1026		else if (sc->sis_srr == NS_SRR_16A)
1027			device_printf(dev, "Silicon Revision: DP83816A\n");
1028		else
1029			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
1030
1031		/*
1032		 * Reading the MAC address out of the EEPROM on
1033		 * the NatSemi chip takes a bit more work than
1034		 * you'd expect. The address spans 4 16-bit words,
1035		 * with the first word containing only a single bit.
1036		 * You have to shift everything over one bit to
1037		 * get it aligned properly. Also, the bits are
1038		 * stored backwards (the LSB is really the MSB,
1039		 * and so on) so you have to reverse them in order
1040		 * to get the MAC address into the form we want.
1041		 * Why? Who the hell knows.
1042		 */
1043		{
1044			uint16_t		tmp[4];
1045
1046			sis_read_eeprom(sc, (caddr_t)&tmp,
1047			    NS_EE_NODEADDR, 4, 0);
1048
1049			/* Shift everything over one bit. */
1050			tmp[3] = tmp[3] >> 1;
1051			tmp[3] |= tmp[2] << 15;
1052			tmp[2] = tmp[2] >> 1;
1053			tmp[2] |= tmp[1] << 15;
1054			tmp[1] = tmp[1] >> 1;
1055			tmp[1] |= tmp[0] << 15;
1056
1057			/* Now reverse all the bits. */
1058			tmp[3] = sis_reverse(tmp[3]);
1059			tmp[2] = sis_reverse(tmp[2]);
1060			tmp[1] = sis_reverse(tmp[1]);
1061
1062			eaddr[0] = (tmp[1] >> 0) & 0xFF;
1063			eaddr[1] = (tmp[1] >> 8) & 0xFF;
1064			eaddr[2] = (tmp[2] >> 0) & 0xFF;
1065			eaddr[3] = (tmp[2] >> 8) & 0xFF;
1066			eaddr[4] = (tmp[3] >> 0) & 0xFF;
1067			eaddr[5] = (tmp[3] >> 8) & 0xFF;
1068		}
1069		break;
1070	case SIS_VENDORID:
1071	default:
1072#if defined(__i386__) || defined(__amd64__)
1073		/*
1074		 * If this is a SiS 630E chipset with an embedded
1075		 * SiS 900 controller, we have to read the MAC address
1076		 * from the APC CMOS RAM. Our method for doing this
1077		 * is very ugly since we have to reach out and grab
1078		 * ahold of hardware for which we cannot properly
1079		 * allocate resources. This code is only compiled on
1080		 * the i386 architecture since the SiS 630E chipset
1081		 * is for x86 motherboards only. Note that there are
1082		 * a lot of magic numbers in this hack. These are
1083		 * taken from SiS's Linux driver. I'd like to replace
1084		 * them with proper symbolic definitions, but that
1085		 * requires some datasheets that I don't have access
1086		 * to at the moment.
1087		 */
1088		if (sc->sis_rev == SIS_REV_630S ||
1089		    sc->sis_rev == SIS_REV_630E ||
1090		    sc->sis_rev == SIS_REV_630EA1)
1091			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1092
1093		else if (sc->sis_rev == SIS_REV_635 ||
1094			 sc->sis_rev == SIS_REV_630ET)
1095			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1096		else if (sc->sis_rev == SIS_REV_96x) {
1097			/* Allow the LAN part to read the EEPROM.  It is shared
1098			 * between a 1394 controller and the NIC, and each
1099			 * time we access it we need to set SIS_EECMD_REQ.
1100			 */
1101			SIO_SET(SIS_EECMD_REQ);
1102			for (waittime = 0; waittime < SIS_TIMEOUT;
1103			    waittime++) {
1104				/* Force EEPROM to idle state. */
1105				sis_eeprom_idle(sc);
1106				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1107					sis_read_eeprom(sc, (caddr_t)&eaddr,
1108					    SIS_EE_NODEADDR, 3, 0);
1109					break;
1110				}
1111				DELAY(1);
1112			}
1113			/*
1114			 * Set SIS_EECTL_CLK high, so another master
1115			 * can operate on the i2c bus.
1116			 */
1117			SIO_SET(SIS_EECTL_CLK);
1118			/* Refuse EEPROM access by LAN */
1119			SIO_SET(SIS_EECMD_DONE);
1120		} else
1121#endif
1122			sis_read_eeprom(sc, (caddr_t)&eaddr,
1123			    SIS_EE_NODEADDR, 3, 0);
1124		break;
1125	}
1126
1127	sis_add_sysctls(sc);
1128
1129	/* Allocate DMA'able memory. */
1130	if ((error = sis_dma_alloc(sc)) != 0)
1131		goto fail;
1132
1133	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
1134	if (ifp == NULL) {
1135		device_printf(dev, "can not if_alloc()\n");
1136		error = ENOSPC;
1137		goto fail;
1138	}
1139	ifp->if_softc = sc;
1140	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1141	ifp->if_mtu = ETHERMTU;
1142	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1143	ifp->if_ioctl = sis_ioctl;
1144	ifp->if_start = sis_start;
1145	ifp->if_init = sis_init;
1146	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1147	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
1148	IFQ_SET_READY(&ifp->if_snd);
1149
1150	/*
1151	 * Do MII setup.
1152	 */
1153	if (mii_phy_probe(dev, &sc->sis_miibus,
1154	    sis_ifmedia_upd, sis_ifmedia_sts)) {
1155		device_printf(dev, "MII without any PHY!\n");
1156		error = ENXIO;
1157		goto fail;
1158	}
1159
1160	/*
1161	 * Call MI attach routine.
1162	 */
1163	ether_ifattach(ifp, eaddr);
1164
1165	/*
1166	 * Tell the upper layer(s) we support long frames.
1167	 */
1168	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1169	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1170	ifp->if_capenable = ifp->if_capabilities;
1171#ifdef DEVICE_POLLING
1172	ifp->if_capabilities |= IFCAP_POLLING;
1173#endif
1174
1175	/* Hook interrupt last to avoid having to lock softc */
1176	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1177	    NULL, sis_intr, sc, &sc->sis_intrhand);
1178
1179	if (error) {
1180		device_printf(dev, "couldn't set up irq\n");
1181		ether_ifdetach(ifp);
1182		goto fail;
1183	}
1184
1185fail:
1186	if (error)
1187		sis_detach(dev);
1188
1189	return (error);
1190}
1191
1192/*
1193 * Shutdown hardware and free up resources. This can be called any
1194 * time after the mutex has been initialized. It is called in both
1195 * the error case in attach and the normal detach case so it needs
1196 * to be careful about only freeing resources that have actually been
1197 * allocated.
1198 */
1199static int
1200sis_detach(device_t dev)
1201{
1202	struct sis_softc	*sc;
1203	struct ifnet		*ifp;
1204
1205	sc = device_get_softc(dev);
1206	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
1207	ifp = sc->sis_ifp;
1208
1209#ifdef DEVICE_POLLING
1210	if (ifp->if_capenable & IFCAP_POLLING)
1211		ether_poll_deregister(ifp);
1212#endif
1213
1214	/* These should only be active if attach succeeded. */
1215	if (device_is_attached(dev)) {
1216		SIS_LOCK(sc);
1217		sis_stop(sc);
1218		SIS_UNLOCK(sc);
1219		callout_drain(&sc->sis_stat_ch);
1220		ether_ifdetach(ifp);
1221	}
1222	if (sc->sis_miibus)
1223		device_delete_child(dev, sc->sis_miibus);
1224	bus_generic_detach(dev);
1225
1226	if (sc->sis_intrhand)
1227		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
1228	bus_release_resources(dev, sis_res_spec, sc->sis_res);
1229
1230	if (ifp)
1231		if_free(ifp);
1232
1233	sis_dma_free(sc);
1234
1235	mtx_destroy(&sc->sis_mtx);
1236
1237	return (0);
1238}
1239
1240struct sis_dmamap_arg {
1241	bus_addr_t	sis_busaddr;
1242};
1243
1244static void
1245sis_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1246{
1247	struct sis_dmamap_arg	*ctx;
1248
1249	if (error != 0)
1250		return;
1251
1252	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1253
1254	ctx = (struct sis_dmamap_arg *)arg;
1255	ctx->sis_busaddr = segs[0].ds_addr;
1256}
1257
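/*
 * Common helper: create a DMA tag for a descriptor ring, allocate
 * coherent DMA memory for it and load the map, returning the ring's
 * bus address through *paddr.
 */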
1258static int
1259sis_dma_ring_alloc(struct sis_softc *sc, bus_size_t alignment,
1260    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
1261    bus_addr_t *paddr, const char *msg)
1262{
1263	struct sis_dmamap_arg	ctx;
1264	int			error;
1265
1266	error = bus_dma_tag_create(sc->sis_parent_tag, alignment, 0,
1267	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1,
1268	    maxsize, 0, NULL, NULL, tag);
1269	if (error != 0) {
1270		device_printf(sc->sis_dev,
1271		    "could not create %s dma tag\n", msg);
1272		return (ENOMEM);
1273	}
1274	/* Allocate DMA'able memory for ring. */
1275	error = bus_dmamem_alloc(*tag, (void **)ring,
1276	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
1277	if (error != 0) {
1278		device_printf(sc->sis_dev,
1279		    "could not allocate DMA'able memory for %s\n", msg);
1280		return (ENOMEM);
1281	}
1282	/* Load the address of the ring. */
1283	ctx.sis_busaddr = 0;
1284	error = bus_dmamap_load(*tag, *map, *ring, maxsize, sis_dmamap_cb,
1285	    &ctx, BUS_DMA_NOWAIT);
1286	if (error != 0) {
1287		device_printf(sc->sis_dev,
1288		    "could not load DMA'able memory for %s\n", msg);
1289		return (ENOMEM);
1290	}
1291	*paddr = ctx.sis_busaddr;
1292	return (0);
1293}
1294
1295static int
1296sis_dma_alloc(struct sis_softc *sc)
1297{
1298	struct sis_rxdesc	*rxd;
1299	struct sis_txdesc	*txd;
1300	int			error, i;
1301
1302	/* Allocate the parent bus DMA tag appropriate for PCI. */
1303	error = bus_dma_tag_create(bus_get_dma_tag(sc->sis_dev),
1304	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1305	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
1306	    0, NULL, NULL, &sc->sis_parent_tag);
1307	if (error != 0) {
1308		device_printf(sc->sis_dev,
1309		    "could not allocate parent dma tag\n");
1310		return (ENOMEM);
1311	}
1312
1313	/* Create RX ring. */
1314	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_RX_LIST_SZ,
1315	    &sc->sis_rx_list_tag, (uint8_t **)&sc->sis_rx_list,
1316	    &sc->sis_rx_list_map, &sc->sis_rx_paddr, "RX ring");
1317	if (error)
1318		return (error);
1319
1320	/* Create TX ring. */
1321	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_TX_LIST_SZ,
1322	    &sc->sis_tx_list_tag, (uint8_t **)&sc->sis_tx_list,
1323	    &sc->sis_tx_list_map, &sc->sis_tx_paddr, "TX ring");
1324	if (error)
1325		return (error);
1326
1327	/* Create tag for RX mbufs. */
1328	error = bus_dma_tag_create(sc->sis_parent_tag, SIS_RX_BUF_ALIGN, 0,
1329	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
1330	    MCLBYTES, 0, NULL, NULL, &sc->sis_rx_tag);
1331	if (error) {
1332		device_printf(sc->sis_dev, "could not allocate RX dma tag\n");
1333		return (error);
1334	}
1335
1336	/* Create tag for TX mbufs. */
1337	error = bus_dma_tag_create(sc->sis_parent_tag, 1, 0,
1338	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1339	    MCLBYTES * SIS_MAXTXSEGS, SIS_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
1340	    &sc->sis_tx_tag);
1341	if (error) {
1342		device_printf(sc->sis_dev, "could not allocate TX dma tag\n");
1343		return (error);
1344	}
1345
1346	/* Create DMA maps for RX buffers. */
1347	error = bus_dmamap_create(sc->sis_rx_tag, 0, &sc->sis_rx_sparemap);
1348	if (error) {
1349		device_printf(sc->sis_dev,
1350		    "can't create spare DMA map for RX\n");
1351		return (error);
1352	}
1353	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1354		rxd = &sc->sis_rxdesc[i];
1355		rxd->rx_m = NULL;
1356		error = bus_dmamap_create(sc->sis_rx_tag, 0, &rxd->rx_dmamap);
1357		if (error) {
1358			device_printf(sc->sis_dev,
1359			    "can't create DMA map for RX\n");
1360			return (error);
1361		}
1362	}
1363
1364	/* Create DMA maps for TX buffers. */
1365	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1366		txd = &sc->sis_txdesc[i];
1367		txd->tx_m = NULL;
1368		error = bus_dmamap_create(sc->sis_tx_tag, 0, &txd->tx_dmamap);
1369		if (error) {
1370			device_printf(sc->sis_dev,
1371			    "can't create DMA map for TX\n");
1372			return (error);
1373		}
1374	}
1375
1376	return (0);
1377}
1378
1379static void
1380sis_dma_free(struct sis_softc *sc)
1381{
1382	struct sis_rxdesc	*rxd;
1383	struct sis_txdesc	*txd;
1384	int			i;
1385
1386	/* Destroy DMA maps for RX buffers. */
1387	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1388		rxd = &sc->sis_rxdesc[i];
1389		if (rxd->rx_dmamap)
1390			bus_dmamap_destroy(sc->sis_rx_tag, rxd->rx_dmamap);
1391	}
1392	if (sc->sis_rx_sparemap)
1393		bus_dmamap_destroy(sc->sis_rx_tag, sc->sis_rx_sparemap);
1394
1395	/* Destroy DMA maps for TX buffers. */
1396	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1397		txd = &sc->sis_txdesc[i];
1398		if (txd->tx_dmamap)
1399			bus_dmamap_destroy(sc->sis_tx_tag, txd->tx_dmamap);
1400	}
1401
1402	if (sc->sis_rx_tag)
1403		bus_dma_tag_destroy(sc->sis_rx_tag);
1404	if (sc->sis_tx_tag)
1405		bus_dma_tag_destroy(sc->sis_tx_tag);
1406
1407	/* Destroy RX ring. */
1408	if (sc->sis_rx_list_map)
1409		bus_dmamap_unload(sc->sis_rx_list_tag, sc->sis_rx_list_map);
1410	if (sc->sis_rx_list_map && sc->sis_rx_list)
1411		bus_dmamem_free(sc->sis_rx_list_tag, sc->sis_rx_list,
1412		    sc->sis_rx_list_map);
1413
1414	if (sc->sis_rx_list_tag)
1415		bus_dma_tag_destroy(sc->sis_rx_list_tag);
1416
1417	/* Destroy TX ring. */
1418	if (sc->sis_tx_list_map)
1419		bus_dmamap_unload(sc->sis_tx_list_tag, sc->sis_tx_list_map);
1420
1421	if (sc->sis_tx_list_map && sc->sis_tx_list)
1422		bus_dmamem_free(sc->sis_tx_list_tag, sc->sis_tx_list,
1423		    sc->sis_tx_list_map);
1424
1425	if (sc->sis_tx_list_tag)
1426		bus_dma_tag_destroy(sc->sis_tx_list_tag);
1427
1428	/* Destroy the parent tag. */
1429	if (sc->sis_parent_tag)
1430		bus_dma_tag_destroy(sc->sis_parent_tag);
1431}
1432
1433/*
1434 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1435 * we arrange the descriptors in a closed ring, so that the last descriptor
1436 * points back to the first.
1437 */
1438static int
1439sis_ring_init(struct sis_softc *sc)
1440{
1441	struct sis_rxdesc	*rxd;
1442	struct sis_txdesc	*txd;
1443	bus_addr_t		next;
1444	int			error, i;
1445
1446	bzero(&sc->sis_tx_list[0], SIS_TX_LIST_SZ);
1447	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1448		txd = &sc->sis_txdesc[i];
1449		txd->tx_m = NULL;
1450		if (i == SIS_TX_LIST_CNT - 1)
1451			next = SIS_TX_RING_ADDR(sc, 0);
1452		else
1453			next = SIS_TX_RING_ADDR(sc, i + 1);
1454		sc->sis_tx_list[i].sis_next = htole32(SIS_ADDR_LO(next));
1455	}
1456	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
1457	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1458	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1459
1460	sc->sis_rx_cons = 0;
1461	bzero(&sc->sis_rx_list[0], SIS_RX_LIST_SZ);
1462	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1463		rxd = &sc->sis_rxdesc[i];
1464		rxd->rx_desc = &sc->sis_rx_list[i];
1465		if (i == SIS_RX_LIST_CNT - 1)
1466			next = SIS_RX_RING_ADDR(sc, 0);
1467		else
1468			next = SIS_RX_RING_ADDR(sc, i + 1);
1469		rxd->rx_desc->sis_next = htole32(SIS_ADDR_LO(next));
1470		error = sis_newbuf(sc, rxd);
1471		if (error)
1472			return (error);
1473	}
1474	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1475	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1476
1477	return (0);
1478}
1479
1480/*
1481 * Initialize an RX descriptor and attach an MBUF cluster.
1482 */
1483static int
1484sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
1485{
1486	struct mbuf		*m;
1487	bus_dma_segment_t	segs[1];
1488	bus_dmamap_t		map;
1489	int nsegs;
1490
1491	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1492	if (m == NULL)
1493		return (ENOBUFS);
1494	m->m_len = m->m_pkthdr.len = SIS_RXLEN;
1495#ifndef __NO_STRICT_ALIGNMENT
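	/*
	 * Advance the data pointer by SIS_RX_BUF_ALIGN bytes, keeping the
	 * DMA address aligned for the chip while leaving headroom so
	 * sis_fixup_rx() can later slide the frame back to align the
	 * payload for the host.
	 */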
1496	m_adj(m, SIS_RX_BUF_ALIGN);
1497#endif
1498
1499	if (bus_dmamap_load_mbuf_sg(sc->sis_rx_tag, sc->sis_rx_sparemap, m,
1500	    segs, &nsegs, 0) != 0) {
1501		m_freem(m);
1502		return (ENOBUFS);
1503	}
1504	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1505
1506	if (rxd->rx_m != NULL) {
1507		bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
1508		    BUS_DMASYNC_POSTREAD);
1509		bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
1510	}
1511	map = rxd->rx_dmamap;
1512	rxd->rx_dmamap = sc->sis_rx_sparemap;
1513	sc->sis_rx_sparemap = map;
1514	bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
1515	rxd->rx_m = m;
1516	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1517	rxd->rx_desc->sis_ptr = htole32(SIS_ADDR_LO(segs[0].ds_addr));
1518	return (0);
1519}
1520
1521static __inline void
1522sis_discard_rxbuf(struct sis_rxdesc *rxd)
1523{
1524
1525	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1526}
1527
1528#ifndef __NO_STRICT_ALIGNMENT
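/*
 * Slide a received frame back by SIS_RX_BUF_ALIGN - ETHER_ALIGN bytes so
 * that the IP header following the Ethernet header ends up 32-bit
 * aligned on strict-alignment machines.
 */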
1529static __inline void
1530sis_fixup_rx(struct mbuf *m)
1531{
1532	uint16_t		*src, *dst;
1533	int			i;
1534
1535	src = mtod(m, uint16_t *);
1536	dst = src - (SIS_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);
1537
1538	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1539		*dst++ = *src++;
1540
1541	m->m_data -= SIS_RX_BUF_ALIGN - ETHER_ALIGN;
1542}
1543#endif
1544
1545/*
1546 * A frame has been uploaded: pass the resulting mbuf chain up to
1547 * the higher level protocols.
1548 */
1549static int
1550sis_rxeof(struct sis_softc *sc)
1551{
1552	struct mbuf		*m;
1553	struct ifnet		*ifp;
1554	struct sis_rxdesc	*rxd;
1555	struct sis_desc		*cur_rx;
1556	int			prog, rx_cons, rx_npkts = 0, total_len;
1557	uint32_t		rxstat;
1558
1559	SIS_LOCK_ASSERT(sc);
1560
1561	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1562	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1563
1564	rx_cons = sc->sis_rx_cons;
1565	ifp = sc->sis_ifp;
1566
1567	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1568	    SIS_INC(rx_cons, SIS_RX_LIST_CNT), prog++) {
1569#ifdef DEVICE_POLLING
1570		if (ifp->if_capenable & IFCAP_POLLING) {
1571			if (sc->rxcycles <= 0)
1572				break;
1573			sc->rxcycles--;
1574		}
1575#endif
1576		cur_rx = &sc->sis_rx_list[rx_cons];
1577		rxstat = le32toh(cur_rx->sis_cmdsts);
1578		if ((rxstat & SIS_CMDSTS_OWN) == 0)
1579			break;
1580		rxd = &sc->sis_rxdesc[rx_cons];
1581
1582		total_len = (rxstat & SIS_CMDSTS_BUFLEN) - ETHER_CRC_LEN;
1583		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
1584		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
1585		    ETHER_CRC_LEN))
1586			rxstat &= ~SIS_RXSTAT_GIANT;
1587		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
1588			ifp->if_ierrors++;
1589			if (rxstat & SIS_RXSTAT_COLL)
1590				ifp->if_collisions++;
1591			sis_discard_rxbuf(rxd);
1592			continue;
1593		}
1594
1595		/* Add a new receive buffer to the ring. */
1596		m = rxd->rx_m;
1597		if (sis_newbuf(sc, rxd) != 0) {
1598			ifp->if_iqdrops++;
1599			sis_discard_rxbuf(rxd);
1600			continue;
1601		}
1602
1603		/* No errors; receive the packet. */
1604		m->m_pkthdr.len = m->m_len = total_len;
1605#ifndef __NO_STRICT_ALIGNMENT
1606		/*
1607		 * On strict-alignment architectures the frame starts at a
1608		 * 32-bit aligned address (see sis_newbuf()), which leaves the
1609		 * IP header misaligned; copy it back by SIS_RX_BUF_ALIGN -
1610		 * ETHER_ALIGN bytes so the payload is properly aligned.
1611		 */
1612		sis_fixup_rx(m);
1613#endif
1614		ifp->if_ipackets++;
1615		m->m_pkthdr.rcvif = ifp;
1616
1617		SIS_UNLOCK(sc);
1618		(*ifp->if_input)(ifp, m);
1619		SIS_LOCK(sc);
1620		rx_npkts++;
1621	}
1622
1623	if (prog > 0) {
1624		sc->sis_rx_cons = rx_cons;
1625		bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1626		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1627	}
1628
1629	return (rx_npkts);
1630}
1631
1632/*
1633 * A frame was downloaded to the chip. It's safe for us to clean up
1634 * the list buffers.
1635 */
1636
1637static void
1638sis_txeof(struct sis_softc *sc)
1639{
1640	struct ifnet		*ifp;
1641	struct sis_desc		*cur_tx;
1642	struct sis_txdesc	*txd;
1643	uint32_t		cons, txstat;
1644
1645	SIS_LOCK_ASSERT(sc);
1646
1647	cons = sc->sis_tx_cons;
1648	if (cons == sc->sis_tx_prod)
1649		return;
1650
1651	ifp = sc->sis_ifp;
1652	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1653	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1654
1655	/*
1656	 * Go through our tx list and free mbufs for those
1657	 * frames that have been transmitted.
1658	 */
1659	for (; cons != sc->sis_tx_prod; SIS_INC(cons, SIS_TX_LIST_CNT)) {
1660		cur_tx = &sc->sis_tx_list[cons];
1661		txstat = le32toh(cur_tx->sis_cmdsts);
1662		if ((txstat & SIS_CMDSTS_OWN) != 0)
1663			break;
1664		txd = &sc->sis_txdesc[cons];
1665		if (txd->tx_m != NULL) {
1666			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
1667			    BUS_DMASYNC_POSTWRITE);
1668			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1669			m_freem(txd->tx_m);
1670			txd->tx_m = NULL;
1671			if ((txstat & SIS_CMDSTS_PKT_OK) != 0) {
1672				ifp->if_opackets++;
1673				ifp->if_collisions +=
1674				    (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1675			} else {
1676				ifp->if_oerrors++;
1677				if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1678					ifp->if_collisions++;
1679				if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1680					ifp->if_collisions++;
1681			}
1682		}
1683		sc->sis_tx_cnt--;
1684		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1685	}
1686	sc->sis_tx_cons = cons;
1687	if (sc->sis_tx_cnt == 0)
1688		sc->sis_watchdog_timer = 0;
1689}
1690
1691static void
1692sis_tick(void *xsc)
1693{
1694	struct sis_softc	*sc;
1695	struct mii_data		*mii;
1696	struct ifnet		*ifp;
1697
1698	sc = xsc;
1699	SIS_LOCK_ASSERT(sc);
1700	ifp = sc->sis_ifp;
1701
1702	mii = device_get_softc(sc->sis_miibus);
1703	mii_tick(mii);
1704	sis_watchdog(sc);
1705	if ((sc->sis_flags & SIS_FLAG_LINK) == 0)
1706		sis_miibus_statchg(sc->sis_dev);
1707	callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
1708}
1709
1710#ifdef DEVICE_POLLING
1711static poll_handler_t sis_poll;
1712
1713static int
1714sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1715{
1716	struct	sis_softc *sc = ifp->if_softc;
1717	int rx_npkts = 0;
1718
1719	SIS_LOCK(sc);
1720	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1721		SIS_UNLOCK(sc);
1722		return (rx_npkts);
1723	}
1724
1725	/*
1726	 * On the sis, reading the status register also clears it.
1727	 * So before returning to intr mode we must make sure that all
1728	 * possible pending sources of interrupts have been served.
1729	 * In practice this means run to completion the *eof routines,
1730	 * and then call the interrupt routine
1731	 */
1732	sc->rxcycles = count;
1733	rx_npkts = sis_rxeof(sc);
1734	sis_txeof(sc);
1735	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1736		sis_startl(ifp);
1737
1738	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1739		uint32_t	status;
1740
1741		/* Reading the ISR register clears all interrupts. */
1742		status = CSR_READ_4(sc, SIS_ISR);
1743
1744		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1745			ifp->if_ierrors++;
1746
1747		if (status & (SIS_ISR_RX_IDLE))
1748			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1749
1750		if (status & SIS_ISR_SYSERR) {
1751			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1752			sis_initl(sc);
1753		}
1754	}
1755
1756	SIS_UNLOCK(sc);
1757	return (rx_npkts);
1758}
1759#endif /* DEVICE_POLLING */
1760
1761static void
1762sis_intr(void *arg)
1763{
1764	struct sis_softc	*sc;
1765	struct ifnet		*ifp;
1766	uint32_t		status;
1767
1768	sc = arg;
1769	ifp = sc->sis_ifp;
1770
1771	SIS_LOCK(sc);
1772#ifdef DEVICE_POLLING
1773	if (ifp->if_capenable & IFCAP_POLLING) {
1774		SIS_UNLOCK(sc);
1775		return;
1776	}
1777#endif
1778
1779	/* Reading the ISR register clears all interrupts. */
1780	status = CSR_READ_4(sc, SIS_ISR);
1781	if ((status & SIS_INTRS) == 0) {
1782		/* Not ours. */
1783		SIS_UNLOCK(sc);
		return;
1784	}
1785
1786	/* Disable interrupts. */
1787	CSR_WRITE_4(sc, SIS_IER, 0);
1788
1789	for (;(status & SIS_INTRS) != 0;) {
1790		if (status &
1791		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1792		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
1793			sis_txeof(sc);
1794
1795		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1796		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1797			sis_rxeof(sc);
1798
1799		if (status & SIS_ISR_RX_OFLOW)
1800			ifp->if_ierrors++;
1801
1802		if (status & (SIS_ISR_RX_IDLE))
1803			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1804
1805		if (status & SIS_ISR_SYSERR) {
1806			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1807			sis_initl(sc);
1808			SIS_UNLOCK(sc);
1809			return;
1810		}
1811		status = CSR_READ_4(sc, SIS_ISR);
1812	}
1813
1814	/* Re-enable interrupts. */
1815	CSR_WRITE_4(sc, SIS_IER, 1);
1816
1817	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1818		sis_startl(ifp);
1819
1820	SIS_UNLOCK(sc);
1821}
1822
1823/*
1824 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1825 * pointers to the fragment pointers.
1826 */
1827static int
1828sis_encap(struct sis_softc *sc, struct mbuf **m_head)
1829{
1830	struct mbuf		*m;
1831	struct sis_txdesc	*txd;
1832	struct sis_desc		*f;
1833	bus_dma_segment_t	segs[SIS_MAXTXSEGS];
1834	bus_dmamap_t		map;
1835	int			error, i, frag, nsegs, prod;
1836	int			padlen;
1837
1838	prod = sc->sis_tx_prod;
1839	txd = &sc->sis_txdesc[prod];
1840	if ((sc->sis_flags & SIS_FLAG_MANUAL_PAD) != 0 &&
1841	    (*m_head)->m_pkthdr.len < SIS_MIN_FRAMELEN) {
1842		m = *m_head;
1843		padlen = SIS_MIN_FRAMELEN - m->m_pkthdr.len;
1844		if (M_WRITABLE(m) == 0) {
1845			/* Get a writable copy. */
1846			m = m_dup(*m_head, M_DONTWAIT);
1847			m_freem(*m_head);
1848			if (m == NULL) {
1849				*m_head = NULL;
1850				return (ENOBUFS);
1851			}
1852			*m_head = m;
1853		}
1854		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1855			m = m_defrag(m, M_DONTWAIT);
1856			if (m == NULL) {
1857				m_freem(*m_head);
1858				*m_head = NULL;
1859				return (ENOBUFS);
1860			}
1861		}
1862		/*
1863		 * Manually pad short frames, and zero the pad space
1864		 * to avoid leaking data.
1865		 */
1866		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1867		m->m_pkthdr.len += padlen;
1868		m->m_len = m->m_pkthdr.len;
1869		*m_head = m;
1870	}
1871	error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1872	    *m_head, segs, &nsegs, 0);
1873	if (error == EFBIG) {
1874		m = m_collapse(*m_head, M_DONTWAIT, SIS_MAXTXSEGS);
1875		if (m == NULL) {
1876			m_freem(*m_head);
1877			*m_head = NULL;
1878			return (ENOBUFS);
1879		}
1880		*m_head = m;
1881		error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1882		    *m_head, segs, &nsegs, 0);
1883		if (error != 0) {
1884			m_freem(*m_head);
1885			*m_head = NULL;
1886			return (error);
1887		}
1888	} else if (error != 0)
1889		return (error);
1890
1891	/* Check for descriptor overruns. */
1892	if (sc->sis_tx_cnt + nsegs > SIS_TX_LIST_CNT - 1) {
1893		bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1894		return (ENOBUFS);
1895	}
1896
1897	bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE);
1898
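	/*
	 * Build one descriptor per DMA segment.  OWN is deliberately left
	 * clear on the first descriptor until the whole chain is filled in;
	 * setting it last hands the entire frame to the chip atomically.
	 */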
1899	frag = prod;
1900	for (i = 0; i < nsegs; i++) {
1901		f = &sc->sis_tx_list[prod];
1902		if (i == 0)
1903			f->sis_cmdsts = htole32(segs[i].ds_len |
1904			    SIS_CMDSTS_MORE);
1905		else
1906			f->sis_cmdsts = htole32(segs[i].ds_len |
1907			    SIS_CMDSTS_OWN | SIS_CMDSTS_MORE);
1908		f->sis_ptr = htole32(SIS_ADDR_LO(segs[i].ds_addr));
1909		SIS_INC(prod, SIS_TX_LIST_CNT);
1910		sc->sis_tx_cnt++;
1911	}
1912
1913	/* Update producer index. */
1914	sc->sis_tx_prod = prod;
1915
1916	/* Remove MORE flag on the last descriptor. */
1917	prod = (prod - 1) & (SIS_TX_LIST_CNT - 1);
1918	f = &sc->sis_tx_list[prod];
1919	f->sis_cmdsts &= ~htole32(SIS_CMDSTS_MORE);
1920
1921	/* Lastly transfer ownership of packet to the controller. */
1922	f = &sc->sis_tx_list[frag];
1923	f->sis_cmdsts |= htole32(SIS_CMDSTS_OWN);
1924
1925	/* Swap the last and the first dmamaps. */
1926	map = txd->tx_dmamap;
1927	txd->tx_dmamap = sc->sis_txdesc[prod].tx_dmamap;
1928	sc->sis_txdesc[prod].tx_dmamap = map;
1929	txd->tx_m = *m_head;
1930
1931	return (0);
1932}
1933
1934static void
1935sis_start(struct ifnet *ifp)
1936{
1937	struct sis_softc	*sc;
1938
1939	sc = ifp->if_softc;
1940	SIS_LOCK(sc);
1941	sis_startl(ifp);
1942	SIS_UNLOCK(sc);
1943}
1944
1945static void
1946sis_startl(struct ifnet *ifp)
1947{
1948	struct sis_softc	*sc;
1949	struct mbuf		*m_head;
1950	int			queued;
1951
1952	sc = ifp->if_softc;
1953
1954	SIS_LOCK_ASSERT(sc);
1955
1956	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1957	    IFF_DRV_RUNNING || (sc->sis_flags & SIS_FLAG_LINK) == 0)
1958		return;
1959
1960	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1961	    sc->sis_tx_cnt < SIS_TX_LIST_CNT - 4;) {
1962		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1963		if (m_head == NULL)
1964			break;
1965
1966		if (sis_encap(sc, &m_head) != 0) {
1967			if (m_head == NULL)
1968				break;
1969			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1970			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1971			break;
1972		}
1973
1974		queued++;
1975
1976		/*
1977		 * If there's a BPF listener, bounce a copy of this frame
1978		 * to him.
1979		 */
1980		BPF_MTAP(ifp, m_head);
1981	}
1982
1983	if (queued) {
1984		/* Transmit */
1985		bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1986		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1987		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1988
1989		/*
1990		 * Set a timeout in case the chip goes out to lunch.
1991		 */
1992		sc->sis_watchdog_timer = 5;
1993	}
1994}
1995
1996static void
1997sis_init(void *xsc)
1998{
1999	struct sis_softc	*sc = xsc;
2000
2001	SIS_LOCK(sc);
2002	sis_initl(sc);
2003	SIS_UNLOCK(sc);
2004}
2005
2006static void
2007sis_initl(struct sis_softc *sc)
2008{
2009	struct ifnet		*ifp = sc->sis_ifp;
2010	struct mii_data		*mii;
2011	uint8_t			*eaddr;
2012
2013	SIS_LOCK_ASSERT(sc);
2014
2015	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2016		return;
2017
2018	/*
2019	 * Cancel pending I/O and free all RX/TX buffers.
2020	 */
2021	sis_stop(sc);
2022	/*
2023	 * Reset the chip to a known state.
2024	 */
2025	sis_reset(sc);
2026#ifdef notyet
2027	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
2028		/*
2029		 * Configure 400usec of interrupt holdoff.  This is based
2030		 * on empirical tests on a Soekris 4801.
2031 		 */
2032		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
2033	}
2034#endif
2035
2036	mii = device_get_softc(sc->sis_miibus);
2037
2038	/* Set MAC address */
2039	eaddr = IF_LLADDR(sc->sis_ifp);
2040	if (sc->sis_type == SIS_TYPE_83815) {
2041		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
2042		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
2043		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
2044		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
2045		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
2046		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
2047	} else {
2048		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
2049		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[0] | eaddr[1] << 8);
2050		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
2051		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[2] | eaddr[3] << 8);
2052		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
2053		CSR_WRITE_4(sc, SIS_RXFILT_DATA, eaddr[4] | eaddr[5] << 8);
2054	}
2055
2056	/* Init circular TX/RX lists. */
2057	if (sis_ring_init(sc) != 0) {
2058		device_printf(sc->sis_dev,
2059		    "initialization failed: no memory for rx buffers\n");
2060		sis_stop(sc);
2061		return;
2062	}
2063
2064	if (sc->sis_type == SIS_TYPE_83815 || sc->sis_type == SIS_TYPE_83816) {
2065		if (sc->sis_manual_pad != 0)
2066			sc->sis_flags |= SIS_FLAG_MANUAL_PAD;
2067		else
2068			sc->sis_flags &= ~SIS_FLAG_MANUAL_PAD;
2069	}
2070
2071	/*
2072	 * Short Cable Receive Errors (MP21.E).
2073	 * Also, page 78 of the DP83815 data sheet (September 2002 version)
2074	 * recommends the following register settings "for optimum
2075	 * performance" for rev 15C parts.  Apply them to rev 15D parts as
2076	 * well, since they require them in practice.
2077	 */
2078	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
2079		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2080		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
2081		/* set val for c2 */
2082		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
2083		/* load/kill c2 */
2084		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
2085		/* raise SD off, from 4 to c */
2086		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
2087		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
2088	}
2089
2090	/*
2091	 * For the NatSemi chip, we have to explicitly enable the
2092	 * reception of ARP frames, as well as turn on the 'perfect
2093	 * match' filter where we store the station address, otherwise
2094	 * we won't receive unicasts meant for this host.
2095	 */
2096	if (sc->sis_type == SIS_TYPE_83815) {
2097		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
2098		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
2099	}
2100
2101	/* If we want promiscuous mode, set the allframes bit. */
2102	if (ifp->if_flags & IFF_PROMISC) {
2103		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2104	} else {
2105		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2106	}
2107
2108	/*
2109	 * Set the capture broadcast bit to capture broadcast frames.
2110	 */
2111	if (ifp->if_flags & IFF_BROADCAST) {
2112		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2113	} else {
2114		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2115	}
2116
2117	/*
2118	 * Load the multicast filter.
2119	 */
2120	if (sc->sis_type == SIS_TYPE_83815)
2121		sis_setmulti_ns(sc);
2122	else
2123		sis_setmulti_sis(sc);
2124
2125	/* Turn the receive filter on */
2126	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
2127
2128	/*
2129	 * Load the address of the RX and TX lists.
2130	 */
2131	CSR_WRITE_4(sc, SIS_RX_LISTPTR, SIS_ADDR_LO(sc->sis_rx_paddr));
2132	CSR_WRITE_4(sc, SIS_TX_LISTPTR, SIS_ADDR_LO(sc->sis_tx_paddr));
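	/*
	 * Only the low 32 bits of the ring addresses are programmed
	 * (SIS_ADDR_LO), so the descriptor rings are presumably restricted
	 * to the lower 4GB of the address space.
	 */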
2133
2134	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
2135	 * the PCI bus. When this bit is set, the Max DMA Burst Size
2136	 * for TX/RX DMA should be no larger than 16 double words.
2137	 */
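	/*
	 * 16 double words is 64 bytes; SIS_RXCFG64 below presumably limits
	 * the RX DMA burst accordingly, while SIS_RXCFG256 allows 256-byte
	 * bursts on a normal PCI bus.
	 */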
2138	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
2139		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
2140	} else {
2141		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
2142	}
2143
2144	/* Accept Long Packets for VLAN support */
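	/* (An 802.1Q tag adds 4 bytes beyond the normal maximum frame size.) */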
2145	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
2146
2147	/*
2148	 * Assume 100Mbps link, actual MAC configuration is done
2149	 * after getting a valid link.
2150	 */
2151	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2152
2153	/*
2154	 * Enable interrupts.
2155	 */
2156	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
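	/*
	 * SIS_IMR selects which events may interrupt; SIS_IER, written
	 * below and toggled again by the polling code in sis_ioctl(),
	 * appears to act as the global interrupt on/off switch.
	 */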
2157#ifdef DEVICE_POLLING
2158	/*
2159	 * ... only enable interrupts if we are not polling; make sure
2160	 * they are off otherwise.
2161	 */
2162	if (ifp->if_capenable & IFCAP_POLLING)
2163		CSR_WRITE_4(sc, SIS_IER, 0);
2164	else
2165#endif
2166	CSR_WRITE_4(sc, SIS_IER, 1);
2167
2168	/* Clear MAC disable. */
2169	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2170
2171	sc->sis_flags &= ~SIS_FLAG_LINK;
2172	mii_mediachg(mii);
2173
2174	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2175	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2176
2177	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
2178}
2179
2180/*
2181 * Set media options.
2182 */
2183static int
2184sis_ifmedia_upd(struct ifnet *ifp)
2185{
2186	struct sis_softc	*sc;
2187	struct mii_data		*mii;
2188	int			error;
2189
2190	sc = ifp->if_softc;
2191
2192	SIS_LOCK(sc);
2193	mii = device_get_softc(sc->sis_miibus);
2194	if (mii->mii_instance) {
2195		struct mii_softc	*miisc;
2196		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2197			mii_phy_reset(miisc);
2198	}
2199	error = mii_mediachg(mii);
2200	SIS_UNLOCK(sc);
2201
2202	return (error);
2203}
2204
2205/*
2206 * Report current media status.
2207 */
2208static void
2209sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2210{
2211	struct sis_softc	*sc;
2212	struct mii_data		*mii;
2213
2214	sc = ifp->if_softc;
2215
2216	SIS_LOCK(sc);
2217	mii = device_get_softc(sc->sis_miibus);
2218	mii_pollstat(mii);
2219	SIS_UNLOCK(sc);
2220	ifmr->ifm_active = mii->mii_media_active;
2221	ifmr->ifm_status = mii->mii_media_status;
2222}
2223
2224static int
2225sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2226{
2227	struct sis_softc	*sc = ifp->if_softc;
2228	struct ifreq		*ifr = (struct ifreq *) data;
2229	struct mii_data		*mii;
2230	int			error = 0;
2231
2232	switch (command) {
2233	case SIOCSIFFLAGS:
2234		SIS_LOCK(sc);
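		/*
		 * If the interface is up and running and only
		 * IFF_PROMISC/IFF_ALLMULTI changed, just reprogram the RX
		 * filter; otherwise go through sis_initl(), which is a
		 * no-op when the interface is already running.
		 */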
2235		if (ifp->if_flags & IFF_UP) {
2236			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2237			    ((ifp->if_flags ^ sc->sis_if_flags) &
2238			    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2239				if (sc->sis_type == SIS_TYPE_83815)
2240					sis_setmulti_ns(sc);
2241				else
2242					sis_setmulti_sis(sc);
2243			} else
2244				sis_initl(sc);
2245		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2246			sis_stop(sc);
2247		}
2248		sc->sis_if_flags = ifp->if_flags;
2249		SIS_UNLOCK(sc);
2250		error = 0;
2251		break;
2252	case SIOCADDMULTI:
2253	case SIOCDELMULTI:
2254		SIS_LOCK(sc);
2255		if (sc->sis_type == SIS_TYPE_83815)
2256			sis_setmulti_ns(sc);
2257		else
2258			sis_setmulti_sis(sc);
2259		SIS_UNLOCK(sc);
2260		break;
2261	case SIOCGIFMEDIA:
2262	case SIOCSIFMEDIA:
2263		mii = device_get_softc(sc->sis_miibus);
2264		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2265		break;
2266	case SIOCSIFCAP:
2267		/* Enable or disable polling; interrupts stay off while polling. */
2268#ifdef DEVICE_POLLING
2269		if (ifr->ifr_reqcap & IFCAP_POLLING &&
2270		    !(ifp->if_capenable & IFCAP_POLLING)) {
2271			error = ether_poll_register(sis_poll, ifp);
2272			if (error)
2273				return (error);
2274			SIS_LOCK(sc);
2275			/* Disable interrupts */
2276			CSR_WRITE_4(sc, SIS_IER, 0);
2277			ifp->if_capenable |= IFCAP_POLLING;
2278			SIS_UNLOCK(sc);
2279			return (error);
2280
2281		}
2282		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
2283		    ifp->if_capenable & IFCAP_POLLING) {
2284			error = ether_poll_deregister(ifp);
2285			/* Enable interrupts. */
2286			SIS_LOCK(sc);
2287			CSR_WRITE_4(sc, SIS_IER, 1);
2288			ifp->if_capenable &= ~IFCAP_POLLING;
2289			SIS_UNLOCK(sc);
2290			return (error);
2291		}
2292#endif /* DEVICE_POLLING */
2293		break;
2294	default:
2295		error = ether_ioctl(ifp, command, data);
2296		break;
2297	}
2298
2299	return (error);
2300}
2301
2302static void
2303sis_watchdog(struct sis_softc *sc)
2304{
2305
2306	SIS_LOCK_ASSERT(sc);
2307
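	/*
	 * The timer is armed (to 5) in sis_startl() and counted down here;
	 * assuming this routine is driven once a second from the stats
	 * callout, the chip gets roughly five seconds to make progress
	 * before a timeout is declared.
	 */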
2308	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
2309		return;
2310
2311	device_printf(sc->sis_dev, "watchdog timeout\n");
2312	sc->sis_ifp->if_oerrors++;
2313
2314	sc->sis_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2315	sis_initl(sc);
2316
2317	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
2318		sis_startl(sc->sis_ifp);
2319}
2320
2321/*
2322 * Stop the adapter and free any mbufs allocated to the
2323 * RX and TX lists.
2324 */
2325static void
2326sis_stop(struct sis_softc *sc)
2327{
2328	struct ifnet *ifp;
2329	struct sis_rxdesc *rxd;
2330	struct sis_txdesc *txd;
2331	int i;
2332
2333	SIS_LOCK_ASSERT(sc);
2334
2335	ifp = sc->sis_ifp;
2336	sc->sis_watchdog_timer = 0;
2337
2338	callout_stop(&sc->sis_stat_ch);
2339
2340	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2341	CSR_WRITE_4(sc, SIS_IER, 0);
2342	CSR_WRITE_4(sc, SIS_IMR, 0);
2343	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2344	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2345	DELAY(1000);
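	/*
	 * The 1ms delay presumably lets any in-flight DMA stop before the
	 * list pointers are cleared below.
	 */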
2346	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2347	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2348
2349	sc->sis_flags &= ~SIS_FLAG_LINK;
2350
2351	/*
2352	 * Free data in the RX lists.
2353	 */
2354	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2355		rxd = &sc->sis_rxdesc[i];
2356		if (rxd->rx_m != NULL) {
2357			bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
2358			    BUS_DMASYNC_POSTREAD);
2359			bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
2360			m_freem(rxd->rx_m);
2361			rxd->rx_m = NULL;
2362		}
2363	}
2364
2365	/*
2366	 * Free the TX list buffers.
2367	 */
2368	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2369		txd = &sc->sis_txdesc[i];
2370		if (txd->tx_m != NULL) {
2371			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
2372			    BUS_DMASYNC_POSTWRITE);
2373			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
2374			m_freem(txd->tx_m);
2375			txd->tx_m = NULL;
2376		}
2377	}
2378}
2379
2380/*
2381 * Stop all chip I/O so that the kernel's probe routines don't
2382 * get confused by errant DMAs when rebooting.
2383 */
2384static int
2385sis_shutdown(device_t dev)
2386{
2387	struct sis_softc	*sc;
2388
2389	sc = device_get_softc(dev);
2390	SIS_LOCK(sc);
2391	sis_stop(sc);
2392	SIS_UNLOCK(sc);
2393	return (0);
2394}
2395
2396static int
2397sis_suspend(device_t dev)
2398{
2399	struct sis_softc	*sc;
2400
2401	sc = device_get_softc(dev);
2402	SIS_LOCK(sc);
2403	sis_stop(sc);
2404	SIS_UNLOCK(sc);
2405	return (0);
2406}
2407
2408static int
2409sis_resume(device_t dev)
2410{
2411	struct sis_softc	*sc;
2412	struct ifnet		*ifp;
2413
2414	sc = device_get_softc(dev);
2415	SIS_LOCK(sc);
2416	ifp = sc->sis_ifp;
2417	if ((ifp->if_flags & IFF_UP) != 0) {
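		/*
		 * Clear IFF_DRV_RUNNING first; sis_initl() returns early if
		 * it is still set.
		 */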
2418		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2419		sis_initl(sc);
2420	}
2421	SIS_UNLOCK(sc);
2422	return (0);
2423}
2424
2425static void
2426sis_add_sysctls(struct sis_softc *sc)
2427{
2428	struct sysctl_ctx_list *ctx;
2429	struct sysctl_oid_list *children;
2430	char tn[32];
2431	int unit;
2432
2433	ctx = device_get_sysctl_ctx(sc->sis_dev);
2434	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sis_dev));
2435
2436	unit = device_get_unit(sc->sis_dev);
2437	/*
2438	 * Unlike most other controllers, the NS DP83815/DP83816 controllers
2439	 * seem to pad with 0xFF when they encounter short frames.  According
2440	 * to RFC 1042 the pad bytes should be 0x00.  Turning this tunable on
2441	 * makes the driver pad manually, but it is disabled by default
2442	 * because it costs extra CPU cycles for short frames.
2443	 */
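	/*
	 * Example (hypothetical unit 0): set dev.sis.0.manual_pad=1 in
	 * loader.conf(5) to enable padding at boot, or flip it with
	 * sysctl(8) at runtime; a runtime change takes effect the next
	 * time the interface is initialized (see sis_initl()).
	 */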
2444	sc->sis_manual_pad = 0;
2445	snprintf(tn, sizeof(tn), "dev.sis.%d.manual_pad", unit);
2446	TUNABLE_INT_FETCH(tn, &sc->sis_manual_pad);
2447	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "manual_pad",
2448	    CTLFLAG_RW, &sc->sis_manual_pad, 0, "Manually pad short frames");
2449}
2450
2451static device_method_t sis_methods[] = {
2452	/* Device interface */
2453	DEVMETHOD(device_probe,		sis_probe),
2454	DEVMETHOD(device_attach,	sis_attach),
2455	DEVMETHOD(device_detach,	sis_detach),
2456	DEVMETHOD(device_shutdown,	sis_shutdown),
2457	DEVMETHOD(device_suspend,	sis_suspend),
2458	DEVMETHOD(device_resume,	sis_resume),
2459
2460	/* bus interface */
2461	DEVMETHOD(bus_print_child,	bus_generic_print_child),
2462	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
2463
2464	/* MII interface */
2465	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
2466	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
2467	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
2468
2469	{ 0, 0 }
2470};
2471
2472static driver_t sis_driver = {
2473	"sis",
2474	sis_methods,
2475	sizeof(struct sis_softc)
2476};
2477
2478static devclass_t sis_devclass;
2479
2480DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
2481DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
2482