if_sis.c revision 212119
1/*-
2 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
3 * Copyright (c) 1997, 1998, 1999
4 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/sis/if_sis.c 212119 2010-09-01 22:08:23Z yongari $");
36
37/*
38 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
39 * available from http://www.sis.com.tw.
40 *
41 * This driver also supports the NatSemi DP83815. Datasheets are
42 * available from http://www.national.com.
43 *
44 * Written by Bill Paul <wpaul@ee.columbia.edu>
45 * Electrical Engineering Department
46 * Columbia University, New York City
47 */
48/*
49 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
50 * simple TX and RX descriptors of 3 longwords in size. The receiver
51 * has a single perfect filter entry for the station address and a
52 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
53 * transceiver while the 7016 requires an external transceiver chip.
54 * Both chips offer the standard bit-bang MII interface as well as
55 * an enhanced PHY interface which simplifies accessing MII registers.
56 *
57 * The only downside to this chipset is that RX descriptors must be
58 * longword aligned.
59 */
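
/*
 * For reference, the 3-longword descriptor mentioned above maps onto the
 * fields used throughout this file (see struct sis_desc in if_sisreg.h):
 * sis_next holds the physical address of the next descriptor in the ring,
 * sis_cmdsts holds the OWN/MORE/status bits plus the buffer length, and
 * sis_ptr holds the physical address of the packet buffer.
 */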
60
61#ifdef HAVE_KERNEL_OPTION_HEADERS
62#include "opt_device_polling.h"
63#endif
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/bus.h>
68#include <sys/endian.h>
69#include <sys/kernel.h>
70#include <sys/lock.h>
71#include <sys/malloc.h>
72#include <sys/mbuf.h>
73#include <sys/module.h>
74#include <sys/socket.h>
75#include <sys/sockio.h>
76
77#include <net/if.h>
78#include <net/if_arp.h>
79#include <net/ethernet.h>
80#include <net/if_dl.h>
81#include <net/if_media.h>
82#include <net/if_types.h>
83#include <net/if_vlan_var.h>
84
85#include <net/bpf.h>
86
87#include <machine/bus.h>
88#include <machine/resource.h>
89#include <sys/bus.h>
90#include <sys/rman.h>
91
92#include <dev/mii/mii.h>
93#include <dev/mii/miivar.h>
94
95#include <dev/pci/pcireg.h>
96#include <dev/pci/pcivar.h>
97
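/*
 * When SIS_USEIOSPACE is defined the registers are mapped through I/O
 * space (SYS_RES_IOPORT); otherwise memory space is used.  See
 * sis_res_spec below.
 */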
98#define SIS_USEIOSPACE
99
100#include <dev/sis/if_sisreg.h>
101
102MODULE_DEPEND(sis, pci, 1, 1, 1);
103MODULE_DEPEND(sis, ether, 1, 1, 1);
104MODULE_DEPEND(sis, miibus, 1, 1, 1);
105
106/* "device miibus" required.  See GENERIC if you get errors here. */
107#include "miibus_if.h"
108
109#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
110#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
111#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)
112
113/*
114 * register space access macros
115 */
116#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)
117
118#define CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)
119
120#define CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)
121
122/*
123 * Various supported device vendors/types and their names.
124 */
125static struct sis_type sis_devs[] = {
126	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
127	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
128	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
129	{ 0, 0, NULL }
130};
131
132static int sis_detach(device_t);
133static __inline void sis_discard_rxbuf(struct sis_rxdesc *);
134static int sis_dma_alloc(struct sis_softc *);
135static void sis_dma_free(struct sis_softc *);
136static int sis_dma_ring_alloc(struct sis_softc *, bus_size_t, bus_size_t,
137    bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
138static void sis_dmamap_cb(void *, bus_dma_segment_t *, int, int);
139#ifndef __NO_STRICT_ALIGNMENT
140static __inline void sis_fixup_rx(struct mbuf *);
141#endif
142static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
143static int sis_ifmedia_upd(struct ifnet *);
144static void sis_init(void *);
145static void sis_initl(struct sis_softc *);
146static void sis_intr(void *);
147static int sis_ioctl(struct ifnet *, u_long, caddr_t);
148static int sis_newbuf(struct sis_softc *, struct sis_rxdesc *);
149static int sis_rxeof(struct sis_softc *);
150static void sis_start(struct ifnet *);
151static void sis_startl(struct ifnet *);
152static void sis_stop(struct sis_softc *);
153static void sis_watchdog(struct sis_softc *);
154
155
156static struct resource_spec sis_res_spec[] = {
157#ifdef SIS_USEIOSPACE
158	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
159#else
160	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
161#endif
162	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
163	{ -1, 0 }
164};
165
166#define SIS_SETBIT(sc, reg, x)				\
167	CSR_WRITE_4(sc, reg,				\
168		CSR_READ_4(sc, reg) | (x))
169
170#define SIS_CLRBIT(sc, reg, x)				\
171	CSR_WRITE_4(sc, reg,				\
172		CSR_READ_4(sc, reg) & ~(x))
173
174#define SIO_SET(x)					\
175	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)
176
177#define SIO_CLR(x)					\
178	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)
179
180/*
181 * Routine to reverse the bits in a word. Stolen almost
182 * verbatim from /usr/games/fortune.
183 */
184static uint16_t
185sis_reverse(uint16_t n)
186{
187	n = ((n >>  1) & 0x5555) | ((n <<  1) & 0xaaaa);
188	n = ((n >>  2) & 0x3333) | ((n <<  2) & 0xcccc);
189	n = ((n >>  4) & 0x0f0f) | ((n <<  4) & 0xf0f0);
190	n = ((n >>  8) & 0x00ff) | ((n <<  8) & 0xff00);
191
192	return (n);
193}
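
/*
 * Example: sis_reverse(0x0001) yields 0x8000.  This is used below to undo
 * the bit-reversed station address stored in the NatSemi EEPROM.
 */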
194
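/*
 * Pause briefly (a handful of dummy CSR reads, at least a few hundred
 * nanoseconds on a 33MHz PCI bus) to pace the bit-banged EEPROM
 * interface below.
 */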
195static void
196sis_delay(struct sis_softc *sc)
197{
198	int			idx;
199
200	for (idx = (300 / 33) + 1; idx > 0; idx--)
201		CSR_READ_4(sc, SIS_CSR);
202}
203
204static void
205sis_eeprom_idle(struct sis_softc *sc)
206{
207	int		i;
208
209	SIO_SET(SIS_EECTL_CSEL);
210	sis_delay(sc);
211	SIO_SET(SIS_EECTL_CLK);
212	sis_delay(sc);
213
214	for (i = 0; i < 25; i++) {
215		SIO_CLR(SIS_EECTL_CLK);
216		sis_delay(sc);
217		SIO_SET(SIS_EECTL_CLK);
218		sis_delay(sc);
219	}
220
221	SIO_CLR(SIS_EECTL_CLK);
222	sis_delay(sc);
223	SIO_CLR(SIS_EECTL_CSEL);
224	sis_delay(sc);
225	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
226}
227
228/*
229 * Send a read command and address to the EEPROM, check for ACK.
230 */
231static void
232sis_eeprom_putbyte(struct sis_softc *sc, int addr)
233{
234	int		d, i;
235
236	d = addr | SIS_EECMD_READ;
237
238	/*
239	 * Feed in each bit and strobe the clock.
240	 */
241	for (i = 0x400; i; i >>= 1) {
242		if (d & i) {
243			SIO_SET(SIS_EECTL_DIN);
244		} else {
245			SIO_CLR(SIS_EECTL_DIN);
246		}
247		sis_delay(sc);
248		SIO_SET(SIS_EECTL_CLK);
249		sis_delay(sc);
250		SIO_CLR(SIS_EECTL_CLK);
251		sis_delay(sc);
252	}
253}
254
255/*
256 * Read a word of data stored in the EEPROM at address 'addr.'
257 */
258static void
259sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
260{
261	int		i;
262	uint16_t	word = 0;
263
264	/* Force EEPROM to idle state. */
265	sis_eeprom_idle(sc);
266
267	/* Enter EEPROM access mode. */
268	sis_delay(sc);
269	SIO_CLR(SIS_EECTL_CLK);
270	sis_delay(sc);
271	SIO_SET(SIS_EECTL_CSEL);
272	sis_delay(sc);
273
274	/*
275	 * Send address of word we want to read.
276	 */
277	sis_eeprom_putbyte(sc, addr);
278
279	/*
280	 * Start reading bits from EEPROM.
281	 */
282	for (i = 0x8000; i; i >>= 1) {
283		SIO_SET(SIS_EECTL_CLK);
284		sis_delay(sc);
285		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
286			word |= i;
287		sis_delay(sc);
288		SIO_CLR(SIS_EECTL_CLK);
289		sis_delay(sc);
290	}
291
292	/* Turn off EEPROM access mode. */
293	sis_eeprom_idle(sc);
294
295	*dest = word;
296}
297
298/*
299 * Read a sequence of words from the EEPROM.
300 */
301static void
302sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
303{
304	int			i;
305	uint16_t		word = 0, *ptr;
306
307	for (i = 0; i < cnt; i++) {
308		sis_eeprom_getword(sc, off + i, &word);
309		ptr = (uint16_t *)(dest + (i * 2));
310		if (swap)
311			*ptr = ntohs(word);
312		else
313			*ptr = word;
314	}
315}
316
317#if defined(__i386__) || defined(__amd64__)
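/*
 * Walk every PCI bus looking for the SiS ISA bridge (device ID 0x0008);
 * sis_read_cmos() below uses it to reach the APC CMOS RAM where SiS
 * 630E-class boards store the MAC address.
 */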
318static device_t
319sis_find_bridge(device_t dev)
320{
321	devclass_t		pci_devclass;
322	device_t		*pci_devices;
323	int			pci_count = 0;
324	device_t		*pci_children;
325	int			pci_childcount = 0;
326	device_t		*busp, *childp;
327	device_t		child = NULL;
328	int			i, j;
329
330	if ((pci_devclass = devclass_find("pci")) == NULL)
331		return (NULL);
332
333	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
334
335	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
336		if (device_get_children(*busp, &pci_children, &pci_childcount))
337			continue;
338		for (j = 0, childp = pci_children;
339		    j < pci_childcount; j++, childp++) {
340			if (pci_get_vendor(*childp) == SIS_VENDORID &&
341			    pci_get_device(*childp) == 0x0008) {
342				child = *childp;
343				free(pci_children, M_TEMP);
344				goto done;
345			}
346		}
347		free(pci_children, M_TEMP);
348	}
349
350done:
351	free(pci_devices, M_TEMP);
352	return (child);
353}
354
355static void
356sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
357{
358	device_t		bridge;
359	uint8_t			reg;
360	int			i;
361	bus_space_tag_t		btag;
362
363	bridge = sis_find_bridge(dev);
364	if (bridge == NULL)
365		return;
366	reg = pci_read_config(bridge, 0x48, 1);
367	pci_write_config(bridge, 0x48, reg|0x40, 1);
368
369	/* XXX */
370#if defined(__i386__)
371	btag = I386_BUS_SPACE_IO;
372#elif defined(__amd64__)
373	btag = AMD64_BUS_SPACE_IO;
374#endif
375
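	/*
	 * Ports 0x70/0x71 are the standard CMOS/RTC index and data ports;
	 * the bridge config write above (register 0x48) makes the APC
	 * contents readable through them.
	 */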
376	for (i = 0; i < cnt; i++) {
377		bus_space_write_1(btag, 0x0, 0x70, i + off);
378		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
379	}
380
381	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
382}
383
384static void
385sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
386{
387	uint32_t		filtsave, csrsave;
388
389	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
390	csrsave = CSR_READ_4(sc, SIS_CSR);
391
392	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
393	CSR_WRITE_4(sc, SIS_CSR, 0);
394
395	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);
396
397	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
398	((uint16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
399	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
400	((uint16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
401	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
402	((uint16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);
403
404	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
405	CSR_WRITE_4(sc, SIS_CSR, csrsave);
406}
407#endif
408
409/*
410 * Sync the PHYs by setting data bit and strobing the clock 32 times.
411 */
412static void
413sis_mii_sync(struct sis_softc *sc)
414{
415	int		i;
416
417 	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);
418
419 	for (i = 0; i < 32; i++) {
420 		SIO_SET(SIS_MII_CLK);
421 		DELAY(1);
422 		SIO_CLR(SIS_MII_CLK);
423 		DELAY(1);
424 	}
425}
426
427/*
428 * Clock a series of bits through the MII.
429 */
430static void
431sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
432{
433	int			i;
434
435	SIO_CLR(SIS_MII_CLK);
436
437	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
438		if (bits & i) {
439			SIO_SET(SIS_MII_DATA);
440		} else {
441			SIO_CLR(SIS_MII_DATA);
442		}
443		DELAY(1);
444		SIO_CLR(SIS_MII_CLK);
445		DELAY(1);
446		SIO_SET(SIS_MII_CLK);
447	}
448}
449
450/*
451 * Read a PHY register through the MII.
452 */
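/*
 * The frame clocked out below is a standard IEEE 802.3 clause 22
 * management frame: a 2-bit start delimiter, 2-bit opcode, 5-bit PHY
 * address, 5-bit register address, a turnaround, and 16 data bits.
 */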
453static int
454sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
455{
456	int			i, ack;
457
458	/*
459	 * Set up frame for RX.
460	 */
461	frame->mii_stdelim = SIS_MII_STARTDELIM;
462	frame->mii_opcode = SIS_MII_READOP;
463	frame->mii_turnaround = 0;
464	frame->mii_data = 0;
465
466	/*
467 	 * Turn on data xmit.
468	 */
469	SIO_SET(SIS_MII_DIR);
470
471	sis_mii_sync(sc);
472
473	/*
474	 * Send command/address info.
475	 */
476	sis_mii_send(sc, frame->mii_stdelim, 2);
477	sis_mii_send(sc, frame->mii_opcode, 2);
478	sis_mii_send(sc, frame->mii_phyaddr, 5);
479	sis_mii_send(sc, frame->mii_regaddr, 5);
480
481	/* Idle bit */
482	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
483	DELAY(1);
484	SIO_SET(SIS_MII_CLK);
485	DELAY(1);
486
487	/* Turn off xmit. */
488	SIO_CLR(SIS_MII_DIR);
489
490	/* Check for ack */
491	SIO_CLR(SIS_MII_CLK);
492	DELAY(1);
493	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
494	SIO_SET(SIS_MII_CLK);
495	DELAY(1);
496
497	/*
498	 * Now try reading data bits. If the ack failed, we still
499	 * need to clock through 16 cycles to keep the PHY(s) in sync.
500	 */
501	if (ack) {
502		for (i = 0; i < 16; i++) {
503			SIO_CLR(SIS_MII_CLK);
504			DELAY(1);
505			SIO_SET(SIS_MII_CLK);
506			DELAY(1);
507		}
508		goto fail;
509	}
510
511	for (i = 0x8000; i; i >>= 1) {
512		SIO_CLR(SIS_MII_CLK);
513		DELAY(1);
514		if (!ack) {
515			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
516				frame->mii_data |= i;
517			DELAY(1);
518		}
519		SIO_SET(SIS_MII_CLK);
520		DELAY(1);
521	}
522
523fail:
524
525	SIO_CLR(SIS_MII_CLK);
526	DELAY(1);
527	SIO_SET(SIS_MII_CLK);
528	DELAY(1);
529
530	if (ack)
531		return (1);
532	return (0);
533}
534
535/*
536 * Write to a PHY register through the MII.
537 */
538static int
539sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
540{
541
542 	/*
543 	 * Set up frame for TX.
544 	 */
545
546 	frame->mii_stdelim = SIS_MII_STARTDELIM;
547 	frame->mii_opcode = SIS_MII_WRITEOP;
548 	frame->mii_turnaround = SIS_MII_TURNAROUND;
549
550 	/*
551  	 * Turn on data output.
552 	 */
553 	SIO_SET(SIS_MII_DIR);
554
555 	sis_mii_sync(sc);
556
557 	sis_mii_send(sc, frame->mii_stdelim, 2);
558 	sis_mii_send(sc, frame->mii_opcode, 2);
559 	sis_mii_send(sc, frame->mii_phyaddr, 5);
560 	sis_mii_send(sc, frame->mii_regaddr, 5);
561 	sis_mii_send(sc, frame->mii_turnaround, 2);
562 	sis_mii_send(sc, frame->mii_data, 16);
563
564 	/* Idle bit. */
565 	SIO_SET(SIS_MII_CLK);
566 	DELAY(1);
567 	SIO_CLR(SIS_MII_CLK);
568 	DELAY(1);
569
570 	/*
571 	 * Turn off xmit.
572 	 */
573 	SIO_CLR(SIS_MII_DIR);
574
575 	return (0);
576}
577
578static int
579sis_miibus_readreg(device_t dev, int phy, int reg)
580{
581	struct sis_softc	*sc;
582	struct sis_mii_frame    frame;
583
584	sc = device_get_softc(dev);
585
586	if (sc->sis_type == SIS_TYPE_83815) {
587		if (phy != 0)
588			return (0);
589		/*
590		 * The NatSemi chip can take a while after
591		 * a reset to come ready, during which the BMSR
592		 * returns a value of 0. This is *never* supposed
593		 * to happen: some of the BMSR bits are meant to
594		 * be hardwired in the on position, and this can
595		 * confuse the miibus code a bit during the probe
596		 * and attach phase. So we make an effort to check
597		 * for this condition and wait for it to clear.
598		 */
599		if (!CSR_READ_4(sc, NS_BMSR))
600			DELAY(1000);
601		return (CSR_READ_4(sc, NS_BMCR + (reg * 4)));
602	}
603
604	/*
605	 * Chipsets < SIS_635 seem not to be able to read/write
606	 * through mdio. Use the enhanced PHY access register
607	 * instead for them.
608	 */
609	if (sc->sis_type == SIS_TYPE_900 &&
610	    sc->sis_rev < SIS_REV_635) {
611		int i, val = 0;
612
613		if (phy != 0)
614			return (0);
615
616		CSR_WRITE_4(sc, SIS_PHYCTL,
617		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
618		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
619
620		for (i = 0; i < SIS_TIMEOUT; i++) {
621			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
622				break;
623		}
624
625		if (i == SIS_TIMEOUT) {
626			device_printf(sc->sis_dev, "PHY failed to come ready\n");
627			return (0);
628		}
629
630		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;
631
632		if (val == 0xFFFF)
633			return (0);
634
635		return (val);
636	} else {
637		bzero((char *)&frame, sizeof(frame));
638
639		frame.mii_phyaddr = phy;
640		frame.mii_regaddr = reg;
641		sis_mii_readreg(sc, &frame);
642
643		return (frame.mii_data);
644	}
645}
646
647static int
648sis_miibus_writereg(device_t dev, int phy, int reg, int data)
649{
650	struct sis_softc	*sc;
651	struct sis_mii_frame	frame;
652
653	sc = device_get_softc(dev);
654
655	if (sc->sis_type == SIS_TYPE_83815) {
656		if (phy != 0)
657			return (0);
658		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
659		return (0);
660	}
661
662	/*
663	 * Chipsets < SIS_635 seem not to be able to read/write
664	 * through mdio. Use the enhanced PHY access register
665	 * again for them.
666	 */
667	if (sc->sis_type == SIS_TYPE_900 &&
668	    sc->sis_rev < SIS_REV_635) {
669		int i;
670
671		if (phy != 0)
672			return (0);
673
674		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
675		    (reg << 6) | SIS_PHYOP_WRITE);
676		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);
677
678		for (i = 0; i < SIS_TIMEOUT; i++) {
679			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
680				break;
681		}
682
683		if (i == SIS_TIMEOUT)
684			device_printf(sc->sis_dev, "PHY failed to come ready\n");
685	} else {
686		bzero((char *)&frame, sizeof(frame));
687
688		frame.mii_phyaddr = phy;
689		frame.mii_regaddr = reg;
690		frame.mii_data = data;
691		sis_mii_writereg(sc, &frame);
692	}
693	return (0);
694}
695
696static void
697sis_miibus_statchg(device_t dev)
698{
699	struct sis_softc	*sc;
700	struct mii_data		*mii;
701	struct ifnet		*ifp;
702	uint32_t		reg;
703
704	sc = device_get_softc(dev);
705	SIS_LOCK_ASSERT(sc);
706
707	mii = device_get_softc(sc->sis_miibus);
708	ifp = sc->sis_ifp;
709	if (mii == NULL || ifp == NULL ||
710	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
711		return;
712
713	sc->sis_link = 0;
714	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
715	    (IFM_ACTIVE | IFM_AVALID)) {
716		switch (IFM_SUBTYPE(mii->mii_media_active)) {
717		case IFM_10_T:
718			sc->sis_link++;
719			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
720			break;
721		case IFM_100_TX:
722			sc->sis_link++;
723			CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
724			break;
725		default:
726			break;
727		}
728	}
729
730	if (sc->sis_link == 0) {
731		/*
732		 * Stopping the MACs seems to reset SIS_TX_LISTPTR and
733		 * SIS_RX_LISTPTR, which in turn requires reinitializing the
734		 * TX/RX buffers.  So just don't do anything when the link
735		 * is lost.
736		 */
737		return;
738	}
739
740	/* Set full/half duplex mode. */
741	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
742		SIS_SETBIT(sc, SIS_TX_CFG,
743		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
744		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
745	} else {
746		SIS_CLRBIT(sc, SIS_TX_CFG,
747		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
748		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
749	}
750
751	if (sc->sis_type == SIS_TYPE_83816) {
752		/*
753		 * MPII03.D: Half Duplex Excessive Collisions.
754		 * Also page 49 in 83816 manual
755		 */
756		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
757	}
758
759	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
760	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
761		/*
762		 * Short Cable Receive Errors (MP21.E)
763		 */
764		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
765		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
766		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
767		DELAY(100);
768		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
769		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
770			device_printf(sc->sis_dev,
771			    "Applying short cable fix (reg=%x)\n", reg);
772			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
773			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
774		}
775		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
776	}
777	/* Enable TX/RX MACs. */
778	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
779	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE | SIS_CSR_RX_ENABLE);
780}
781
782static uint32_t
783sis_mchash(struct sis_softc *sc, const uint8_t *addr)
784{
785	uint32_t		crc;
786
787	/* Compute CRC for the address value. */
788	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
789
790	/*
791	 * return the filter bit position
792	 *
793	 * The NatSemi chip has a 512-bit filter, which is
794 * different from the SiS parts, so we special-case it.
795	 */
796	if (sc->sis_type == SIS_TYPE_83815)
797		return (crc >> 23);
798	else if (sc->sis_rev >= SIS_REV_635 ||
799	    sc->sis_rev == SIS_REV_900B)
800		return (crc >> 24);
801	else
802		return (crc >> 25);
803}
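
/*
 * The callers below turn this hash value into a filter-memory word index
 * and a bit position; e.g. for the SiS parts, hashes[h >> 4] |= 1 << (h & 0xf)
 * sets one bit in one of the 8 or 16 16-bit hash words.
 */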
804
805static void
806sis_setmulti_ns(struct sis_softc *sc)
807{
808	struct ifnet		*ifp;
809	struct ifmultiaddr	*ifma;
810	uint32_t		h = 0, i, filtsave;
811	int			bit, index;
812
813	ifp = sc->sis_ifp;
814
815	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
816		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
817		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
818		return;
819	}
820
821	/*
822	 * We have to explicitly enable the multicast hash table
823	 * on the NatSemi chip if we want to use it, which we do.
824	 */
825	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
826	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
827
828	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
829
830	/* first, zot all the existing hash bits */
831	for (i = 0; i < 32; i++) {
832		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
833		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
834	}
835
836	if_maddr_rlock(ifp);
837	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
838		if (ifma->ifma_addr->sa_family != AF_LINK)
839			continue;
840		h = sis_mchash(sc,
841		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
842		index = h >> 3;
843		bit = h & 0x1F;
844		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
845		if (bit > 0xF)
846			bit -= 0x10;
847		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
848	}
849	if_maddr_runlock(ifp);
850
851	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
852}
853
854static void
855sis_setmulti_sis(struct sis_softc *sc)
856{
857	struct ifnet		*ifp;
858	struct ifmultiaddr	*ifma;
859	uint32_t		h, i, n, ctl;
860	uint16_t		hashes[16];
861
862	ifp = sc->sis_ifp;
863
864	/* hash table size */
865	if (sc->sis_rev >= SIS_REV_635 ||
866	    sc->sis_rev == SIS_REV_900B)
867		n = 16;
868	else
869		n = 8;
870
871	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;
872
873	if (ifp->if_flags & IFF_BROADCAST)
874		ctl |= SIS_RXFILTCTL_BROAD;
875
876	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
877		ctl |= SIS_RXFILTCTL_ALLMULTI;
878		if (ifp->if_flags & IFF_PROMISC)
879			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
880		for (i = 0; i < n; i++)
881			hashes[i] = ~0;
882	} else {
883		for (i = 0; i < n; i++)
884			hashes[i] = 0;
885		i = 0;
886		if_maddr_rlock(ifp);
887		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
888			if (ifma->ifma_addr->sa_family != AF_LINK)
889				continue;
890			h = sis_mchash(sc,
891			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
892			hashes[h >> 4] |= 1 << (h & 0xf);
893			i++;
894		}
895		if_maddr_runlock(ifp);
896		if (i > n) {
897			ctl |= SIS_RXFILTCTL_ALLMULTI;
898			for (i = 0; i < n; i++)
899				hashes[i] = ~0;
900		}
901	}
902
903	for (i = 0; i < n; i++) {
904		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
905		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
906	}
907
908	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
909}
910
911static void
912sis_reset(struct sis_softc *sc)
913{
914	int		i;
915
916	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);
917
918	for (i = 0; i < SIS_TIMEOUT; i++) {
919		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
920			break;
921	}
922
923	if (i == SIS_TIMEOUT)
924		device_printf(sc->sis_dev, "reset never completed\n");
925
926	/* Wait a little while for the chip to get its brains in order. */
927	DELAY(1000);
928
929	/*
930	 * If this is a NatSemi chip, make sure to clear
931	 * PME mode.
932	 */
933	if (sc->sis_type == SIS_TYPE_83815) {
934		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
935		CSR_WRITE_4(sc, NS_CLKRUN, 0);
936	}
937}
938
939/*
940 * Probe for an SiS chip. Check the PCI vendor and device
941 * IDs against our list and return a device name if we find a match.
942 */
943static int
944sis_probe(device_t dev)
945{
946	struct sis_type		*t;
947
948	t = sis_devs;
949
950	while (t->sis_name != NULL) {
951		if ((pci_get_vendor(dev) == t->sis_vid) &&
952		    (pci_get_device(dev) == t->sis_did)) {
953			device_set_desc(dev, t->sis_name);
954			return (BUS_PROBE_DEFAULT);
955		}
956		t++;
957	}
958
959	return (ENXIO);
960}
961
962/*
963 * Attach the interface. Allocate softc structures, do ifmedia
964 * setup and ethernet/BPF attach.
965 */
966static int
967sis_attach(device_t dev)
968{
969	u_char			eaddr[ETHER_ADDR_LEN];
970	struct sis_softc	*sc;
971	struct ifnet		*ifp;
972	int			error = 0, waittime = 0;
973
974	waittime = 0;
975	sc = device_get_softc(dev);
976
977	sc->sis_dev = dev;
978
979	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
980	    MTX_DEF);
981	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);
982
983	if (pci_get_device(dev) == SIS_DEVICEID_900)
984		sc->sis_type = SIS_TYPE_900;
985	if (pci_get_device(dev) == SIS_DEVICEID_7016)
986		sc->sis_type = SIS_TYPE_7016;
987	if (pci_get_vendor(dev) == NS_VENDORID)
988		sc->sis_type = SIS_TYPE_83815;
989
990	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
991	/*
992	 * Map control/status registers.
993	 */
994	pci_enable_busmaster(dev);
995
996	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
997	if (error) {
998		device_printf(dev, "couldn't allocate resources\n");
999		goto fail;
1000	}
1001
1002	/* Reset the adapter. */
1003	sis_reset(sc);
1004
1005	if (sc->sis_type == SIS_TYPE_900 &&
1006	    (sc->sis_rev == SIS_REV_635 ||
1007	    sc->sis_rev == SIS_REV_900B)) {
1008		SIO_SET(SIS_CFG_RND_CNT);
1009		SIO_SET(SIS_CFG_PERR_DETECT);
1010	}
1011
1012	/*
1013	 * Get station address from the EEPROM.
1014	 */
1015	switch (pci_get_vendor(dev)) {
1016	case NS_VENDORID:
1017		sc->sis_srr = CSR_READ_4(sc, NS_SRR);
1018
1019		/* We can't update the device description, so spew */
1020		if (sc->sis_srr == NS_SRR_15C)
1021			device_printf(dev, "Silicon Revision: DP83815C\n");
1022		else if (sc->sis_srr == NS_SRR_15D)
1023			device_printf(dev, "Silicon Revision: DP83815D\n");
1024		else if (sc->sis_srr == NS_SRR_16A)
1025			device_printf(dev, "Silicon Revision: DP83816A\n");
1026		else
1027			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);
1028
1029		/*
1030		 * Reading the MAC address out of the EEPROM on
1031		 * the NatSemi chip takes a bit more work than
1032		 * you'd expect. The address spans 4 16-bit words,
1033		 * with the first word containing only a single bit.
1034		 * You have to shift everything over one bit to
1035		 * get it aligned properly. Also, the bits are
1036		 * stored backwards (the LSB is really the MSB,
1037		 * and so on) so you have to reverse them in order
1038		 * to get the MAC address into the form we want.
1039		 * Why? Who the hell knows.
1040		 */
1041		{
1042			uint16_t		tmp[4];
1043
1044			sis_read_eeprom(sc, (caddr_t)&tmp,
1045			    NS_EE_NODEADDR, 4, 0);
1046
1047			/* Shift everything over one bit. */
1048			tmp[3] = tmp[3] >> 1;
1049			tmp[3] |= tmp[2] << 15;
1050			tmp[2] = tmp[2] >> 1;
1051			tmp[2] |= tmp[1] << 15;
1052			tmp[1] = tmp[1] >> 1;
1053			tmp[1] |= tmp[0] << 15;
1054
1055			/* Now reverse all the bits. */
1056			tmp[3] = sis_reverse(tmp[3]);
1057			tmp[2] = sis_reverse(tmp[2]);
1058			tmp[1] = sis_reverse(tmp[1]);
1059
1060			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
1061		}
1062		break;
1063	case SIS_VENDORID:
1064	default:
1065#if defined(__i386__) || defined(__amd64__)
1066		/*
1067		 * If this is a SiS 630E chipset with an embedded
1068		 * SiS 900 controller, we have to read the MAC address
1069		 * from the APC CMOS RAM. Our method for doing this
1070		 * is very ugly since we have to reach out and grab
1071		 * ahold of hardware for which we cannot properly
1072		 * allocate resources. This code is only compiled on
1073		 * the i386 architecture since the SiS 630E chipset
1074		 * is for x86 motherboards only. Note that there are
1075		 * a lot of magic numbers in this hack. These are
1076		 * taken from SiS's Linux driver. I'd like to replace
1077		 * them with proper symbolic definitions, but that
1078		 * requires some datasheets that I don't have access
1079		 * to at the moment.
1080		 */
1081		if (sc->sis_rev == SIS_REV_630S ||
1082		    sc->sis_rev == SIS_REV_630E ||
1083		    sc->sis_rev == SIS_REV_630EA1)
1084			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
1085
1086		else if (sc->sis_rev == SIS_REV_635 ||
1087			 sc->sis_rev == SIS_REV_630ET)
1088			sis_read_mac(sc, dev, (caddr_t)&eaddr);
1089		else if (sc->sis_rev == SIS_REV_96x) {
1090			/* Allow the NIC to read the EEPROM.  It is shared
1091			 * between a 1394 controller and the NIC, so each
1092			 * time we access it we need to set SIS_EECMD_REQ.
1093			 */
1094			SIO_SET(SIS_EECMD_REQ);
1095			for (waittime = 0; waittime < SIS_TIMEOUT;
1096			    waittime++) {
1097				/* Force EEPROM to idle state. */
1098				sis_eeprom_idle(sc);
1099				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
1100					sis_read_eeprom(sc, (caddr_t)&eaddr,
1101					    SIS_EE_NODEADDR, 3, 0);
1102					break;
1103				}
1104				DELAY(1);
1105			}
1106			/*
1107			 * Set SIS_EECTL_CLK high, so that another master
1108			 * can operate on the i2c bus.
1109			 */
1110			SIO_SET(SIS_EECTL_CLK);
1111			/* Refuse EEPROM access by LAN */
1112			SIO_SET(SIS_EECMD_DONE);
1113		} else
1114#endif
1115			sis_read_eeprom(sc, (caddr_t)&eaddr,
1116			    SIS_EE_NODEADDR, 3, 0);
1117		break;
1118	}
1119
1120	/* Allocate DMA'able memory. */
1121	if ((error = sis_dma_alloc(sc)) != 0)
1122		goto fail;
1123
1124	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
1125	if (ifp == NULL) {
1126		device_printf(dev, "can not if_alloc()\n");
1127		error = ENOSPC;
1128		goto fail;
1129	}
1130	ifp->if_softc = sc;
1131	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1132	ifp->if_mtu = ETHERMTU;
1133	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1134	ifp->if_ioctl = sis_ioctl;
1135	ifp->if_start = sis_start;
1136	ifp->if_init = sis_init;
1137	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
1138	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
1139	IFQ_SET_READY(&ifp->if_snd);
1140
1141	/*
1142	 * Do MII setup.
1143	 */
1144	if (mii_phy_probe(dev, &sc->sis_miibus,
1145	    sis_ifmedia_upd, sis_ifmedia_sts)) {
1146		device_printf(dev, "MII without any PHY!\n");
1147		error = ENXIO;
1148		goto fail;
1149	}
1150
1151	/*
1152	 * Call MI attach routine.
1153	 */
1154	ether_ifattach(ifp, eaddr);
1155
1156	/*
1157	 * Tell the upper layer(s) we support long frames.
1158	 */
1159	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1160	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1161	ifp->if_capenable = ifp->if_capabilities;
1162#ifdef DEVICE_POLLING
1163	ifp->if_capabilities |= IFCAP_POLLING;
1164#endif
1165
1166	/* Hook interrupt last to avoid having to lock softc */
1167	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
1168	    NULL, sis_intr, sc, &sc->sis_intrhand);
1169
1170	if (error) {
1171		device_printf(dev, "couldn't set up irq\n");
1172		ether_ifdetach(ifp);
1173		goto fail;
1174	}
1175
1176fail:
1177	if (error)
1178		sis_detach(dev);
1179
1180	return (error);
1181}
1182
1183/*
1184 * Shutdown hardware and free up resources. This can be called any
1185 * time after the mutex has been initialized. It is called in both
1186 * the error case in attach and the normal detach case so it needs
1187 * to be careful about only freeing resources that have actually been
1188 * allocated.
1189 */
1190static int
1191sis_detach(device_t dev)
1192{
1193	struct sis_softc	*sc;
1194	struct ifnet		*ifp;
1195
1196	sc = device_get_softc(dev);
1197	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
1198	ifp = sc->sis_ifp;
1199
1200#ifdef DEVICE_POLLING
1201	if (ifp->if_capenable & IFCAP_POLLING)
1202		ether_poll_deregister(ifp);
1203#endif
1204
1205	/* These should only be active if attach succeeded. */
1206	if (device_is_attached(dev)) {
1207		SIS_LOCK(sc);
1208		sis_reset(sc);
1209		sis_stop(sc);
1210		SIS_UNLOCK(sc);
1211		callout_drain(&sc->sis_stat_ch);
1212		ether_ifdetach(ifp);
1213	}
1214	if (sc->sis_miibus)
1215		device_delete_child(dev, sc->sis_miibus);
1216	bus_generic_detach(dev);
1217
1218	if (sc->sis_intrhand)
1219		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
1220	bus_release_resources(dev, sis_res_spec, sc->sis_res);
1221
1222	if (ifp)
1223		if_free(ifp);
1224
1225	sis_dma_free(sc);
1226
1227	mtx_destroy(&sc->sis_mtx);
1228
1229	return (0);
1230}
1231
1232struct sis_dmamap_arg {
1233	bus_addr_t	sis_busaddr;
1234};
1235
1236static void
1237sis_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1238{
1239	struct sis_dmamap_arg	*ctx;
1240
1241	if (error != 0)
1242		return;
1243
1244	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1245
1246	ctx = (struct sis_dmamap_arg *)arg;
1247	ctx->sis_busaddr = segs[0].ds_addr;
1248}
1249
1250static int
1251sis_dma_ring_alloc(struct sis_softc *sc, bus_size_t alignment,
1252    bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
1253    bus_addr_t *paddr, const char *msg)
1254{
1255	struct sis_dmamap_arg	ctx;
1256	int			error;
1257
1258	error = bus_dma_tag_create(sc->sis_parent_tag, alignment, 0,
1259	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1,
1260	    maxsize, 0, NULL, NULL, tag);
1261	if (error != 0) {
1262		device_printf(sc->sis_dev,
1263		    "could not create %s dma tag\n", msg);
1264		return (ENOMEM);
1265	}
1266	/* Allocate DMA'able memory for ring. */
1267	error = bus_dmamem_alloc(*tag, (void **)ring,
1268	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
1269	if (error != 0) {
1270		device_printf(sc->sis_dev,
1271		    "could not allocate DMA'able memory for %s\n", msg);
1272		return (ENOMEM);
1273	}
1274	/* Load the address of the ring. */
1275	ctx.sis_busaddr = 0;
1276	error = bus_dmamap_load(*tag, *map, *ring, maxsize, sis_dmamap_cb,
1277	    &ctx, BUS_DMA_NOWAIT);
1278	if (error != 0) {
1279		device_printf(sc->sis_dev,
1280		    "could not load DMA'able memory for %s\n", msg);
1281		return (ENOMEM);
1282	}
1283	*paddr = ctx.sis_busaddr;
1284	return (0);
1285}
1286
1287static int
1288sis_dma_alloc(struct sis_softc *sc)
1289{
1290	struct sis_rxdesc	*rxd;
1291	struct sis_txdesc	*txd;
1292	int			error, i;
1293
1294	/* Allocate the parent bus DMA tag appropriate for PCI. */
1295	error = bus_dma_tag_create(bus_get_dma_tag(sc->sis_dev),
1296	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1297	    NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
1298	    0, NULL, NULL, &sc->sis_parent_tag);
1299	if (error != 0) {
1300		device_printf(sc->sis_dev,
1301		    "could not allocate parent dma tag\n");
1302		return (ENOMEM);
1303	}
1304
1305	/* Create RX ring. */
1306	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_RX_LIST_SZ,
1307	    &sc->sis_rx_list_tag, (uint8_t **)&sc->sis_rx_list,
1308	    &sc->sis_rx_list_map, &sc->sis_rx_paddr, "RX ring");
1309	if (error)
1310		return (error);
1311
1312	/* Create TX ring. */
1313	error = sis_dma_ring_alloc(sc, SIS_DESC_ALIGN, SIS_TX_LIST_SZ,
1314	    &sc->sis_tx_list_tag, (uint8_t **)&sc->sis_tx_list,
1315	    &sc->sis_tx_list_map, &sc->sis_tx_paddr, "TX ring");
1316	if (error)
1317		return (error);
1318
1319	/* Create tag for RX mbufs. */
1320	error = bus_dma_tag_create(sc->sis_parent_tag, SIS_RX_BUF_ALIGN, 0,
1321	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
1322	    MCLBYTES, 0, NULL, NULL, &sc->sis_rx_tag);
1323	if (error) {
1324		device_printf(sc->sis_dev, "could not allocate RX dma tag\n");
1325		return (error);
1326	}
1327
1328	/* Create tag for TX mbufs. */
1329	error = bus_dma_tag_create(sc->sis_parent_tag, 1, 0,
1330	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1331	    MCLBYTES * SIS_MAXTXSEGS, SIS_MAXTXSEGS, MCLBYTES, 0, NULL, NULL,
1332	    &sc->sis_tx_tag);
1333	if (error) {
1334		device_printf(sc->sis_dev, "could not allocate TX dma tag\n");
1335		return (error);
1336	}
1337
1338	/* Create DMA maps for RX buffers. */
1339	error = bus_dmamap_create(sc->sis_rx_tag, 0, &sc->sis_rx_sparemap);
1340	if (error) {
1341		device_printf(sc->sis_dev,
1342		    "can't create spare DMA map for RX\n");
1343		return (error);
1344	}
1345	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1346		rxd = &sc->sis_rxdesc[i];
1347		rxd->rx_m = NULL;
1348		error = bus_dmamap_create(sc->sis_rx_tag, 0, &rxd->rx_dmamap);
1349		if (error) {
1350			device_printf(sc->sis_dev,
1351			    "can't create DMA map for RX\n");
1352			return (error);
1353		}
1354	}
1355
1356	/* Create DMA maps for TX buffers. */
1357	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1358		txd = &sc->sis_txdesc[i];
1359		txd->tx_m = NULL;
1360		error = bus_dmamap_create(sc->sis_tx_tag, 0, &txd->tx_dmamap);
1361		if (error) {
1362			device_printf(sc->sis_dev,
1363			    "can't create DMA map for TX\n");
1364			return (error);
1365		}
1366	}
1367
1368	return (0);
1369}
1370
1371static void
1372sis_dma_free(struct sis_softc *sc)
1373{
1374	struct sis_rxdesc	*rxd;
1375	struct sis_txdesc	*txd;
1376	int			i;
1377
1378	/* Destroy DMA maps for RX buffers. */
1379	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1380		rxd = &sc->sis_rxdesc[i];
1381		if (rxd->rx_dmamap)
1382			bus_dmamap_destroy(sc->sis_rx_tag, rxd->rx_dmamap);
1383	}
1384	if (sc->sis_rx_sparemap)
1385		bus_dmamap_destroy(sc->sis_rx_tag, sc->sis_rx_sparemap);
1386
1387	/* Destroy DMA maps for TX buffers. */
1388	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1389		txd = &sc->sis_txdesc[i];
1390		if (txd->tx_dmamap)
1391			bus_dmamap_destroy(sc->sis_tx_tag, txd->tx_dmamap);
1392	}
1393
1394	if (sc->sis_rx_tag)
1395		bus_dma_tag_destroy(sc->sis_rx_tag);
1396	if (sc->sis_tx_tag)
1397		bus_dma_tag_destroy(sc->sis_tx_tag);
1398
1399	/* Destroy RX ring. */
1400	if (sc->sis_rx_list_map)
1401		bus_dmamap_unload(sc->sis_rx_list_tag, sc->sis_rx_list_map);
1402	if (sc->sis_rx_list_map && sc->sis_rx_list)
1403		bus_dmamem_free(sc->sis_rx_list_tag, sc->sis_rx_list,
1404		    sc->sis_rx_list_map);
1405
1406	if (sc->sis_rx_list_tag)
1407		bus_dma_tag_destroy(sc->sis_rx_list_tag);
1408
1409	/* Destroy TX ring. */
1410	if (sc->sis_tx_list_map)
1411		bus_dmamap_unload(sc->sis_tx_list_tag, sc->sis_tx_list_map);
1412
1413	if (sc->sis_tx_list_map && sc->sis_tx_list)
1414		bus_dmamem_free(sc->sis_tx_list_tag, sc->sis_tx_list,
1415		    sc->sis_tx_list_map);
1416
1417	if (sc->sis_tx_list_tag)
1418		bus_dma_tag_destroy(sc->sis_tx_list_tag);
1419
1420	/* Destroy the parent tag. */
1421	if (sc->sis_parent_tag)
1422		bus_dma_tag_destroy(sc->sis_parent_tag);
1423}
1424
1425/*
1426 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
1427 * we arrange the descriptors in a closed ring, so that the last descriptor
1428 * points back to the first.
1429 */
1430static int
1431sis_ring_init(struct sis_softc *sc)
1432{
1433	struct sis_rxdesc	*rxd;
1434	struct sis_txdesc	*txd;
1435	bus_addr_t		next;
1436	int			error, i;
1437
1438	bzero(&sc->sis_tx_list[0], SIS_TX_LIST_SZ);
1439	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
1440		txd = &sc->sis_txdesc[i];
1441		txd->tx_m = NULL;
1442		if (i == SIS_TX_LIST_CNT - 1)
1443			next = SIS_TX_RING_ADDR(sc, 0);
1444		else
1445			next = SIS_TX_RING_ADDR(sc, i + 1);
1446		sc->sis_tx_list[i].sis_next = htole32(SIS_ADDR_LO(next));
1447	}
1448	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;
1449	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1450	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1451
1452	sc->sis_rx_cons = 0;
1453	bzero(&sc->sis_rx_list[0], SIS_RX_LIST_SZ);
1454	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
1455		rxd = &sc->sis_rxdesc[i];
1456		rxd->rx_desc = &sc->sis_rx_list[i];
1457		if (i == SIS_RX_LIST_CNT - 1)
1458			next = SIS_RX_RING_ADDR(sc, 0);
1459		else
1460			next = SIS_RX_RING_ADDR(sc, i + 1);
1461		rxd->rx_desc->sis_next = htole32(SIS_ADDR_LO(next));
1462		error = sis_newbuf(sc, rxd);
1463		if (error)
1464			return (error);
1465	}
1466	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1467	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1468
1469	return (0);
1470}
1471
1472/*
1473 * Initialize an RX descriptor and attach an MBUF cluster.
1474 */
1475static int
1476sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
1477{
1478	struct mbuf		*m;
1479	bus_dma_segment_t	segs[1];
1480	bus_dmamap_t		map;
1481	int nsegs;
1482
1483	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1484	if (m == NULL)
1485		return (ENOBUFS);
1486	m->m_len = m->m_pkthdr.len = SIS_RXLEN;
1487#ifndef __NO_STRICT_ALIGNMENT
1488	m_adj(m, SIS_RX_BUF_ALIGN);
1489#endif
1490
1491	if (bus_dmamap_load_mbuf_sg(sc->sis_rx_tag, sc->sis_rx_sparemap, m,
1492	    segs, &nsegs, 0) != 0) {
1493		m_freem(m);
1494		return (ENOBUFS);
1495	}
1496	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1497
1498	if (rxd->rx_m != NULL) {
1499		bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
1500		    BUS_DMASYNC_POSTREAD);
1501		bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
1502	}
1503	map = rxd->rx_dmamap;
1504	rxd->rx_dmamap = sc->sis_rx_sparemap;
1505	sc->sis_rx_sparemap = map;
1506	bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD);
1507	rxd->rx_m = m;
1508	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1509	rxd->rx_desc->sis_ptr = htole32(SIS_ADDR_LO(segs[0].ds_addr));
1510	return (0);
1511}
1512
1513static __inline void
1514sis_discard_rxbuf(struct sis_rxdesc *rxd)
1515{
1516
1517	rxd->rx_desc->sis_cmdsts = htole32(SIS_RXLEN);
1518}
1519
1520#ifndef __NO_STRICT_ALIGNMENT
1521static __inline void
1522sis_fixup_rx(struct mbuf *m)
1523{
1524	uint16_t		*src, *dst;
1525	int			i;
1526
1527	src = mtod(m, uint16_t *);
1528	dst = src - (SIS_RX_BUF_ALIGN - ETHER_ALIGN) / sizeof(*src);
1529
1530	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1531		*dst++ = *src++;
1532
1533	m->m_data -= SIS_RX_BUF_ALIGN - ETHER_ALIGN;
1534}
1535#endif
1536
1537/*
1538 * A frame has been uploaded: pass the resulting mbuf chain up to
1539 * the higher level protocols.
1540 */
1541static int
1542sis_rxeof(struct sis_softc *sc)
1543{
1544	struct mbuf		*m;
1545	struct ifnet		*ifp;
1546	struct sis_rxdesc	*rxd;
1547	struct sis_desc		*cur_rx;
1548	int			prog, rx_cons, rx_npkts = 0, total_len;
1549	uint32_t		rxstat;
1550
1551	SIS_LOCK_ASSERT(sc);
1552
1553	bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1554	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1555
1556	rx_cons = sc->sis_rx_cons;
1557	ifp = sc->sis_ifp;
1558
1559	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1560	    SIS_INC(rx_cons, SIS_RX_LIST_CNT), prog++) {
1561#ifdef DEVICE_POLLING
1562		if (ifp->if_capenable & IFCAP_POLLING) {
1563			if (sc->rxcycles <= 0)
1564				break;
1565			sc->rxcycles--;
1566		}
1567#endif
1568		cur_rx = &sc->sis_rx_list[rx_cons];
1569		rxstat = le32toh(cur_rx->sis_cmdsts);
1570		if ((rxstat & SIS_CMDSTS_OWN) == 0)
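		/*
		 * The chip sets SIS_CMDSTS_OWN in an RX descriptor once it
		 * has stored a complete frame there, so stop at the first
		 * descriptor it has not yet handed back.
		 */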
1571			break;
1572		rxd = &sc->sis_rxdesc[rx_cons];
1573
1574		total_len = (rxstat & SIS_CMDSTS_BUFLEN) - ETHER_CRC_LEN;
1575		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
1576		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
1577		    ETHER_CRC_LEN))
1578			rxstat &= ~SIS_RXSTAT_GIANT;
1579		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
1580			ifp->if_ierrors++;
1581			if (rxstat & SIS_RXSTAT_COLL)
1582				ifp->if_collisions++;
1583			sis_discard_rxbuf(rxd);
1584			continue;
1585		}
1586
1587		/* Add a new receive buffer to the ring. */
1588		m = rxd->rx_m;
1589		if (sis_newbuf(sc, rxd) != 0) {
1590			ifp->if_iqdrops++;
1591			sis_discard_rxbuf(rxd);
1592			continue;
1593		}
1594
1595		/* No errors; receive the packet. */
1596		m->m_pkthdr.len = m->m_len = total_len;
1597#ifndef __NO_STRICT_ALIGNMENT
1598		/*
1599		 * On strict-alignment architectures the chip's requirement
1600		 * that RX buffers be longword aligned leaves the IP header
1601		 * misaligned, so shift the payload into place here before
1602		 * passing the mbuf up the stack.
1603		 */
1604		sis_fixup_rx(m);
1605#endif
1606		ifp->if_ipackets++;
1607		m->m_pkthdr.rcvif = ifp;
1608
1609		SIS_UNLOCK(sc);
1610		(*ifp->if_input)(ifp, m);
1611		SIS_LOCK(sc);
1612		rx_npkts++;
1613	}
1614
1615	if (prog > 0) {
1616		sc->sis_rx_cons = rx_cons;
1617		bus_dmamap_sync(sc->sis_rx_list_tag, sc->sis_rx_list_map,
1618		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1619	}
1620
1621	return (rx_npkts);
1622}
1623
1624/*
1625 * A frame was downloaded to the chip. It's safe for us to clean up
1626 * the list buffers.
1627 */
1628
1629static void
1630sis_txeof(struct sis_softc *sc)
1631{
1632	struct ifnet		*ifp;
1633	struct sis_desc		*cur_tx;
1634	struct sis_txdesc	*txd;
1635	uint32_t		cons, txstat;
1636
1637	SIS_LOCK_ASSERT(sc);
1638
1639	cons = sc->sis_tx_cons;
1640	if (cons == sc->sis_tx_prod)
1641		return;
1642
1643	ifp = sc->sis_ifp;
1644	bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1645	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1646
1647	/*
1648	 * Go through our tx list and free mbufs for those
1649	 * frames that have been transmitted.
1650	 */
1651	for (; cons != sc->sis_tx_prod; SIS_INC(cons, SIS_TX_LIST_CNT)) {
1652		cur_tx = &sc->sis_tx_list[cons];
1653		txstat = le32toh(cur_tx->sis_cmdsts);
1654		if ((txstat & SIS_CMDSTS_OWN) != 0)
1655			break;
1656		txd = &sc->sis_txdesc[cons];
1657		if (txd->tx_m != NULL) {
1658			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
1659			    BUS_DMASYNC_POSTWRITE);
1660			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1661			m_freem(txd->tx_m);
1662			txd->tx_m = NULL;
1663			if ((txstat & SIS_CMDSTS_PKT_OK) != 0) {
1664				ifp->if_opackets++;
1665				ifp->if_collisions +=
1666				    (txstat & SIS_TXSTAT_COLLCNT) >> 16;
1667			} else {
1668				ifp->if_oerrors++;
1669				if (txstat & SIS_TXSTAT_EXCESSCOLLS)
1670					ifp->if_collisions++;
1671				if (txstat & SIS_TXSTAT_OUTOFWINCOLL)
1672					ifp->if_collisions++;
1673			}
1674		}
1675		sc->sis_tx_cnt--;
1676		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1677	}
1678	sc->sis_tx_cons = cons;
1679	if (sc->sis_tx_cnt == 0)
1680		sc->sis_watchdog_timer = 0;
1681}
1682
1683static void
1684sis_tick(void *xsc)
1685{
1686	struct sis_softc	*sc;
1687	struct mii_data		*mii;
1688	struct ifnet		*ifp;
1689
1690	sc = xsc;
1691	SIS_LOCK_ASSERT(sc);
1692	ifp = sc->sis_ifp;
1693
1694	mii = device_get_softc(sc->sis_miibus);
1695	mii_tick(mii);
1696	sis_watchdog(sc);
1697	if (sc->sis_link == 0)
1698		sis_miibus_statchg(sc->sis_dev);
1699	callout_reset(&sc->sis_stat_ch, hz,  sis_tick, sc);
1700}
1701
1702#ifdef DEVICE_POLLING
1703static poll_handler_t sis_poll;
1704
1705static int
1706sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1707{
1708	struct	sis_softc *sc = ifp->if_softc;
1709	int rx_npkts = 0;
1710
1711	SIS_LOCK(sc);
1712	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1713		SIS_UNLOCK(sc);
1714		return (rx_npkts);
1715	}
1716
1717	/*
1718	 * On the sis, reading the status register also clears it.
1719	 * So before returning to intr mode we must make sure that all
1720	 * possible pending sources of interrupts have been served.
1721	 * In practice this means run to completion the *eof routines,
1722	 * and then call the interrupt routine
1723	 */
1724	sc->rxcycles = count;
1725	rx_npkts = sis_rxeof(sc);
1726	sis_txeof(sc);
1727	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1728		sis_startl(ifp);
1729
1730	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1731		uint32_t	status;
1732
1733		/* Reading the ISR register clears all interrupts. */
1734		status = CSR_READ_4(sc, SIS_ISR);
1735
1736		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
1737			ifp->if_ierrors++;
1738
1739		if (status & (SIS_ISR_RX_IDLE))
1740			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1741
1742		if (status & SIS_ISR_SYSERR) {
1743			sis_reset(sc);
1744			sis_initl(sc);
1745		}
1746	}
1747
1748	SIS_UNLOCK(sc);
1749	return (rx_npkts);
1750}
1751#endif /* DEVICE_POLLING */
1752
1753static void
1754sis_intr(void *arg)
1755{
1756	struct sis_softc	*sc;
1757	struct ifnet		*ifp;
1758	uint32_t		status;
1759
1760	sc = arg;
1761	ifp = sc->sis_ifp;
1762
1763	SIS_LOCK(sc);
1764#ifdef DEVICE_POLLING
1765	if (ifp->if_capenable & IFCAP_POLLING) {
1766		SIS_UNLOCK(sc);
1767		return;
1768	}
1769#endif
1770
1771	/* Reading the ISR register clears all interrupts. */
1772	status = CSR_READ_4(sc, SIS_ISR);
1773	if ((status & SIS_INTRS) == 0) {
1774		/* Not ours. */
1775		SIS_UNLOCK(sc);
		return;
1776	}
1777
1778	/* Disable interrupts. */
1779	CSR_WRITE_4(sc, SIS_IER, 0);
1780
1781	for (;(status & SIS_INTRS) != 0;) {
1782		if (status &
1783		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
1784		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) )
1785			sis_txeof(sc);
1786
1787		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
1788		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
1789			sis_rxeof(sc);
1790
1791		if (status & SIS_ISR_RX_OFLOW)
1792			ifp->if_ierrors++;
1793
1794		if (status & (SIS_ISR_RX_IDLE))
1795			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
1796
1797		if (status & SIS_ISR_SYSERR) {
1798			sis_reset(sc);
1799			sis_initl(sc);
1800			SIS_UNLOCK(sc);
1801			return;
1802		}
1803		status = CSR_READ_4(sc, SIS_ISR);
1804	}
1805
1806	/* Re-enable interrupts. */
1807	CSR_WRITE_4(sc, SIS_IER, 1);
1808
1809	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1810		sis_startl(ifp);
1811
1812	SIS_UNLOCK(sc);
1813}
1814
1815/*
1816 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1817 * pointers to the fragment pointers.
1818 */
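/*
 * Note that the first descriptor of a chain is queued without
 * SIS_CMDSTS_OWN; ownership of the whole chain is transferred to the
 * controller only at the end, so the chip never sees a partially built
 * packet.
 */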
1819static int
1820sis_encap(struct sis_softc *sc, struct mbuf **m_head)
1821{
1822	struct mbuf		*m;
1823	struct sis_txdesc	*txd;
1824	struct sis_desc		*f;
1825	bus_dma_segment_t	segs[SIS_MAXTXSEGS];
1826	bus_dmamap_t		map;
1827	int			error, i, frag, nsegs, prod;
1828
1829	prod = sc->sis_tx_prod;
1830	txd = &sc->sis_txdesc[prod];
1831	error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1832	    *m_head, segs, &nsegs, 0);
1833	if (error == EFBIG) {
1834		m = m_collapse(*m_head, M_DONTWAIT, SIS_MAXTXSEGS);
1835		if (m == NULL) {
1836			m_freem(*m_head);
1837			*m_head = NULL;
1838			return (ENOBUFS);
1839		}
1840		*m_head = m;
1841		error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
1842		    *m_head, segs, &nsegs, 0);
1843		if (error != 0) {
1844			m_freem(*m_head);
1845			*m_head = NULL;
1846			return (error);
1847		}
1848	} else if (error != 0)
1849		return (error);
1850
1851	/* Check for descriptor overruns. */
1852	if (sc->sis_tx_cnt + nsegs > SIS_TX_LIST_CNT - 1) {
1853		bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
1854		return (ENOBUFS);
1855	}
1856
1857	bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE);
1858
1859	frag = prod;
1860	for (i = 0; i < nsegs; i++) {
1861		f = &sc->sis_tx_list[prod];
1862		if (i == 0)
1863			f->sis_cmdsts = htole32(segs[i].ds_len |
1864			    SIS_CMDSTS_MORE);
1865		else
1866			f->sis_cmdsts = htole32(segs[i].ds_len |
1867			    SIS_CMDSTS_OWN | SIS_CMDSTS_MORE);
1868		f->sis_ptr = htole32(SIS_ADDR_LO(segs[i].ds_addr));
1869		SIS_INC(prod, SIS_TX_LIST_CNT);
1870		sc->sis_tx_cnt++;
1871	}
1872
1873	/* Update producer index. */
1874	sc->sis_tx_prod = prod;
1875
1876	/* Remove MORE flag on the last descriptor. */
1877	prod = (prod - 1) & (SIS_TX_LIST_CNT - 1);
1878	f = &sc->sis_tx_list[prod];
1879	f->sis_cmdsts &= ~htole32(SIS_CMDSTS_MORE);
1880
1881	/* Lastly transfer ownership of packet to the controller. */
1882	f = &sc->sis_tx_list[frag];
1883	f->sis_cmdsts |= htole32(SIS_CMDSTS_OWN);
1884
1885	/* Swap the last and the first dmamaps. */
1886	map = txd->tx_dmamap;
1887	txd->tx_dmamap = sc->sis_txdesc[frag].tx_dmamap;
1888	sc->sis_txdesc[frag].tx_dmamap = map;
1889	txd->tx_m = *m_head;
1890
1891	return (0);
1892}
1893
1894static void
1895sis_start(struct ifnet *ifp)
1896{
1897	struct sis_softc	*sc;
1898
1899	sc = ifp->if_softc;
1900	SIS_LOCK(sc);
1901	sis_startl(ifp);
1902	SIS_UNLOCK(sc);
1903}
1904
1905static void
1906sis_startl(struct ifnet *ifp)
1907{
1908	struct sis_softc	*sc;
1909	struct mbuf		*m_head;
1910	int			queued;
1911
1912	sc = ifp->if_softc;
1913
1914	SIS_LOCK_ASSERT(sc);
1915
1916	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1917	    IFF_DRV_RUNNING || sc->sis_link == 0)
1918		return;
1919
1920	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1921	    sc->sis_tx_cnt < SIS_TX_LIST_CNT - 4;) {
1922		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1923		if (m_head == NULL)
1924			break;
1925
1926		if (sis_encap(sc, &m_head) != 0) {
1927			if (m_head == NULL)
1928				break;
1929			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1930			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1931			break;
1932		}
1933
1934		queued++;
1935
1936		/*
1937		 * If there's a BPF listener, bounce a copy of this frame
1938		 * to him.
1939		 */
1940		BPF_MTAP(ifp, m_head);
1941	}
1942
1943	if (queued) {
1944		/* Transmit */
1945		bus_dmamap_sync(sc->sis_tx_list_tag, sc->sis_tx_list_map,
1946		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1947		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);
1948
1949		/*
1950		 * Set a timeout in case the chip goes out to lunch.
1951		 */
1952		sc->sis_watchdog_timer = 5;
1953	}
1954}
1955
1956static void
1957sis_init(void *xsc)
1958{
1959	struct sis_softc	*sc = xsc;
1960
1961	SIS_LOCK(sc);
1962	sis_initl(sc);
1963	SIS_UNLOCK(sc);
1964}
1965
1966static void
1967sis_initl(struct sis_softc *sc)
1968{
1969	struct ifnet		*ifp = sc->sis_ifp;
1970	struct mii_data		*mii;
1971
1972	SIS_LOCK_ASSERT(sc);
1973
1974	/*
1975	 * Cancel pending I/O and free all RX/TX buffers.
1976	 */
1977	sis_stop(sc);
1978
1979#ifdef notyet
1980	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
1981		/*
1982		 * Configure 400usec of interrupt holdoff.  This is based
1983		 * on empirical tests on a Soekris 4801.
1984 		 */
1985		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
1986	}
1987#endif
1988
1989	mii = device_get_softc(sc->sis_miibus);
1990
1991	/* Set MAC address */
1992	if (sc->sis_type == SIS_TYPE_83815) {
1993		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
1994		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1995		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[0]);
1996		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
1997		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
1998		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[1]);
1999		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
2000		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2001		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[2]);
2002	} else {
2003		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
2004		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2005		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[0]);
2006		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
2007		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2008		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[1]);
2009		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
2010		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
2011		    ((uint16_t *)IF_LLADDR(sc->sis_ifp))[2]);
2012	}
2013
2014	/* Init circular TX/RX lists. */
2015	if (sis_ring_init(sc) != 0) {
2016		device_printf(sc->sis_dev,
2017		    "initialization failed: no memory for rx buffers\n");
2018		sis_stop(sc);
2019		return;
2020	}
2021
2022	/*
2023	 * Short Cable Receive Errors (MP21.E)
2024	 * also: page 78 of the DP83815 data sheet (September 2002 version)
2025	 * recommends the following register settings "for optimum
2026	 * performance" for rev 15C.  Set them for 15D parts as well, as
2027	 * they require it in practice.
2028	 */
2029	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
2030		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
2031		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
2032		/* set val for c2 */
2033		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
2034		/* load/kill c2 */
2035		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
2036		/* raise SD off, from 4 to c */
2037		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
2038		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
2039	}
2040
2041	/*
2042	 * For the NatSemi chip, we have to explicitly enable the
2043	 * reception of ARP frames, as well as turn on the 'perfect
2044	 * match' filter where we store the station address; otherwise
2045	 * we won't receive unicasts meant for this host.
2046	 */
2047	if (sc->sis_type == SIS_TYPE_83815) {
2048		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
2049		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
2050	}
2051
2052	/* If we want promiscuous mode, set the allframes bit. */
2053	if (ifp->if_flags & IFF_PROMISC) {
2054		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2055	} else {
2056		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
2057	}
2058
2059	/*
2060	 * Set the capture broadcast bit to capture broadcast frames.
2061	 */
2062	if (ifp->if_flags & IFF_BROADCAST) {
2063		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2064	} else {
2065		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
2066	}
2067
2068	/*
2069	 * Load the multicast filter.
2070	 */
2071	if (sc->sis_type == SIS_TYPE_83815)
2072		sis_setmulti_ns(sc);
2073	else
2074		sis_setmulti_sis(sc);
2075
2076	/* Turn the receive filter on */
2077	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);
2078
2079	/*
2080	 * Load the address of the RX and TX lists.
2081	 */
2082	CSR_WRITE_4(sc, SIS_RX_LISTPTR, SIS_ADDR_LO(sc->sis_rx_paddr));
2083	CSR_WRITE_4(sc, SIS_TX_LISTPTR, SIS_ADDR_LO(sc->sis_tx_paddr));
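	/*
	 * Only the low 32 bits of each address are programmed; the
	 * descriptor rings are assumed to live in 32-bit addressable
	 * memory.
	 */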
2084
2085	/* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
2086	 * the PCI bus. When this bit is set, the Max DMA Burst Size
2087	 * for TX/RX DMA should be no larger than 16 double words.
2088	 */
2089	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
2090		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
2091	} else {
2092		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
2093	}
2094
2095	/* Accept Long Packets for VLAN support */
2096	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);
2097
2098	/*
2099	 * Assume a 100Mbps link; the actual MAC configuration is done
2100	 * once a valid link has been established.
2101	 */
2102	CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
2103
2104	/*
2105	 * Enable interrupts.
2106	 */
2107	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
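	/*
	 * SIS_IMR selects which events may cause an interrupt; SIS_IER,
	 * written below, is assumed to act as the global interrupt enable
	 * gate.
	 */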
2108#ifdef DEVICE_POLLING
2109	/*
2110	 * ... only enable interrupts if we are not polling, make sure
2111	 * they are off otherwise.
2112	 */
2113	if (ifp->if_capenable & IFCAP_POLLING)
2114		CSR_WRITE_4(sc, SIS_IER, 0);
2115	else
2116#endif
2117	CSR_WRITE_4(sc, SIS_IER, 1);
2118
2119	/* Clear MAC disable. */
2120	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
2121
2122	sc->sis_link = 0;
2123	mii_mediachg(mii);
2124
2125	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2126	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2127
2128	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
2129}
2130
2131/*
2132 * Set media options.
2133 */
2134static int
2135sis_ifmedia_upd(struct ifnet *ifp)
2136{
2137	struct sis_softc	*sc;
2138	struct mii_data		*mii;
2139	int			error;
2140
2141	sc = ifp->if_softc;
2142
2143	SIS_LOCK(sc);
2144	mii = device_get_softc(sc->sis_miibus);
2145	sc->sis_link = 0;
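	/*
	 * If any PHY instances are attached, reset them so the media
	 * change starts from a clean state.
	 */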
2146	if (mii->mii_instance) {
2147		struct mii_softc	*miisc;
2148		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2149			mii_phy_reset(miisc);
2150	}
2151	error = mii_mediachg(mii);
2152	SIS_UNLOCK(sc);
2153
2154	return (error);
2155}
2156
2157/*
2158 * Report current media status.
2159 */
2160static void
2161sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2162{
2163	struct sis_softc	*sc;
2164	struct mii_data		*mii;
2165
2166	sc = ifp->if_softc;
2167
2168	SIS_LOCK(sc);
2169	mii = device_get_softc(sc->sis_miibus);
2170	mii_pollstat(mii);
2171	ifmr->ifm_active = mii->mii_media_active;
2172	ifmr->ifm_status = mii->mii_media_status;
2173	SIS_UNLOCK(sc);
2174}
2175
2176static int
2177sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2178{
2179	struct sis_softc	*sc = ifp->if_softc;
2180	struct ifreq		*ifr = (struct ifreq *) data;
2181	struct mii_data		*mii;
2182	int			error = 0;
2183
2184	switch (command) {
2185	case SIOCSIFFLAGS:
2186		SIS_LOCK(sc);
2187		if (ifp->if_flags & IFF_UP) {
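			/*
			 * If the interface is already running and only the
			 * PROMISC or ALLMULTI flags changed, just reprogram
			 * the RX filter instead of doing a full reinit.
			 */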
2188			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2189			    ((ifp->if_flags ^ sc->sis_if_flags) &
2190			    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2191				if (sc->sis_type == SIS_TYPE_83815)
2192					sis_setmulti_ns(sc);
2193				else
2194					sis_setmulti_sis(sc);
2195			} else
2196				sis_initl(sc);
2197		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2198			sis_stop(sc);
2199		}
2200		sc->sis_if_flags = ifp->if_flags;
2201		SIS_UNLOCK(sc);
2202		error = 0;
2203		break;
2204	case SIOCADDMULTI:
2205	case SIOCDELMULTI:
2206		SIS_LOCK(sc);
2207		if (sc->sis_type == SIS_TYPE_83815)
2208			sis_setmulti_ns(sc);
2209		else
2210			sis_setmulti_sis(sc);
2211		SIS_UNLOCK(sc);
2212		break;
2213	case SIOCGIFMEDIA:
2214	case SIOCSIFMEDIA:
2215		mii = device_get_softc(sc->sis_miibus);
2216		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2217		break;
2218	case SIOCSIFCAP:
2219		/* Toggle DEVICE_POLLING; interrupts stay off while polling. */
2220#ifdef DEVICE_POLLING
2221		if (ifr->ifr_reqcap & IFCAP_POLLING &&
2222		    !(ifp->if_capenable & IFCAP_POLLING)) {
2223			error = ether_poll_register(sis_poll, ifp);
2224			if (error)
2225				return (error);
2226			SIS_LOCK(sc);
2227			/* Disable interrupts */
2228			CSR_WRITE_4(sc, SIS_IER, 0);
2229			ifp->if_capenable |= IFCAP_POLLING;
2230			SIS_UNLOCK(sc);
2231			return (error);
2232
2233		}
2234		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
2235		    ifp->if_capenable & IFCAP_POLLING) {
2236			error = ether_poll_deregister(ifp);
2237			/* Enable interrupts. */
2238			SIS_LOCK(sc);
2239			CSR_WRITE_4(sc, SIS_IER, 1);
2240			ifp->if_capenable &= ~IFCAP_POLLING;
2241			SIS_UNLOCK(sc);
2242			return (error);
2243		}
2244#endif /* DEVICE_POLLING */
2245		break;
2246	default:
2247		error = ether_ioctl(ifp, command, data);
2248		break;
2249	}
2250
2251	return (error);
2252}
2253
2254static void
2255sis_watchdog(struct sis_softc *sc)
2256{
2257
2258	SIS_LOCK_ASSERT(sc);
2259
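	/*
	 * sis_startl() arms the timer; this routine, presumably run once
	 * a second from sis_tick(), decrements it.  Zero means nothing is
	 * pending; a timeout is declared only when the timer expires.
	 */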
2260	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
2261		return;
2262
2263	device_printf(sc->sis_dev, "watchdog timeout\n");
2264	sc->sis_ifp->if_oerrors++;
2265
2266	sis_stop(sc);
2267	sis_reset(sc);
2268	sis_initl(sc);
2269
2270	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
2271		sis_startl(sc->sis_ifp);
2272}
2273
2274/*
2275 * Stop the adapter and free any mbufs allocated to the
2276 * RX and TX lists.
2277 */
2278static void
2279sis_stop(struct sis_softc *sc)
2280{
2281	struct ifnet *ifp;
2282	struct sis_rxdesc *rxd;
2283	struct sis_txdesc *txd;
2284	int i;
2285
2286	SIS_LOCK_ASSERT(sc);
2287
2288	ifp = sc->sis_ifp;
2289	sc->sis_watchdog_timer = 0;
2290
2291	callout_stop(&sc->sis_stat_ch);
2292
2293	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2294	CSR_WRITE_4(sc, SIS_IER, 0);
2295	CSR_WRITE_4(sc, SIS_IMR, 0);
2296	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
2297	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
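	/*
	 * Give the transmitter and receiver a moment to stop before the
	 * descriptor list pointers are cleared below.
	 */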
2298	DELAY(1000);
2299	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
2300	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);
2301
2302	sc->sis_link = 0;
2303
2304	/*
2305	 * Free data in the RX lists.
2306	 */
2307	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
2308		rxd = &sc->sis_rxdesc[i];
2309		if (rxd->rx_m != NULL) {
2310			bus_dmamap_sync(sc->sis_rx_tag, rxd->rx_dmamap,
2311			    BUS_DMASYNC_POSTREAD);
2312			bus_dmamap_unload(sc->sis_rx_tag, rxd->rx_dmamap);
2313			m_freem(rxd->rx_m);
2314			rxd->rx_m = NULL;
2315		}
2316	}
2317
2318	/*
2319	 * Free the TX list buffers.
2320	 */
2321	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
2322		txd = &sc->sis_txdesc[i];
2323		if (txd->tx_m != NULL) {
2324			bus_dmamap_sync(sc->sis_tx_tag, txd->tx_dmamap,
2325			    BUS_DMASYNC_POSTWRITE);
2326			bus_dmamap_unload(sc->sis_tx_tag, txd->tx_dmamap);
2327			m_freem(txd->tx_m);
2328			txd->tx_m = NULL;
2329		}
2330	}
2331}
2332
2333/*
2334 * Stop all chip I/O so that the kernel's probe routines don't
2335 * get confused by errant DMAs when rebooting.
2336 */
2337static int
2338sis_shutdown(device_t dev)
2339{
2340	struct sis_softc	*sc;
2341
2342	sc = device_get_softc(dev);
2343	SIS_LOCK(sc);
2344	sis_reset(sc);
2345	sis_stop(sc);
2346	SIS_UNLOCK(sc);
2347	return (0);
2348}
2349
2350static device_method_t sis_methods[] = {
2351	/* Device interface */
2352	DEVMETHOD(device_probe,		sis_probe),
2353	DEVMETHOD(device_attach,	sis_attach),
2354	DEVMETHOD(device_detach,	sis_detach),
2355	DEVMETHOD(device_shutdown,	sis_shutdown),
2356
2357	/* bus interface */
2358	DEVMETHOD(bus_print_child,	bus_generic_print_child),
2359	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
2360
2361	/* MII interface */
2362	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
2363	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
2364	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),
2365
2366	{ 0, 0 }
2367};
2368
2369static driver_t sis_driver = {
2370	"sis",
2371	sis_methods,
2372	sizeof(struct sis_softc)
2373};
2374
2375static devclass_t sis_devclass;
2376
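/*
 * Hook the driver up to the PCI bus and attach miibus beneath it so the
 * PHY drivers can be probed and attached.
 */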
2377DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
2378DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
2379