/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/pci/if_ste.c 149646 2005-08-30 20:35:08Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define STE_USEIOSPACE

#include <pci/if_stereg.h>

MODULE_DEPEND(ste, pci, 1, 1, 1);
MODULE_DEPEND(ste, ether, 1, 1, 1);
MODULE_DEPEND(ste, miibus, 1, 1, 1);

/*
 * Various supported device vendors/types and their names.
 */
static struct ste_type ste_devs[] = {
	{ ST_VENDORID, ST_DEVICEID_ST201, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int ste_probe(device_t);
static int ste_attach(device_t);
static int ste_detach(device_t);
static void ste_init(void *);
static void ste_init_locked(struct ste_softc *);
static void ste_intr(void *);
static void ste_rxeoc(struct ste_softc *);
static void ste_rxeof(struct ste_softc *);
static void ste_txeoc(struct ste_softc *);
static void ste_txeof(struct ste_softc *);
static void ste_stats_update(void *);
static void ste_stop(struct ste_softc *);
static void ste_reset(struct ste_softc *);
static int ste_ioctl(struct ifnet *, u_long, caddr_t);
static int ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *);
static void ste_start(struct ifnet *);
static void ste_start_locked(struct ifnet *);
static void ste_watchdog(struct ifnet *);
static void ste_shutdown(device_t);
static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *,
		struct mbuf *);
static int ste_ifmedia_upd(struct ifnet *);
static void ste_ifmedia_upd_locked(struct ifnet *);
static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void ste_mii_sync(struct ste_softc *);
static void ste_mii_send(struct ste_softc *, u_int32_t, int);
static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
static int ste_miibus_readreg(device_t, int, int);
static int ste_miibus_writereg(device_t, int, int, int);
static void ste_miibus_statchg(device_t);

static int ste_eeprom_wait(struct ste_softc *);
static int ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int);
static void ste_wait(struct ste_softc *);
static void ste_setmulti(struct ste_softc *);
static int ste_init_rx_list(struct ste_softc *);
static void ste_init_tx_list(struct ste_softc *);

#ifdef STE_USEIOSPACE
#define STE_RES			SYS_RES_IOPORT
#define STE_RID			STE_PCI_LOIO
#else
#define STE_RES			SYS_RES_MEMORY
#define STE_RID			STE_PCI_LOMEM
#endif

static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	{ 0, 0 }
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);

SYSCTL_NODE(_hw, OID_AUTO, ste, CTLFLAG_RD, 0, "if_ste parameters");

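/*
 * Count of RX ring resyncs performed by ste_rxeoc() (exported as the
 * hw.ste.rxsyncs sysctl below).
 */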
static int ste_rxsyncs;
SYSCTL_INT(_hw_ste, OID_AUTO, rxsyncs, CTLFLAG_RW, &ste_rxsyncs, 0, "");

#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))


#define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
#define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
 */
static void
ste_mii_sync(sc)
	struct ste_softc		*sc;
{
	register int		i;

	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void
ste_mii_send(sc, bits, cnt)
	struct ste_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	MII_CLR(STE_PHYCTL_MCLK);

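	/*
	 * Shift the bits out MSB first: present each bit on MDATA and
	 * toggle MCLK to clock it into the PHY.
	 */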
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
ste_mii_readreg(sc, frame)
	struct ste_softc		*sc;
	struct ste_mii_frame	*frame;

{
	int			i, ack;

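	/*
	 * An MDIO read frame is a 32-bit preamble (sent by ste_mii_sync()),
	 * a 2-bit start delimiter, a 2-bit opcode, a 5-bit PHY address,
	 * a 5-bit register address, a turnaround slot and 16 data bits.
	 */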
	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:

	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
ste_mii_writereg(sc, frame)
	struct ste_softc		*sc;
	struct ste_mii_frame	*frame;

{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	return(0);
}

static int
ste_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct ste_softc	*sc;
	struct ste_mii_frame	frame;

	sc = device_get_softc(dev);

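	/*
	 * Boards flagged ste_one_phy (see ste_attach()) appear to report
	 * the same PHY at multiple addresses; only answer for PHY 0 there.
	 */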
	if ( sc->ste_one_phy && phy != 0 )
		return (0);

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

static int
ste_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct ste_softc	*sc;
	struct ste_mii_frame	frame;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return(0);
}

static void
ste_miibus_statchg(dev)
	device_t		dev;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->ste_miibus);

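	/*
	 * Keep the MAC's duplex setting in step with whatever the PHY
	 * negotiated.
	 */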
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	} else {
		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	}

	return;
}

static int
ste_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ste_ifmedia_upd_locked(ifp);
	STE_UNLOCK(sc);

	return(0);
}

static void
ste_ifmedia_upd_locked(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->ste_miibus);
	sc->ste_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);
}

static void
ste_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);

	STE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	STE_UNLOCK(sc);

	return;
}

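/*
 * Wait for the DMA engine to finish processing the last stall/unstall
 * command before we touch the DMA list pointers.
 */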
492ste_wait(sc)
493	struct ste_softc		*sc;
494{
495	register int		i;
496
497	for (i = 0; i < STE_TIMEOUT; i++) {
498		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
499			break;
500	}
501
502	if (i == STE_TIMEOUT)
503		if_printf(sc->ste_ifp, "command never completed!\n");
504
505	return;
506}
507
508/*
509 * The EEPROM is slow: give it time to come ready after issuing
510 * it a command.
511 */
512static int
513ste_eeprom_wait(sc)
514	struct ste_softc		*sc;
515{
516	int			i;
517
518	DELAY(1000);
519
520	for (i = 0; i < 100; i++) {
521		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
522			DELAY(1000);
523		else
524			break;
525	}
526
527	if (i == 100) {
528		if_printf(sc->ste_ifp, "eeprom failed to come ready\n");
529		return(1);
530	}
531
532	return(0);
533}
534
535/*
536 * Read a sequence of words from the EEPROM. Note that ethernet address
537 * data is stored in the EEPROM in network byte order.
538 */
539static int
540ste_read_eeprom(sc, dest, off, cnt, swap)
541	struct ste_softc		*sc;
542	caddr_t			dest;
543	int			off;
544	int			cnt;
545	int			swap;
546{
547	int			err = 0, i;
548	u_int16_t		word = 0, *ptr;
549
550	if (ste_eeprom_wait(sc))
551		return(1);
552
553	for (i = 0; i < cnt; i++) {
554		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
555		err = ste_eeprom_wait(sc);
556		if (err)
557			break;
558		word = CSR_READ_2(sc, STE_EEPROM_DATA);
559		ptr = (u_int16_t *)(dest + (i * 2));
560		if (swap)
561			*ptr = ntohs(word);
562		else
563			*ptr = word;
564	}
565
566	return(err ? 1 : 0);
567}
568
static void
ste_setmulti(sc)
	struct ste_softc	*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;

	ifp = sc->ste_ifp;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_2(sc, STE_MAR0, 0);
	CSR_WRITE_2(sc, STE_MAR1, 0);
	CSR_WRITE_2(sc, STE_MAR2, 0);
	CSR_WRITE_2(sc, STE_MAR3, 0);

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}

#ifdef DEVICE_POLLING
static poll_handler_t ste_poll, ste_poll_locked;

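/*
 * Polling entry point: take the softc lock and hand off to the locked
 * worker below.
 */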
static void
ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;

	STE_LOCK(sc);
	ste_poll_locked(ifp, cmd, count);
	STE_UNLOCK(sc);
}

static void
ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;

	STE_LOCK_ASSERT(sc);
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
		return;
	}

	sc->rxcycles = count;
	if (cmd == POLL_AND_CHECK_STATUS)
		ste_rxeoc(sc);
	ste_rxeof(sc);
	ste_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int16_t status;

		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			callout_stop(&sc->ste_stat_callout);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(device_get_softc(sc->ste_miibus));

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init_locked(sc);
		}
	}
}
#endif /* DEVICE_POLLING */

static void
ste_intr(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(ste_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, STE_IMR, 0);
		ste_poll_locked(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) {
		STE_UNLOCK(sc);
		return;
	}

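	/*
	 * Reading STE_ISR_ACK returns the latched interrupt bits and, as
	 * the name suggests, acknowledges them; loop until no interesting
	 * bits remain set.
	 */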
	for (;;) {
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		if (status & STE_ISR_RX_DMADONE) {
			ste_rxeoc(sc);
			ste_rxeof(sc);
		}

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			callout_stop(&sc->ste_stat_callout);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(device_get_softc(sc->ste_miibus));


		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init_locked(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

#ifdef DEVICE_POLLING
done:
#endif /* DEVICE_POLLING */
	STE_UNLOCK(sc);

	return;
}
static void
ste_rxeoc(struct ste_softc *sc)
{
	struct ste_chain_onefrag *cur_rx;

	STE_LOCK_ASSERT(sc);

	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
		cur_rx = sc->ste_cdata.ste_rx_head;
		do {
			cur_rx = cur_rx->ste_next;
			/* If the ring is empty, just return. */
			if (cur_rx == sc->ste_cdata.ste_rx_head)
				return;
		} while (cur_rx->ste_ptr->ste_status == 0);
		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
			/* We've fallen behind the chip: catch it. */
			sc->ste_cdata.ste_rx_head = cur_rx;
			++ste_rxsyncs;
		}
	}
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
ste_rxeof(sc)
	struct ste_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct ste_chain_onefrag	*cur_rx;
	int			total_len = 0, count=0;
	u_int32_t		rxstat;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;

	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
	      & STE_RXSTAT_DMADONE) {
#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */
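		/*
		 * Cap the number of frames handled in one pass so a
		 * single call never chews through the whole ring.
		 */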
		if ((STE_RX_LIST_CNT - count) < 3) {
			break;
		}

		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload-complete bit
		 * should be set, which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			if_printf(ifp,
			    "bad receive status -- packet dropped\n");
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		ifp->if_ipackets++;
		STE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		STE_LOCK(sc);

		cur_rx->ste_ptr->ste_status = 0;
		count++;
	}

	return;
}

static void
ste_txeoc(sc)
	struct ste_softc	*sc;
{
	u_int8_t		txstat;
	struct ifnet		*ifp;

	ifp = sc->ste_ifp;

	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
	    STE_TXSTATUS_TXDONE) {
		if (txstat & STE_TXSTATUS_UNDERRUN ||
		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
		    txstat & STE_TXSTATUS_RECLAIMERR) {
			ifp->if_oerrors++;
			if_printf(ifp, "transmission error: %x\n", txstat);

			ste_reset(sc);
			ste_init_locked(sc);

			if (txstat & STE_TXSTATUS_UNDERRUN &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if_printf(ifp, "tx underrun, increasing tx"
				    " start threshold to %d bytes\n",
				    sc->ste_tx_thresh);
			}
			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
			    (STE_PACKET_SIZE >> 4));
		}
		ste_init_locked(sc);
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}

	return;
}

static void
ste_txeof(sc)
	struct ste_softc	*sc;
{
	struct ste_chain	*cur_tx;
	struct ifnet		*ifp;
	int			idx;

	ifp = sc->ste_ifp;

	idx = sc->ste_cdata.ste_tx_cons;
	while(idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
			break;

		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_opackets++;

		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	if (idx == sc->ste_cdata.ste_tx_prod)
		ifp->if_timer = 0;
}

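/*
 * Periodic (once a second, plus on stats-overflow interrupts) update:
 * fold the chip's collision counters into if_collisions and, while the
 * link is down, poll the PHY so we notice when it comes up.
 */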
static void
ste_stats_update(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = xsc;
	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	mii = device_get_softc(sc->ste_miibus);

	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
	    + CSR_READ_1(sc, STE_MULTI_COLLS)
	    + CSR_READ_1(sc, STE_SINGLE_COLLS);

	if (!sc->ste_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->ste_link++;
			/*
			 * We don't get a callback on re-init, so force a
			 * duplex update here; otherwise we would get stuck
			 * in the wrong link state.
			 */
			ste_miibus_statchg(sc->ste_dev);
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				ste_start_locked(ifp);
		}
	}

	callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc);

	return;
}


/*
 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
ste_probe(dev)
	device_t		dev;
{
	struct ste_type		*t;

	t = ste_devs;

	while(t->ste_name != NULL) {
		if ((pci_get_vendor(dev) == t->ste_vid) &&
		    (pci_get_device(dev) == t->ste_did)) {
			device_set_desc(dev, t->ste_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
ste_attach(dev)
	device_t		dev;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;
	u_char			eaddr[6];

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY, since this chip reports multiple.
	 * Note: on the DFE-550 the PHY is at address 1; on the DFE-580
	 * it is at 0 and 1 (both boards are PCI revision 0x12).
	 */
	if (pci_get_vendor(dev) == DL_VENDORID &&
	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
	    pci_get_revid(dev) == 0x12 )
		sc->ste_one_phy = 1;

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = STE_RID;
	sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE);

	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->ste_btag = rman_get_bustag(sc->ste_res);
	sc->ste_bhandle = rman_get_bushandle(sc->ste_res);

	/* Allocate interrupt */
	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->ste_stat_callout, &sc->ste_mtx, 0);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, eaddr,
	    STE_EEADDR_NODE0, 3, 0)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the descriptor queues. */
	sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ste_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->ste_miibus,
	    ste_ifmedia_upd, ste_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		if_free(ifp);
		goto fail;
	}

fail:
	if (error)
		ste_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
ste_detach(dev)
	device_t		dev;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
	ifp = sc->ste_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		STE_LOCK(sc);
		ste_stop(sc);
		STE_UNLOCK(sc);
		callout_drain(&sc->ste_stat_callout);
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	if (sc->ste_miibus)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	if (sc->ste_intrhand)
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	if (sc->ste_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res)
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);

	if (sc->ste_ldata) {
		contigfree(sc->ste_ldata, sizeof(struct ste_list_data),
		    M_DEVBUF);
	}

	mtx_destroy(&sc->ste_mtx);

	return(0);
}

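/*
 * Attach a fresh mbuf cluster to an RX descriptor (or recycle the mbuf
 * passed in), point the descriptor's single fragment at it and mark the
 * descriptor ready for the chip.
 */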
static int
ste_newbuf(sc, c, m)
	struct ste_softc	*sc;
	struct ste_chain_onefrag	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	c->ste_ptr->ste_frag.ste_len = (1536 + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;

	return(0);
}

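/*
 * Build the RX ring: give every descriptor a cluster via ste_newbuf()
 * and link the descriptors into a circular list, both in the software
 * chain and through the physical ste_next pointers followed by the chip.
 */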
static int
ste_init_rx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int			i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}
		ld->ste_rx_list[i].ste_status = 0;
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

static void
ste_init_tx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int			i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_ptr->ste_next = 0;
		cd->ste_tx_chain[i].ste_ptr->ste_ctl  = 0;
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
	}

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;

	return;
}

static void
ste_init(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;

	sc = xsc;
	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}

static void
ste_init_locked(sc)
	struct ste_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	ste_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, IFP2ENADDR(sc->ste_ifp)[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		if_printf(ifp,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/* Load the address of the RX list. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (deferred until we TX the first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	sc->ste_tx_prev = NULL;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif /* DEVICE_POLLING */
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	ste_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc);

	return;
}

static void
ste_stop(sc)
	struct ste_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	callout_stop(&sc->ste_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine; otherwise, under heavy
	 * RX traffic the chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_link = 0;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	return;
}

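/*
 * Issue a full chip reset, asserting every reset bit at once, then wait
 * for the reset state machine to report completion.
 */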
static void
ste_reset(sc)
	struct ste_softc	*sc;
{
	int			i;

	STE_SETBIT4(sc, STE_ASICCTL,
	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
	    STE_ASICCTL_EXTRESET_RESET);

	DELAY(100000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}

	if (i == STE_TIMEOUT)
		if_printf(sc->ste_ifp, "global reset never completed\n");

	return;
}

static int
ste_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct ste_softc	*sc;
	struct ifreq		*ifr;
	struct mii_data		*mii;
	int			error = 0;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch(command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			}
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
				ste_setmulti(sc);
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ste_tx_thresh = STE_TXSTART_THRESH;
				ste_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ste_stop(sc);
		}
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		ste_setmulti(sc);
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		STE_LOCK(sc);
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		STE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

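/*
 * Map an outgoing mbuf chain onto a TX descriptor's fragment array.  If
 * the chain has more than STE_MAXFRAGS pieces, compact it with m_defrag()
 * and retry the mapping.
 */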
static int
ste_encap(sc, c, m_head)
	struct ste_softc	*sc;
	struct ste_chain	*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct ste_frag		*f = NULL;
	struct mbuf		*m;
	struct ste_desc		*d;

	d = c->ste_ptr;
	d->ste_ctl = 0;

encap_retry:
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == STE_MAXFRAGS)
				break;
			f = &d->ste_frags[frag];
			f->ste_addr = vtophys(mtod(m, vm_offset_t));
			f->ste_len = m->m_len;
			frag++;
		}
	}

	if (m != NULL) {
		struct mbuf *mn;

		/*
		 * We ran out of segments. We have to recopy this
		 * mbuf chain first. Bail out if we can't get the
		 * new buffers.
		 */
		mn = m_defrag(m_head, M_DONTWAIT);
		if (mn == NULL) {
			m_freem(m_head);
			return ENOMEM;
		}
		m_head = mn;
		goto encap_retry;
	}

	c->ste_mbuf = m_head;
	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
	d->ste_ctl = 1;

	return(0);
}

static void
ste_start(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ste_start_locked(ifp);
	STE_UNLOCK(sc);
}

static void
ste_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;
	struct mbuf		*m_head = NULL;
	struct ste_chain	*cur_tx;
	int			idx;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if (!sc->ste_link)
		return;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	idx = sc->ste_cdata.ste_tx_prod;

	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
		/*
		 * We cannot re-use the last (free) descriptor;
		 * the chip may not have read its ste_next yet.
		 */
		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
		    sc->ste_cdata.ste_tx_cons) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (ste_encap(sc, cur_tx, m_head) != 0)
			break;

		cur_tx->ste_ptr->ste_next = 0;

		if (sc->ste_tx_prev == NULL) {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Load address of the TX list */
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);

			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    vtophys(&sc->ste_ldata->ste_tx_list[0]));

			/* Set TX polling interval to start TX engine */
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			sc->ste_tx_prev->ste_ptr->ste_next
				= cur_tx->ste_phys;
		}

		sc->ste_tx_prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->ste_mbuf);

		STE_INC(idx, STE_TX_LIST_CNT);
		ifp->if_timer = 5;
	}
	sc->ste_cdata.ste_tx_prod = idx;

	return;
}

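/*
 * The transmit watchdog fired: no TX descriptor completed within the
 * timeout.  Reclaim what we can, reset and reinitialize the chip, then
 * kick the send queue again.
 */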
static void
ste_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeoc(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);
	STE_UNLOCK(sc);

	return;
}

static void
ste_shutdown(dev)
	device_t		dev;
{
	struct ste_softc	*sc;

	sc = device_get_softc(dev);

	STE_LOCK(sc);
	ste_stop(sc);
	STE_UNLOCK(sc);

	return;
}
