if_ste.c revision 200865
1/*-
2 * Copyright (c) 1997, 1998, 1999
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/ste/if_ste.c 200865 2009-12-22 20:11:56Z yongari $");
35
36#ifdef HAVE_KERNEL_OPTION_HEADERS
37#include "opt_device_polling.h"
38#endif
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/bus.h>
43#include <sys/endian.h>
44#include <sys/kernel.h>
45#include <sys/lock.h>
46#include <sys/malloc.h>
47#include <sys/mbuf.h>
48#include <sys/module.h>
49#include <sys/rman.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53
54#include <net/bpf.h>
55#include <net/if.h>
56#include <net/if_arp.h>
57#include <net/ethernet.h>
58#include <net/if_dl.h>
59#include <net/if_media.h>
60#include <net/if_types.h>
61#include <net/if_vlan_var.h>
62
63#include <machine/bus.h>
64#include <machine/resource.h>
65
66#include <dev/mii/mii.h>
67#include <dev/mii/miivar.h>
68
69#include <dev/pci/pcireg.h>
70#include <dev/pci/pcivar.h>
71
72#include <dev/ste/if_stereg.h>
73
74/* "device miibus" required.  See GENERIC if you get errors here. */
75#include "miibus_if.h"
76
77#define STE_USEIOSPACE
78
79MODULE_DEPEND(ste, pci, 1, 1, 1);
80MODULE_DEPEND(ste, ether, 1, 1, 1);
81MODULE_DEPEND(ste, miibus, 1, 1, 1);
82
83/*
84 * Various supported device vendors/types and their names.
85 */
/* Probe table: PCI vendor/device IDs handled by this driver. */
static struct ste_type ste_devs[] = {
	{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
	{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	{ 0, 0, NULL }	/* sentinel: terminates the ste_probe() scan */
};
92
93static int	ste_attach(device_t);
94static int	ste_detach(device_t);
95static int	ste_probe(device_t);
96static int	ste_shutdown(device_t);
97
98static int	ste_dma_alloc(struct ste_softc *);
99static void	ste_dma_free(struct ste_softc *);
100static void	ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
101static int 	ste_eeprom_wait(struct ste_softc *);
102static int	ste_encap(struct ste_softc *, struct mbuf **,
103		    struct ste_chain *);
104static int	ste_ifmedia_upd(struct ifnet *);
105static void	ste_ifmedia_upd_locked(struct ifnet *);
106static void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
107static void	ste_init(void *);
108static void	ste_init_locked(struct ste_softc *);
109static int	ste_init_rx_list(struct ste_softc *);
110static void	ste_init_tx_list(struct ste_softc *);
111static void	ste_intr(void *);
112static int	ste_ioctl(struct ifnet *, u_long, caddr_t);
113static int	ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
114static void	ste_mii_send(struct ste_softc *, uint32_t, int);
115static void	ste_mii_sync(struct ste_softc *);
116static int	ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
117static int	ste_miibus_readreg(device_t, int, int);
118static void	ste_miibus_statchg(device_t);
119static int	ste_miibus_writereg(device_t, int, int, int);
120static int	ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
121static int	ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int);
122static void	ste_reset(struct ste_softc *);
123static int	ste_rxeof(struct ste_softc *, int);
124static void	ste_setmulti(struct ste_softc *);
125static void	ste_start(struct ifnet *);
126static void	ste_start_locked(struct ifnet *);
127static void	ste_stats_update(struct ste_softc *);
128static void	ste_stop(struct ste_softc *);
129static void	ste_tick(void *);
130static void	ste_txeoc(struct ste_softc *);
131static void	ste_txeof(struct ste_softc *);
132static void	ste_wait(struct ste_softc *);
133static void	ste_watchdog(struct ste_softc *);
134
135#ifdef STE_USEIOSPACE
136#define STE_RES			SYS_RES_IOPORT
137#define STE_RID			STE_PCI_LOIO
138#else
139#define STE_RES			SYS_RES_MEMORY
140#define STE_RID			STE_PCI_LOMEM
141#endif
142
143static device_method_t ste_methods[] = {
144	/* Device interface */
145	DEVMETHOD(device_probe,		ste_probe),
146	DEVMETHOD(device_attach,	ste_attach),
147	DEVMETHOD(device_detach,	ste_detach),
148	DEVMETHOD(device_shutdown,	ste_shutdown),
149
150	/* bus interface */
151	DEVMETHOD(bus_print_child,	bus_generic_print_child),
152	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
153
154	/* MII interface */
155	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
156	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
157	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),
158
159	{ 0, 0 }
160};
161
162static driver_t ste_driver = {
163	"ste",
164	ste_methods,
165	sizeof(struct ste_softc)
166};
167
168static devclass_t ste_devclass;
169
170DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
171DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
172
173#define STE_SETBIT4(sc, reg, x)				\
174	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
175
176#define STE_CLRBIT4(sc, reg, x)				\
177	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
178
179#define STE_SETBIT2(sc, reg, x)				\
180	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
181
182#define STE_CLRBIT2(sc, reg, x)				\
183	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))
184
185#define STE_SETBIT1(sc, reg, x)				\
186	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
187
188#define STE_CLRBIT1(sc, reg, x)				\
189	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
190
191
192#define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
193#define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
194
195/*
196 * Sync the PHYs by setting data bit and strobing the clock 32 times.
197 */
198static void
199ste_mii_sync(struct ste_softc *sc)
200{
201	int i;
202
203	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
204
205	for (i = 0; i < 32; i++) {
206		MII_SET(STE_PHYCTL_MCLK);
207		DELAY(1);
208		MII_CLR(STE_PHYCTL_MCLK);
209		DELAY(1);
210	}
211}
212
213/*
214 * Clock a series of bits through the MII.
215 */
216static void
217ste_mii_send(struct ste_softc *sc, uint32_t bits, int cnt)
218{
219	int i;
220
221	MII_CLR(STE_PHYCTL_MCLK);
222
223	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
224		if (bits & i) {
225			MII_SET(STE_PHYCTL_MDATA);
226                } else {
227			MII_CLR(STE_PHYCTL_MDATA);
228                }
229		DELAY(1);
230		MII_CLR(STE_PHYCTL_MCLK);
231		DELAY(1);
232		MII_SET(STE_PHYCTL_MCLK);
233	}
234}
235
236/*
237 * Read an PHY register through the MII.
238 */
239static int
240ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
241{
242	int i, ack;
243
244	/*
245	 * Set up frame for RX.
246	 */
247	frame->mii_stdelim = STE_MII_STARTDELIM;
248	frame->mii_opcode = STE_MII_READOP;
249	frame->mii_turnaround = 0;
250	frame->mii_data = 0;
251
252	CSR_WRITE_2(sc, STE_PHYCTL, 0);
253	/*
254 	 * Turn on data xmit.
255	 */
256	MII_SET(STE_PHYCTL_MDIR);
257
258	ste_mii_sync(sc);
259
260	/*
261	 * Send command/address info.
262	 */
263	ste_mii_send(sc, frame->mii_stdelim, 2);
264	ste_mii_send(sc, frame->mii_opcode, 2);
265	ste_mii_send(sc, frame->mii_phyaddr, 5);
266	ste_mii_send(sc, frame->mii_regaddr, 5);
267
268	/* Turn off xmit. */
269	MII_CLR(STE_PHYCTL_MDIR);
270
271	/* Idle bit */
272	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
273	DELAY(1);
274	MII_SET(STE_PHYCTL_MCLK);
275	DELAY(1);
276
277	/* Check for ack */
278	MII_CLR(STE_PHYCTL_MCLK);
279	DELAY(1);
280	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
281	MII_SET(STE_PHYCTL_MCLK);
282	DELAY(1);
283
284	/*
285	 * Now try reading data bits. If the ack failed, we still
286	 * need to clock through 16 cycles to keep the PHY(s) in sync.
287	 */
288	if (ack) {
289		for (i = 0; i < 16; i++) {
290			MII_CLR(STE_PHYCTL_MCLK);
291			DELAY(1);
292			MII_SET(STE_PHYCTL_MCLK);
293			DELAY(1);
294		}
295		goto fail;
296	}
297
298	for (i = 0x8000; i; i >>= 1) {
299		MII_CLR(STE_PHYCTL_MCLK);
300		DELAY(1);
301		if (!ack) {
302			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
303				frame->mii_data |= i;
304			DELAY(1);
305		}
306		MII_SET(STE_PHYCTL_MCLK);
307		DELAY(1);
308	}
309
310fail:
311
312	MII_CLR(STE_PHYCTL_MCLK);
313	DELAY(1);
314	MII_SET(STE_PHYCTL_MCLK);
315	DELAY(1);
316
317	if (ack)
318		return (1);
319	return (0);
320}
321
322/*
323 * Write to a PHY register through the MII.
324 */
static int
ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Frame: <start><write op><phy addr><reg addr><turnaround><16 data
	 * bits>; we drive the bus for the whole frame, so no ACK to check.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	return (0);
}
364
365static int
366ste_miibus_readreg(device_t dev, int phy, int reg)
367{
368	struct ste_softc *sc;
369	struct ste_mii_frame frame;
370
371	sc = device_get_softc(dev);
372
373	if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0 && phy != 0)
374		return (0);
375
376	bzero((char *)&frame, sizeof(frame));
377
378	frame.mii_phyaddr = phy;
379	frame.mii_regaddr = reg;
380	ste_mii_readreg(sc, &frame);
381
382	return (frame.mii_data);
383}
384
385static int
386ste_miibus_writereg(device_t dev, int phy, int reg, int data)
387{
388	struct ste_softc *sc;
389	struct ste_mii_frame frame;
390
391	sc = device_get_softc(dev);
392	bzero((char *)&frame, sizeof(frame));
393
394	frame.mii_phyaddr = phy;
395	frame.mii_regaddr = reg;
396	frame.mii_data = data;
397
398	ste_mii_writereg(sc, &frame);
399
400	return (0);
401}
402
/*
 * miibus(4) link-state change callback: track link presence in
 * sc->ste_flags and program the MAC duplex setting to match the
 * resolved media.
 */
static void
ste_miibus_statchg(device_t dev)
{
	struct ste_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t cfg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->ste_miibus);
	ifp = sc->ste_ifp;
	/* Nothing to do until the interface is up and running. */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->ste_flags &= ~STE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
			sc->ste_flags |= STE_FLAG_LINK;
			/* FALLTHROUGH */
		default:
			break;
		}
	}

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ste_flags & STE_FLAG_LINK) != 0) {
		cfg = CSR_READ_2(sc, STE_MACCTL0);
		cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			/*
			 * ST201 data sheet says driver should enable receiving
			 * MAC control frames bit of receive mode register to
			 * receive flow-control frames but the register has no
			 * such bits. In addition the controller has no ability
			 * to send pause frames so it should be handled in
			 * driver. Implementing pause timer handling in driver
			 * layer is not trivial, so don't enable flow-control
			 * here.
			 */
			cfg |= STE_MACCTL0_FULLDUPLEX;
		}
		CSR_WRITE_2(sc, STE_MACCTL0, cfg);
	}
}
453
454static int
455ste_ifmedia_upd(struct ifnet *ifp)
456{
457	struct ste_softc *sc;
458
459	sc = ifp->if_softc;
460	STE_LOCK(sc);
461	ste_ifmedia_upd_locked(ifp);
462	STE_UNLOCK(sc);
463
464	return (0);
465}
466
467static void
468ste_ifmedia_upd_locked(struct ifnet *ifp)
469{
470	struct ste_softc *sc;
471	struct mii_data *mii;
472
473	sc = ifp->if_softc;
474	STE_LOCK_ASSERT(sc);
475	mii = device_get_softc(sc->ste_miibus);
476	sc->ste_flags &= ~STE_FLAG_LINK;
477	if (mii->mii_instance) {
478		struct mii_softc	*miisc;
479		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
480			mii_phy_reset(miisc);
481	}
482	mii_mediachg(mii);
483}
484
485static void
486ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
487{
488	struct ste_softc *sc;
489	struct mii_data *mii;
490
491	sc = ifp->if_softc;
492	mii = device_get_softc(sc->ste_miibus);
493
494	STE_LOCK(sc);
495	mii_pollstat(mii);
496	ifmr->ifm_active = mii->mii_media_active;
497	ifmr->ifm_status = mii->mii_media_status;
498	STE_UNLOCK(sc);
499}
500
501static void
502ste_wait(struct ste_softc *sc)
503{
504	int i;
505
506	for (i = 0; i < STE_TIMEOUT; i++) {
507		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
508			break;
509		DELAY(1);
510	}
511
512	if (i == STE_TIMEOUT)
513		device_printf(sc->ste_dev, "command never completed!\n");
514}
515
516/*
517 * The EEPROM is slow: give it time to come ready after issuing
518 * it a command.
519 */
520static int
521ste_eeprom_wait(struct ste_softc *sc)
522{
523	int i;
524
525	DELAY(1000);
526
527	for (i = 0; i < 100; i++) {
528		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
529			DELAY(1000);
530		else
531			break;
532	}
533
534	if (i == 100) {
535		device_printf(sc->ste_dev, "eeprom failed to come ready\n");
536		return (1);
537	}
538
539	return (0);
540}
541
542/*
543 * Read a sequence of words from the EEPROM. Note that ethernet address
544 * data is stored in the EEPROM in network byte order.
545 */
546static int
547ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
548{
549	uint16_t word, *ptr;
550	int err = 0, i;
551
552	if (ste_eeprom_wait(sc))
553		return (1);
554
555	for (i = 0; i < cnt; i++) {
556		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
557		err = ste_eeprom_wait(sc);
558		if (err)
559			break;
560		word = CSR_READ_2(sc, STE_EEPROM_DATA);
561		ptr = (uint16_t *)(dest + (i * 2));
562		if (swap)
563			*ptr = ntohs(word);
564		else
565			*ptr = word;
566	}
567
568	return (err ? 1 : 0);
569}
570
/*
 * Program the 64-bin multicast hash filter (MAR0..MAR3), or bypass it
 * entirely when ALLMULTI/PROMISC is requested.
 */
static void
ste_setmulti(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[2] = { 0, 0 };
	int h;

	ifp = sc->ste_ifp;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_2(sc, STE_MAR0, 0);
	CSR_WRITE_2(sc, STE_MAR1, 0);
	CSR_WRITE_2(sc, STE_MAR2, 0);
	CSR_WRITE_2(sc, STE_MAR3, 0);

	/* now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Low 6 bits of the big-endian CRC select one of 64 bins. */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

	/* The 64-bit table is split across four 16-bit registers. */
	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
}
613
614#ifdef DEVICE_POLLING
615static poll_handler_t ste_poll, ste_poll_locked;
616
617static int
618ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
619{
620	struct ste_softc *sc = ifp->if_softc;
621	int rx_npkts = 0;
622
623	STE_LOCK(sc);
624	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
625		rx_npkts = ste_poll_locked(ifp, cmd, count);
626	STE_UNLOCK(sc);
627	return (rx_npkts);
628}
629
/*
 * Core of the polling path (driver lock held).  Mirrors ste_intr()
 * minus interrupt gating, with an RX packet budget of `count'.
 */
static int
ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;
	int rx_npkts;

	STE_LOCK_ASSERT(sc);

	rx_npkts = ste_rxeof(sc, count);
	ste_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Reading STE_ISR_ACK clears the latched causes. */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW)
			ste_stats_update(sc);

		/* Host error wedges the chip; full reset + reinit. */
		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init_locked(sc);
		}
	}
	return (rx_npkts);
}
661#endif /* DEVICE_POLLING */
662
/*
 * Interrupt handler: ack and dispatch all pending causes in a loop,
 * then re-enable interrupts and restart transmission if needed.
 */
static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	/* When polling is active the ISR must not touch the chip. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		STE_UNLOCK(sc);
		return;
	}
#endif

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) {
		STE_UNLOCK(sc);
		return;
	}

	for (;;) {
		/* Reading STE_ISR_ACK clears the latched interrupt bits. */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		if (status & STE_ISR_RX_DMADONE)
			ste_rxeof(sc, -1);	/* -1: no RX budget limit */

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW)
			ste_stats_update(sc);

		/* Host error wedges the chip; full reset + reinit. */
		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init_locked(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	STE_UNLOCK(sc);
}
719
720/*
721 * A frame has been uploaded: pass the resulting mbuf chain up to
722 * the higher level protocols.
723 */
static int
ste_rxeof(struct ste_softc *sc, int count)
{
        struct mbuf *m;
        struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	uint32_t rxstat;
	int total_len, rx_npkts;

	ifp = sc->ste_ifp;

	/* Pick up descriptor status words written by the chip. */
	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Walk at most one full ring of descriptors. */
	cur_rx = sc->ste_cdata.ste_rx_head;
	for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++,
	    cur_rx = cur_rx->ste_next) {
		rxstat = le32toh(cur_rx->ste_ptr->ste_status);
		if ((rxstat & STE_RXSTAT_DMADONE) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			/* Negative count (interrupt path) means no budget. */
			if (count == 0)
				break;
			count--;
		}
#endif
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = STE_RX_BYTES(rxstat);

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx) != 0) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		ifp->if_ipackets++;
		/* Drop the lock around the stack call to avoid recursion. */
		STE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		STE_LOCK(sc);
	}

	if (rx_npkts > 0) {
		sc->ste_cdata.ste_rx_head = cur_rx;
		/* Push cleared status words back before the chip reuses them. */
		bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
		    sc->ste_cdata.ste_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (rx_npkts);
}
801
802static void
803ste_txeoc(struct ste_softc *sc)
804{
805	struct ifnet *ifp;
806	uint8_t txstat;
807
808	ifp = sc->ste_ifp;
809
810	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
811	    STE_TXSTATUS_TXDONE) {
812		if (txstat & STE_TXSTATUS_UNDERRUN ||
813		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
814		    txstat & STE_TXSTATUS_RECLAIMERR) {
815			ifp->if_oerrors++;
816			device_printf(sc->ste_dev,
817			    "transmission error: %x\n", txstat);
818
819			ste_reset(sc);
820			ste_init_locked(sc);
821
822			if (txstat & STE_TXSTATUS_UNDERRUN &&
823			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
824				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
825				device_printf(sc->ste_dev,
826				    "tx underrun, increasing tx"
827				    " start threshold to %d bytes\n",
828				    sc->ste_tx_thresh);
829			}
830			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
831			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
832			    (STE_PACKET_SIZE >> 4));
833		}
834		ste_init_locked(sc);
835		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
836	}
837}
838
/*
 * Once-a-second callout (driver lock held by callout_init_mtx): poll
 * the PHY, refresh MAC statistics and run the transmit watchdog.
 */
static void
ste_tick(void *arg)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = (struct ste_softc *)arg;

	STE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ste_miibus);
	mii_tick(mii);
	/*
	 * ukphy(4) does not seem to generate CB that reports
	 * resolved link state so if we know we lost a link,
	 * explicitly check the link state.
	 */
	if ((sc->ste_flags & STE_FLAG_LINK) == 0)
		ste_miibus_statchg(sc->ste_dev);
	ste_stats_update(sc);
	ste_watchdog(sc);
	/* Reschedule ourselves one second from now. */
	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}
862
/*
 * Reclaim completed TX descriptors: unload their DMA maps, free the
 * mbufs and advance the consumer index toward the producer.
 */
static void
ste_txeof(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain *cur_tx;
	uint32_t txstat;
	int idx;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	idx = sc->ste_cdata.ste_tx_cons;
	/* Ring empty: nothing to reclaim. */
	if (idx == sc->ste_cdata.ste_tx_prod)
		return;

	/* Pick up control words the chip may have updated. */
	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
		txstat = le32toh(cur_tx->ste_ptr->ste_ctl);
		/* Stop at the first descriptor the chip hasn't finished. */
		if ((txstat & STE_TXCTL_DMADONE) == 0)
			break;
		bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map);
		KASSERT(cur_tx->ste_mbuf != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		/* At least one slot is free again; allow new transmits. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_opackets++;
		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	/* Everything reclaimed: disarm the watchdog timer. */
	if (sc->ste_cdata.ste_tx_cnt == 0)
		sc->ste_timer = 0;
}
904
905static void
906ste_stats_update(struct ste_softc *sc)
907{
908	struct ifnet *ifp;
909
910	STE_LOCK_ASSERT(sc);
911
912	ifp = sc->ste_ifp;
913	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
914	    + CSR_READ_1(sc, STE_MULTI_COLLS)
915	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
916}
917
918/*
919 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
920 * IDs against our list and return a device name if we find a match.
921 */
922static int
923ste_probe(device_t dev)
924{
925	struct ste_type *t;
926
927	t = ste_devs;
928
929	while (t->ste_name != NULL) {
930		if ((pci_get_vendor(dev) == t->ste_vid) &&
931		    (pci_get_device(dev) == t->ste_did)) {
932			device_set_desc(dev, t->ste_name);
933			return (BUS_PROBE_DEFAULT);
934		}
935		t++;
936	}
937
938	return (ENXIO);
939}
940
941/*
942 * Attach the interface. Allocate softc structures, do ifmedia
943 * setup and ethernet/BPF attach.
944 */
945static int
946ste_attach(device_t dev)
947{
948	struct ste_softc *sc;
949	struct ifnet *ifp;
950	u_char eaddr[6];
951	int error = 0, rid;
952
953	sc = device_get_softc(dev);
954	sc->ste_dev = dev;
955
956	/*
957	 * Only use one PHY since this chip reports multiple
958	 * Note on the DFE-550 the PHY is at 1 on the DFE-580
959	 * it is at 0 & 1.  It is rev 0x12.
960	 */
961	if (pci_get_vendor(dev) == DL_VENDORID &&
962	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
963	    pci_get_revid(dev) == 0x12 )
964		sc->ste_flags |= STE_FLAG_ONE_PHY;
965
966	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
967	    MTX_DEF);
968	/*
969	 * Map control/status registers.
970	 */
971	pci_enable_busmaster(dev);
972
973	rid = STE_RID;
974	sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE);
975
976	if (sc->ste_res == NULL) {
977		device_printf(dev, "couldn't map ports/memory\n");
978		error = ENXIO;
979		goto fail;
980	}
981
982	sc->ste_btag = rman_get_bustag(sc->ste_res);
983	sc->ste_bhandle = rman_get_bushandle(sc->ste_res);
984
985	/* Allocate interrupt */
986	rid = 0;
987	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
988	    RF_SHAREABLE | RF_ACTIVE);
989
990	if (sc->ste_irq == NULL) {
991		device_printf(dev, "couldn't map interrupt\n");
992		error = ENXIO;
993		goto fail;
994	}
995
996	callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);
997
998	/* Reset the adapter. */
999	ste_reset(sc);
1000
1001	/*
1002	 * Get station address from the EEPROM.
1003	 */
1004	if (ste_read_eeprom(sc, eaddr,
1005	    STE_EEADDR_NODE0, 3, 0)) {
1006		device_printf(dev, "failed to read station address\n");
1007		error = ENXIO;;
1008		goto fail;
1009	}
1010
1011	if ((error = ste_dma_alloc(sc)) != 0)
1012		goto fail;
1013
1014	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
1015	if (ifp == NULL) {
1016		device_printf(dev, "can not if_alloc()\n");
1017		error = ENOSPC;
1018		goto fail;
1019	}
1020
1021	/* Do MII setup. */
1022	if (mii_phy_probe(dev, &sc->ste_miibus,
1023	    ste_ifmedia_upd, ste_ifmedia_sts)) {
1024		device_printf(dev, "MII without any phy!\n");
1025		error = ENXIO;
1026		goto fail;
1027	}
1028
1029	ifp->if_softc = sc;
1030	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1031	ifp->if_mtu = ETHERMTU;
1032	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1033	ifp->if_ioctl = ste_ioctl;
1034	ifp->if_start = ste_start;
1035	ifp->if_init = ste_init;
1036	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
1037	ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
1038	IFQ_SET_READY(&ifp->if_snd);
1039
1040	sc->ste_tx_thresh = STE_TXSTART_THRESH;
1041
1042	/*
1043	 * Call MI attach routine.
1044	 */
1045	ether_ifattach(ifp, eaddr);
1046
1047	/*
1048	 * Tell the upper layer(s) we support long frames.
1049	 */
1050	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1051	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1052	ifp->if_capenable = ifp->if_capabilities;
1053#ifdef DEVICE_POLLING
1054	ifp->if_capabilities |= IFCAP_POLLING;
1055#endif
1056
1057	/* Hook interrupt last to avoid having to lock softc */
1058	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
1059	    NULL, ste_intr, sc, &sc->ste_intrhand);
1060
1061	if (error) {
1062		device_printf(dev, "couldn't set up irq\n");
1063		ether_ifdetach(ifp);
1064		goto fail;
1065	}
1066
1067fail:
1068	if (error)
1069		ste_detach(dev);
1070
1071	return (error);
1072}
1073
1074/*
1075 * Shutdown hardware and free up resources. This can be called any
1076 * time after the mutex has been initialized. It is called in both
1077 * the error case in attach and the normal detach case so it needs
1078 * to be careful about only freeing resources that have actually been
1079 * allocated.
1080 */
1081static int
1082ste_detach(device_t dev)
1083{
1084	struct ste_softc *sc;
1085	struct ifnet *ifp;
1086
1087	sc = device_get_softc(dev);
1088	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
1089	ifp = sc->ste_ifp;
1090
1091#ifdef DEVICE_POLLING
1092	if (ifp->if_capenable & IFCAP_POLLING)
1093		ether_poll_deregister(ifp);
1094#endif
1095
1096	/* These should only be active if attach succeeded */
1097	if (device_is_attached(dev)) {
1098		ether_ifdetach(ifp);
1099		STE_LOCK(sc);
1100		ste_stop(sc);
1101		STE_UNLOCK(sc);
1102		callout_drain(&sc->ste_callout);
1103	}
1104	if (sc->ste_miibus)
1105		device_delete_child(dev, sc->ste_miibus);
1106	bus_generic_detach(dev);
1107
1108	if (sc->ste_intrhand)
1109		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
1110	if (sc->ste_irq)
1111		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
1112	if (sc->ste_res)
1113		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
1114
1115	if (ifp)
1116		if_free(ifp);
1117
1118	ste_dma_free(sc);
1119	mtx_destroy(&sc->ste_mtx);
1120
1121	return (0);
1122}
1123
/* Callback argument for ste_dmamap_cb(): receives the mapped address. */
struct ste_dmamap_arg {
	bus_addr_t	ste_busaddr;	/* filled in by ste_dmamap_cb() */
};
1127
1128static void
1129ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1130{
1131	struct ste_dmamap_arg *ctx;
1132
1133	if (error != 0)
1134		return;
1135
1136	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1137
1138	ctx = (struct ste_dmamap_arg *)arg;
1139	ctx->ste_busaddr = segs[0].ds_addr;
1140}
1141
/*
 * Allocate all bus_dma(9) resources used by the driver: a parent DMA
 * tag, tags plus DMA'able memory for the Tx and Rx descriptor lists,
 * and per-buffer tags and maps for Tx/Rx mbufs (including one spare
 * Rx map that ste_newbuf() swaps in on each refill).
 * Returns 0 on success or a bus_dma error code; on failure the caller
 * is expected to release partial allocations via ste_dma_free().
 */
static int
ste_dma_alloc(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	struct ste_dmamap_arg ctx;
	int error, i;

	/*
	 * Create parent DMA tag.  Its BUS_SPACE_MAXADDR_32BIT lowaddr
	 * restricts all child allocations below 4GB for this PCI chip.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ste_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_parent_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Tx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Rx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers (up to STE_MAXFRAGS clusters). */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STE_MAXFRAGS,	/* maxsize */
	    STE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers (one cluster per descriptor). */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
	    (void **)&sc->ste_ldata.ste_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_tx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Tx list.\n");
		goto fail;
	}
	/* ste_dmamap_cb() records the list's bus address in ctx. */
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
	    STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Tx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
	    (void **)&sc->ste_ldata.ste_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_rx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Rx list.\n");
		goto fail;
	}
	/* Same callback dance as for the Tx list above. */
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
	    STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Rx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		txc = &sc->ste_cdata.ste_tx_chain[i];
		txc->ste_ptr = NULL;
		txc->ste_mbuf = NULL;
		txc->ste_next = NULL;
		txc->ste_phys = 0;
		txc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
		    &txc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers, starting with the spare map. */
	if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
	    &sc->ste_cdata.ste_rx_sparemap)) != 0) {
		device_printf(sc->ste_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		rxc = &sc->ste_cdata.ste_rx_chain[i];
		rxc->ste_ptr = NULL;
		rxc->ste_mbuf = NULL;
		rxc->ste_next = NULL;
		rxc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
		    &rxc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
1326
/*
 * Release everything ste_dma_alloc() set up.  Safe to call on a
 * partially initialized softc: every tag, map and memory pointer is
 * NULL-checked before being destroyed and cleared afterwards, so a
 * repeated call is harmless.  Maps are unloaded before their backing
 * memory is freed, and the parent tag is destroyed last.
 */
static void
ste_dma_free(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	int i;

	/* Tx buffers. */
	if (sc->ste_cdata.ste_tx_tag != NULL) {
		for (i = 0; i < STE_TX_LIST_CNT; i++) {
			txc = &sc->ste_cdata.ste_tx_chain[i];
			if (txc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
				    txc->ste_map);
				txc->ste_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
		sc->ste_cdata.ste_tx_tag = NULL;
	}
	/* Rx buffers (per-descriptor maps plus the spare map). */
	if (sc->ste_cdata.ste_rx_tag != NULL) {
		for (i = 0; i < STE_RX_LIST_CNT; i++) {
			rxc = &sc->ste_cdata.ste_rx_chain[i];
			if (rxc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
				    rxc->ste_map);
				rxc->ste_map = NULL;
			}
		}
		if (sc->ste_cdata.ste_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
			    sc->ste_cdata.ste_rx_sparemap);
			sc->ste_cdata.ste_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
		sc->ste_cdata.ste_rx_tag = NULL;
	}
	/* Tx descriptor list: unload map, then free memory, then tag. */
	if (sc->ste_cdata.ste_tx_list_tag != NULL) {
		if (sc->ste_cdata.ste_tx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map);
		if (sc->ste_cdata.ste_tx_list_map != NULL &&
		    sc->ste_ldata.ste_tx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_ldata.ste_tx_list,
			    sc->ste_cdata.ste_tx_list_map);
		sc->ste_ldata.ste_tx_list = NULL;
		sc->ste_cdata.ste_tx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
		sc->ste_cdata.ste_tx_list_tag = NULL;
	}
	/* Rx descriptor list: same ordering as the Tx list above. */
	if (sc->ste_cdata.ste_rx_list_tag != NULL) {
		if (sc->ste_cdata.ste_rx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_cdata.ste_rx_list_map);
		if (sc->ste_cdata.ste_rx_list_map != NULL &&
		    sc->ste_ldata.ste_rx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_ldata.ste_rx_list,
			    sc->ste_cdata.ste_rx_list_map);
		sc->ste_ldata.ste_rx_list = NULL;
		sc->ste_cdata.ste_rx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
		sc->ste_cdata.ste_rx_list_tag = NULL;
	}
	/* Parent tag goes last, after all children are destroyed. */
	if (sc->ste_cdata.ste_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
		sc->ste_cdata.ste_parent_tag = NULL;
	}
}
1400
/*
 * Attach a fresh mbuf cluster to the given Rx descriptor.
 *
 * The new cluster is loaded into the spare DMA map first; only when
 * that succeeds is the old mbuf unloaded and the maps swapped.  This
 * way the ring entry keeps its old, valid buffer if allocation or
 * loading fails.  Returns 0 on success, ENOBUFS or a bus_dma error
 * otherwise.
 */
static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Reserve ETHER_ALIGN bytes at the front of the cluster. */
	m_adj(m, ETHER_ALIGN);

	/* Load into the spare map; old buffer is still intact on error. */
	if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
	    sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Release the previous buffer's mapping, if any. */
	if (rxc->ste_mbuf != NULL) {
		bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
	}
	/* Swap the loaded spare map into the ring entry. */
	map = rxc->ste_map;
	rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
	sc->ste_cdata.ste_rx_sparemap = map;
	bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
	    BUS_DMASYNC_PREREAD);
	rxc->ste_mbuf = m;
	/* Hand the descriptor back to the chip: clear status, set frag. */
	rxc->ste_ptr->ste_status = 0;
	rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
	rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
	    STE_FRAG_LAST);
	return (0);
}
1439
1440static int
1441ste_init_rx_list(struct ste_softc *sc)
1442{
1443	struct ste_chain_data *cd;
1444	struct ste_list_data *ld;
1445	int error, i;
1446
1447	cd = &sc->ste_cdata;
1448	ld = &sc->ste_ldata;
1449	bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
1450	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1451		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
1452		error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
1453		if (error != 0)
1454			return (error);
1455		if (i == (STE_RX_LIST_CNT - 1)) {
1456			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
1457			ld->ste_rx_list[i].ste_next = ld->ste_rx_list_paddr +
1458			    (sizeof(struct ste_desc_onefrag) * 0);
1459		} else {
1460			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
1461			ld->ste_rx_list[i].ste_next = ld->ste_rx_list_paddr +
1462			    (sizeof(struct ste_desc_onefrag) * (i + 1));
1463		}
1464	}
1465
1466	cd->ste_rx_head = &cd->ste_rx_chain[0];
1467	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
1468	    sc->ste_cdata.ste_rx_list_map,
1469	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1470
1471	return (0);
1472}
1473
1474static void
1475ste_init_tx_list(struct ste_softc *sc)
1476{
1477	struct ste_chain_data *cd;
1478	struct ste_list_data *ld;
1479	int i;
1480
1481	cd = &sc->ste_cdata;
1482	ld = &sc->ste_ldata;
1483	bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
1484	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1485		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1486		cd->ste_tx_chain[i].ste_mbuf = NULL;
1487		if (i == (STE_TX_LIST_CNT - 1)) {
1488			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0];
1489			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
1490			    ld->ste_tx_list_paddr +
1491			    (sizeof(struct ste_desc) * 0)));
1492		} else {
1493			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1];
1494			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
1495			    ld->ste_tx_list_paddr +
1496			    (sizeof(struct ste_desc) * (i + 1))));
1497		}
1498	}
1499
1500	cd->ste_last_tx = NULL;
1501	cd->ste_tx_prod = 0;
1502	cd->ste_tx_cons = 0;
1503	cd->ste_tx_cnt = 0;
1504
1505	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
1506	    sc->ste_cdata.ste_tx_list_map,
1507	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1508}
1509
/*
 * ifnet if_init entry point: acquire the softc lock and run the real
 * initialization in ste_init_locked().
 */
static void
ste_init(void *xsc)
{
	struct ste_softc *sc = xsc;

	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}
1520
/*
 * Program the chip and mark the interface running.  Caller must hold
 * the softc lock.  Stops the chip first, loads the station address,
 * sets up both descriptor rings, programs thresholds and the Rx
 * filter, points the DMA engines at the lists, enables the MAC and
 * interrupts (unless polling is active), kicks the media layer and
 * starts the one-second tick callout.
 */
static void
ste_init_locked(struct ste_softc *sc)
{
	struct ifnet *ifp;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	ste_stop(sc);

	/* Init our MAC address: two bytes per 16-bit PAR register. */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		CSR_WRITE_2(sc, STE_PAR0 + i,
		    ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
		     IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
	}

	/* Init RX list; bail out cleanly if mbufs are exhausted. */
	if (ste_init_rx_list(sc) != 0) {
		device_printf(sc->ste_dev,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/* Load the address of the RX list: stall, program, unstall. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
	/*
	 * NOTE(review): the unstall bit is written twice in the
	 * original driver; presumably some chip revisions need the
	 * second write -- preserve as-is.
	 */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval(defer until we TX first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list; real pointer is set on first Tx. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Ack any stale interrupts before unmasking. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	ste_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}
1629
/*
 * Stop the chip and release all in-flight buffers.  Caller must hold
 * the softc lock.  Interrupts are masked, the MAC and both DMA engines
 * disabled, and the chip reset so it cannot keep DMA'ing into buffers
 * that are about to be freed; then every mbuf still attached to the
 * Tx/Rx rings is synced, unloaded and freed.
 */
static void
ste_stop(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	struct ste_chain *cur_tx;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	/* Cancel the tick callout and clear the watchdog timer. */
	callout_stop(&sc->ste_callout);
	sc->ste_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine or under heavy RX
	 * data chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_flags &= ~STE_FLAG_LINK;

	/* Free any mbufs still sitting in the Rx ring. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cur_rx = &sc->ste_cdata.ste_rx_chain[i];
		if (cur_rx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map);
			m_freem(cur_rx->ste_mbuf);
			cur_rx->ste_mbuf = NULL;
		}
	}

	/* Free any mbufs still sitting in the Tx ring. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[i];
		if (cur_tx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map);
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}
	}
}
1684
1685static void
1686ste_reset(struct ste_softc *sc)
1687{
1688	int i;
1689
1690	STE_SETBIT4(sc, STE_ASICCTL,
1691	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1692	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1693	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1694	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1695	    STE_ASICCTL_EXTRESET_RESET);
1696
1697	DELAY(100000);
1698
1699	for (i = 0; i < STE_TIMEOUT; i++) {
1700		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1701			break;
1702	}
1703
1704	if (i == STE_TIMEOUT)
1705		device_printf(sc->ste_dev, "global reset never completed\n");
1706}
1707
/*
 * ifnet ioctl handler.  Handles interface flag changes (up/down,
 * promiscuous and allmulti toggles without a full reinit when the
 * interface is already running), multicast list updates, media
 * ioctls (forwarded to the MII layer), and DEVICE_POLLING capability
 * changes; everything else falls through to ether_ioctl().
 */
static int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Toggle just the promisc bit when only
			 * IFF_PROMISC changed while running.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			}
			/* Reprogram the filter if IFF_ALLMULTI flipped. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
				ste_setmulti(sc);
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ste_tx_thresh = STE_TXSTART_THRESH;
				ste_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ste_stop(sc);
		}
		/* Remember the flags for the next delta computation. */
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		ste_setmulti(sc);
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(ste_poll, ifp);
			if (error)
				return (error);
			STE_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, STE_IMR, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			STE_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			STE_LOCK(sc);
			CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			STE_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1795
/*
 * Map an outgoing mbuf chain into the given Tx descriptor.
 *
 * If the chain has too many fragments (EFBIG) it is collapsed to at
 * most STE_MAXFRAGS and the load retried.  On failure the chain is
 * freed and *m_head set to NULL; on success ownership of *m_head
 * passes to the ring and the producer index/count are advanced.
 * Caller must hold the softc lock.  Returns 0 or an errno.
 */
static int
ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
{
	struct ste_frag *frag;
	struct mbuf *m;
	struct ste_desc *desc;
	bus_dma_segment_t txsegs[STE_MAXFRAGS];
	int error, i, nsegs;

	STE_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
	    txc->ste_map, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: defragment and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
		    txc->ste_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* A zero-segment load means there is nothing to send. */
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
	    BUS_DMASYNC_PREWRITE);

	/* Fill the fragment table; mark the final fragment. */
	desc = txc->ste_ptr;
	for (i = 0; i < nsegs; i++) {
		frag = &desc->ste_frags[i];
		frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
		frag->ste_len = htole32(txsegs[i].ds_len);
	}
	desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
	/*
	 * Because we use Tx polling we can't chain multiple
	 * Tx descriptors here. Otherwise we race with controller.
	 */
	desc->ste_next = 0;
	desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS | STE_TXCTL_DMAINTR);
	txc->ste_mbuf = *m_head;
	STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
	sc->ste_cdata.ste_tx_cnt++;

	return (0);
}
1854
1855static void
1856ste_start(struct ifnet *ifp)
1857{
1858	struct ste_softc *sc;
1859
1860	sc = ifp->if_softc;
1861	STE_LOCK(sc);
1862	ste_start_locked(ifp);
1863	STE_UNLOCK(sc);
1864}
1865
/*
 * Drain the interface send queue into the Tx ring.  Caller must hold
 * the softc lock.  Does nothing unless the interface is running,
 * not flow-blocked, and link is up.  The first frame after the ring
 * goes idle (re)programs the Tx DMA list pointer and poll period;
 * subsequent frames are chained off the previous descriptor.  Arms
 * the watchdog timer if anything was queued.
 */
static void
ste_start_locked(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct ste_chain *cur_tx;
	struct mbuf *m_head = NULL;
	int enq;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
			/*
			 * Controller may have cached copy of the last used
			 * next ptr so we have to reserve one TFD to avoid
			 * TFD overruns.
			 */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
		if (ste_encap(sc, &m_head, cur_tx) != 0) {
			/* m_head == NULL means ste_encap() freed it. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		if (sc->ste_cdata.ste_last_tx == NULL) {
			/* Ring was idle: point the Tx engine at the list. */
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);
			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
	    		    STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			/* Chain this frame off the previous descriptor. */
			sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
			    sc->ste_cdata.ste_last_tx->ste_phys;
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc->ste_cdata.ste_last_tx = cur_tx;

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
	 	 */
		BPF_MTAP(ifp, m_head);
	}

	/* Arm the watchdog if we queued anything. */
	if (enq > 0)
		sc->ste_timer = STE_TX_TIMEOUT;
}
1932
1933static void
1934ste_watchdog(struct ste_softc *sc)
1935{
1936	struct ifnet *ifp;
1937
1938	ifp = sc->ste_ifp;
1939	STE_LOCK_ASSERT(sc);
1940
1941	if (sc->ste_timer == 0 || --sc->ste_timer)
1942		return;
1943
1944	ifp->if_oerrors++;
1945	if_printf(ifp, "watchdog timeout\n");
1946
1947	ste_txeoc(sc);
1948	ste_txeof(sc);
1949	ste_rxeof(sc, -1);
1950	ste_reset(sc);
1951	ste_init_locked(sc);
1952
1953	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1954		ste_start_locked(ifp);
1955}
1956
1957static int
1958ste_shutdown(device_t dev)
1959{
1960	struct ste_softc *sc;
1961
1962	sc = device_get_softc(dev);
1963
1964	STE_LOCK(sc);
1965	ste_stop(sc);
1966	STE_UNLOCK(sc);
1967
1968	return (0);
1969}
1970