/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
 * documentation not available, but ask me nicely.
 *
 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
 * It's a 64-bit PCI part that supports TCP/IP checksum offload,
 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
 * are three supported methods for data transfer between host and
 * NIC: programmed I/O, traditional scatter/gather DMA and Packet
 * Propulsion Technology (tm) DMA. The latter mechanism is a form
 * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
 * into a table at device initialization time. The rationale is that
 * the virtual to physical address translation needed for normal
 * scatter/gather DMA is more expensive than the data copy needed
 * for double buffering. This may be true in Windows NT and the like,
 * but it isn't true for us, at least on the x86 arch. This driver
 * uses the scatter/gather I/O method for both TX and RX.
 *
 * The LXT1001 only supports TCP/IP checksum offload on receive.
 * Also, the VLAN tagging is done using a 16-entry table which allows
 * the chip to perform hardware filtering based on VLAN tags. Sadly,
 * our vlan support doesn't currently play well with this kind of
 * hardware support.
 *
 * Special thanks to:
 * - Jeff James at Intel, for arranging to have the LXT1001 manual
 *   released (at long last)
 * - Beny Chen at D-Link, for actually sending it to me
 * - Brad Short and Keith Alexis at SMC, for sending me sample
 *   SMC9462SX and SMC9462TX adapters for testing
 * - Paul Saab at Y!, for not killing me (though it remains to be seen
 *   if in fact he did me much of a favor)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/lge/if_lge.c 113506 2003-04-15 06:37:30Z mdodd $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#define LGE_USEIOSPACE

#include <dev/lge/if_lgereg.h>

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct lge_type lge_devs[] = {
	{ LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int lge_probe(device_t);
static int lge_attach(device_t);
static int lge_detach(device_t);

static int lge_alloc_jumbo_mem(struct lge_softc *);
static void lge_free_jumbo_mem(struct lge_softc *);
static void *lge_jalloc(struct lge_softc *);
static void lge_jfree(void *, void *);

static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *);
static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
static void lge_rxeof(struct lge_softc *, int);
static void lge_rxeoc(struct lge_softc *);
static void lge_txeof(struct lge_softc *);
static void lge_intr(void *);
static void lge_tick(void *);
static void lge_start(struct ifnet *);
static int lge_ioctl(struct ifnet *, u_long, caddr_t);
static void lge_init(void *);
static void lge_stop(struct lge_softc *);
static void lge_watchdog(struct ifnet *);
static void lge_shutdown(device_t);
static int lge_ifmedia_upd(struct ifnet *);
static void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);

static int lge_miibus_readreg(device_t, int, int);
static int lge_miibus_writereg(device_t, int, int, int);
static void lge_miibus_statchg(device_t);

static void lge_setmulti(struct lge_softc *);
static u_int32_t lge_crc(struct lge_softc *, caddr_t);
static void lge_reset(struct lge_softc *);
static int lge_list_rx_init(struct lge_softc *);
static int lge_list_tx_init(struct lge_softc *);

#ifdef LGE_USEIOSPACE
#define LGE_RES			SYS_RES_IOPORT
#define LGE_RID			LGE_PCI_LOIO
#else
#define LGE_RES			SYS_RES_MEMORY
#define LGE_RID			LGE_PCI_LOMEM
#endif

static device_method_t lge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		lge_probe),
	DEVMETHOD(device_attach,	lge_attach),
	DEVMETHOD(device_detach,	lge_detach),
	DEVMETHOD(device_shutdown,	lge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),

	{ 0, 0 }
};

static driver_t lge_driver = {
	"lge",
	lge_methods,
	sizeof(struct lge_softc)
};

static devclass_t lge_devclass;

DRIVER_MODULE(lge, pci, lge_driver, lge_devclass, 0, 0);
DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(lge, pci, 1, 1, 1);
MODULE_DEPEND(lge, ether, 1, 1, 1);
MODULE_DEPEND(lge, miibus, 1, 1, 1);

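/*
 * Register access helpers: LGE_SETBIT() and LGE_CLRBIT() do read-modify-write
 * updates of a 32-bit CSR, while SIO_SET() and SIO_CLR() toggle individual
 * bits in the MEAR register.
 */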
#define LGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define LGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
lge_eeprom_getword(sc, addr, dest)
	struct lge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int32_t		val;

	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: EEPROM read timed out\n", sc->lge_unit);
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
lge_read_eeprom(sc, dest, off, cnt, swap)
	struct lge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

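/*
 * Read a PHY register through the chip's GMII management interface:
 * issue the read command, poll until the controller is no longer busy,
 * then return the data from the upper 16 bits of the GMII control register.
 */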
static int
lge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY read timed out\n", sc->lge_unit);
		return(0);
	}

	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

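/*
 * Write a PHY register through the GMII management interface and wait
 * for the command to complete.
 */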
static int
lge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY write timed out\n", sc->lge_unit);
		return(0);
	}

	return(0);
}

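/*
 * miibus media status change callback: program the MAC's speed and
 * duplex settings to match whatever the PHY has negotiated.
 */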
static void
lge_miibus_statchg(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->lge_miibus);

	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}

	return;
}

static u_int32_t
lge_crc(sc, addr)
	struct lge_softc	*sc;
	caddr_t			addr;
{
	u_int32_t		crc, carry;
	int			i, j;
	u_int8_t		c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/*
	 * return the filter bit position
	 */
	return((crc >> 26) & 0x0000003F);
}

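/*
 * Program the 64-bit multicast hash filter.  In promiscuous or allmulti
 * mode both hash registers are simply set to all ones; otherwise each
 * multicast address is hashed with lge_crc() into a bit position in
 * LGE_MAR0/LGE_MAR1.
 */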
static void
lge_setmulti(sc)
	struct lge_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	ifp = &sc->arpcom.ac_if;

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = lge_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}

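/*
 * Issue a software reset and poll until the chip reports that the reset
 * has completed.
 */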
static void
lge_reset(sc)
	struct lge_softc	*sc;
{
	register int		i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		printf("lge%d: reset never completed\n", sc->lge_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
lge_probe(dev)
	device_t		dev;
{
	struct lge_type		*t;

	t = lge_devs;

	while(t->lge_name != NULL) {
		if ((pci_get_vendor(dev) == t->lge_vid) &&
		    (pci_get_device(dev) == t->lge_did)) {
			device_set_desc(dev, t->lge_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
lge_attach(dev)
	device_t		dev;
{
	int			s;
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct lge_softc));

	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, LGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, LGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, LGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("lge%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, LGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, LGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, LGE_PCI_INTLINE, irq, 4);
	}

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef LGE_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("lge%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("lge%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource(dev, LGE_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		printf("lge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		printf("lge%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET,
	    lge_intr, sc, &sc->lge_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		printf("lge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	/*
	 * A Level 1 chip was detected. Inform the world.
	 */
	printf("lge%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->lge_unit = unit;
	callout_handle_init(&sc->lge_stat_ch);
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		printf("lge%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->lge_ldata, sizeof(struct lge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		printf("lge%d: jumbo buffer allocation failed\n",
		    sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "lge";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_init = lge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
	ifp->if_capabilities = IFCAP_RXCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->lge_miibus,
	    lge_ifmedia_upd, lge_ifmedia_sts)) {
		printf("lge%d: MII without any PHY!\n", sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		lge_free_jumbo_mem(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	callout_handle_init(&sc->lge_stat_ch);

fail:
	splx(s);
	return(error);
}

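/*
 * Detach: stop the chip, detach the network interface and release all
 * resources allocated during attach.
 */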
static int
lge_detach(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	lge_reset(sc);
	lge_stop(sc);
	ether_ifdetach(ifp);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->lge_miibus);

	bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
	lge_free_jumbo_mem(sc);

	splx(s);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
lge_list_tx_init(sc)
	struct lge_softc	*sc;
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	cd = &sc->lge_cdata;
	ld = sc->lge_ldata;
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		ld->lge_tx_list[i].lge_mbuf = NULL;
		ld->lge_tx_list[i].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
lge_list_rx_init(sc)
	struct lge_softc	*sc;
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
lge_newbuf(sc, c, m)
	struct lge_softc	*sc;
	struct lge_rx_desc	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	caddr_t			*buf = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("lge%d: no memory for rx list "
			    "-- packet dropped!\n", sc->lge_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			printf("lge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->lge_unit);
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf */
		m_new->m_data = (void *)buf;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree,
		    (struct lge_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}

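/*
 * Allocate the jumbo buffer pool: one physically contiguous region is
 * carved into LGE_JSLOTS slots of LGE_JLEN bytes, each tracked by an
 * entry on the free list.  lge_jalloc() and lge_jfree() move entries
 * between the free and in-use lists.
 */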
static int
lge_alloc_jumbo_mem(sc)
	struct lge_softc	*sc;
{
	caddr_t			ptr;
	register int		i;
	struct lge_jpool_entry   *entry;

	/* Grab a big chunk o' storage. */
	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
		printf("lge%d: no memory for jumbo buffers!\n", sc->lge_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->lge_jfree_listhead);
	SLIST_INIT(&sc->lge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->lge_cdata.lge_jumbo_buf;
	for (i = 0; i < LGE_JSLOTS; i++) {
		sc->lge_cdata.lge_jslots[i] = ptr;
		ptr += LGE_JLEN;
		entry = malloc(sizeof(struct lge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf("lge%d: no memory for jumbo "
			    "buffer queue!\n", sc->lge_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

static void
lge_free_jumbo_mem(sc)
	struct lge_softc	*sc;
{
	int			i;
	struct lge_jpool_entry	*entry;

	for (i = 0; i < LGE_JSLOTS; i++) {
		entry = SLIST_FIRST(&sc->lge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
lge_jalloc(sc)
	struct lge_softc	*sc;
{
	struct lge_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc->lge_jfree_listhead);

	if (entry == NULL) {
#ifdef LGE_VERBOSE
		printf("lge%d: no free jumbo buffers\n", sc->lge_unit);
#endif
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
	return(sc->lge_cdata.lge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
lge_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct lge_softc	*sc;
	int		        i;
	struct lge_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	sc = args;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
lge_rxeof(sc, cnt)
	struct lge_softc	*sc;
	int			cnt;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				printf("lge%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc->lge_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		(*ifp->if_input)(ifp, m);
	}

	sc->lge_cdata.lge_rx_cons = i;

	return;
}

static void
lge_rxeoc(sc)
	struct lge_softc	*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

static void
lge_txeof(sc)
	struct lge_softc	*sc;
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

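/*
 * One-second timer: accumulate collision statistics and, while the link
 * is down, poll the PHY so we notice when it comes up.
 */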
static void
lge_tick(xsc)
	void			*xsc;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				printf("lge%d: gigabit link up\n",
				    sc->lge_unit);
			if (ifp->if_snd.ifq_head != NULL)
				lge_start(ifp);
		}
	}

	sc->lge_stat_ch = timeout(lge_tick, sc, hz);

	splx(s);

	return;
}

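/*
 * Interrupt handler: reading the ISR acknowledges pending events, so loop
 * handling TX completions, RX completions, RX command FIFO exhaustion and
 * PHY interrupts until no interesting bits remain, then re-enable
 * interrupts and restart transmission if there is queued output.
 */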
static void
lge_intr(arg)
	void			*arg;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return;
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			sc->lge_link = 0;
			untimeout(lge_tick, sc, sc->lge_stat_ch);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
lge_encap(sc, m_head, txidx)
	struct lge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
lge_start(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

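/*
 * Initialize the hardware: program the station address, set up the RX and
 * TX descriptor lists, configure the receive filter and checksum offload,
 * enable the receiver, transmitter and interrupts, and start the
 * stats/link timer.
 */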
static void
lge_init(xsc)
	void			*xsc;
{
	struct lge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	int			s;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	mii = device_get_softc(sc->lge_miibus);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("lge%d: initialization failed: no "
		    "memory for rx buffers\n", sc->lge_unit);
		lge_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	sc->lge_stat_ch = timeout(lge_tick, sc, hz);

	return;
}

/*
 * Set media options.
 */
static int
lge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->lge_miibus);
	sc->lge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
lge_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->lge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

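/*
 * Handle interface ioctls: MTU changes, interface flags (up/down and
 * promiscuous mode), multicast list updates and media requests.
 */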
static int
lge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		sc->lge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lge_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->lge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void)splx(s);

	return(error);
}

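/*
 * Watchdog: the transmitter has apparently wedged, so log the error,
 * reset and reinitialize the chip, then kick the transmit queue.
 */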
static void
lge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("lge%d: watchdog timeout\n", sc->lge_unit);

	lge_stop(sc);
	lge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
lge_stop(sc)
	struct lge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	untimeout(lge_tick, sc, sc->lge_stat_ch);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

	/* Disable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
	sc->lge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_rx_list,
		sizeof(sc->lge_ldata->lge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
		}
	}

	bzero((char *)&sc->lge_ldata->lge_tx_list,
		sizeof(sc->lge_ldata->lge_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
lge_shutdown(dev)
	device_t		dev;
{
	struct lge_softc	*sc;

	sc = device_get_softc(dev);

	lge_reset(sc);
	lge_stop(sc);

	return;
}