1/*-
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2000, 2001
4 *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 *    must display the following acknowledgement:
16 *	This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/dev/lge/if_lge.c 192024 2009-05-12 19:33:36Z brueffer $");
36
37/*
38 * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
39 * documentation not available, but ask me nicely.
40 *
41 * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
42 * It's a 64-bit PCI part that supports TCP/IP checksum offload,
43 * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
44 * are three supported methods for data transfer between host and
45 * NIC: programmed I/O, traditional scatter/gather DMA and Packet
46 * Propulsion Technology (tm) DMA. The latter mechanism is a form
47 * of double-buffered DMA where the packet data is copied to a
48 * pre-allocated DMA buffer whose physical address has been loaded
49 * into a table at device initialization time. The rationale is that
50 * the virtual to physical address translation needed for normal
51 * scatter/gather DMA is more expensive than the data copy needed
52 * for double buffering. This may be true in Windows NT and the like,
53 * but it isn't true for us, at least on the x86 arch. This driver
54 * uses the scatter/gather I/O method for both TX and RX.
55 *
56 * The LXT1001 only supports TCP/IP checksum offload on receive.
57 * Also, the VLAN tagging is done using a 16-entry table which allows
58 * the chip to perform hardware filtering based on VLAN tags. Sadly,
59 * our VLAN support doesn't currently play well with this kind of
60 * hardware support.
61 *
62 * Special thanks to:
63 * - Jeff James at Intel, for arranging to have the LXT1001 manual
64 *   released (at long last)
65 * - Beny Chen at D-Link, for actually sending it to me
66 * - Brad Short and Keith Alexis at SMC, for sending me sample
67 *   SMC9462SX and SMC9462TX adapters for testing
68 * - Paul Saab at Y!, for not killing me (though it remains to be seen
69 *   if in fact he did me much of a favor)
70 */
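
/*
 * Illustrative sketch only, not driver code: the two transfer schemes
 * described above, expressed with primitives this file already uses.
 * 'premapped_buf' is a hypothetical buffer whose physical address would
 * have been handed to the chip at initialization time.
 *
 *	// Packet Propulsion style: bounce-copy the frame into the
 *	// pre-mapped buffer the chip already knows about.
 *	m_copydata(m, 0, m->m_pkthdr.len, premapped_buf);
 *
 *	// Scatter/gather style (what lge_encap() does below): hand the
 *	// chip the physical address of each fragment at transmit time.
 *	f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
 */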
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/sockio.h>
75#include <sys/mbuf.h>
76#include <sys/malloc.h>
77#include <sys/kernel.h>
78#include <sys/module.h>
79#include <sys/socket.h>
80
81#include <net/if.h>
82#include <net/if_arp.h>
83#include <net/ethernet.h>
84#include <net/if_dl.h>
85#include <net/if_media.h>
86#include <net/if_types.h>
87
88#include <net/bpf.h>
89
90#include <vm/vm.h>              /* for vtophys */
91#include <vm/pmap.h>            /* for vtophys */
92#include <machine/bus.h>
93#include <machine/resource.h>
94#include <sys/bus.h>
95#include <sys/rman.h>
96
97#include <dev/mii/mii.h>
98#include <dev/mii/miivar.h>
99
100#include <dev/pci/pcireg.h>
101#include <dev/pci/pcivar.h>
102
103#define LGE_USEIOSPACE
104
105#include <dev/lge/if_lgereg.h>
106
107/* "device miibus" required.  See GENERIC if you get errors here. */
108#include "miibus_if.h"
109
110/*
111 * Various supported device vendors/types and their names.
112 */
113static struct lge_type lge_devs[] = {
114	{ LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" },
115	{ 0, 0, NULL }
116};
117
118static int lge_probe(device_t);
119static int lge_attach(device_t);
120static int lge_detach(device_t);
121
122static int lge_alloc_jumbo_mem(struct lge_softc *);
123static void lge_free_jumbo_mem(struct lge_softc *);
124static void *lge_jalloc(struct lge_softc *);
125static void lge_jfree(void *, void *);
126
127static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *);
128static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
129static void lge_rxeof(struct lge_softc *, int);
130static void lge_rxeoc(struct lge_softc *);
131static void lge_txeof(struct lge_softc *);
132static void lge_intr(void *);
133static void lge_tick(void *);
134static void lge_start(struct ifnet *);
135static void lge_start_locked(struct ifnet *);
136static int lge_ioctl(struct ifnet *, u_long, caddr_t);
137static void lge_init(void *);
138static void lge_init_locked(struct lge_softc *);
139static void lge_stop(struct lge_softc *);
140static void lge_watchdog(struct ifnet *);
141static int lge_shutdown(device_t);
142static int lge_ifmedia_upd(struct ifnet *);
143static void lge_ifmedia_upd_locked(struct ifnet *);
144static void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
145
146static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
147static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);
148
149static int lge_miibus_readreg(device_t, int, int);
150static int lge_miibus_writereg(device_t, int, int, int);
151static void lge_miibus_statchg(device_t);
152
153static void lge_setmulti(struct lge_softc *);
154static void lge_reset(struct lge_softc *);
155static int lge_list_rx_init(struct lge_softc *);
156static int lge_list_tx_init(struct lge_softc *);
157
158#ifdef LGE_USEIOSPACE
159#define LGE_RES			SYS_RES_IOPORT
160#define LGE_RID			LGE_PCI_LOIO
161#else
162#define LGE_RES			SYS_RES_MEMORY
163#define LGE_RID			LGE_PCI_LOMEM
164#endif
165
166static device_method_t lge_methods[] = {
167	/* Device interface */
168	DEVMETHOD(device_probe,		lge_probe),
169	DEVMETHOD(device_attach,	lge_attach),
170	DEVMETHOD(device_detach,	lge_detach),
171	DEVMETHOD(device_shutdown,	lge_shutdown),
172
173	/* bus interface */
174	DEVMETHOD(bus_print_child,	bus_generic_print_child),
175	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
176
177	/* MII interface */
178	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
179	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
180	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),
181
182	{ 0, 0 }
183};
184
185static driver_t lge_driver = {
186	"lge",
187	lge_methods,
188	sizeof(struct lge_softc)
189};
190
191static devclass_t lge_devclass;
192
193DRIVER_MODULE(lge, pci, lge_driver, lge_devclass, 0, 0);
194DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0);
195MODULE_DEPEND(lge, pci, 1, 1, 1);
196MODULE_DEPEND(lge, ether, 1, 1, 1);
197MODULE_DEPEND(lge, miibus, 1, 1, 1);
198
199#define LGE_SETBIT(sc, reg, x)				\
200	CSR_WRITE_4(sc, reg,				\
201		CSR_READ_4(sc, reg) | (x))
202
203#define LGE_CLRBIT(sc, reg, x)				\
204	CSR_WRITE_4(sc, reg,				\
205		CSR_READ_4(sc, reg) & ~(x))
206
207#define SIO_SET(x)					\
208	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)
209
210#define SIO_CLR(x)					\
211	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
212
213/*
214 * Read a word of data stored in the EEPROM at address 'addr.'
215 */
216static void
217lge_eeprom_getword(sc, addr, dest)
218	struct lge_softc	*sc;
219	int			addr;
220	u_int16_t		*dest;
221{
222	register int		i;
223	u_int32_t		val;
224
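	/*
	 * Each EEPROM access returns a 32-bit word holding two 16-bit
	 * words: (addr >> 1) selects the 32-bit word to fetch, and the
	 * low bit of addr picks the upper or lower half below.
	 */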
225	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
226	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));
227
228	for (i = 0; i < LGE_TIMEOUT; i++)
229		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
230			break;
231
232	if (i == LGE_TIMEOUT) {
233		device_printf(sc->lge_dev, "EEPROM read timed out\n");
234		return;
235	}
236
237	val = CSR_READ_4(sc, LGE_EEDATA);
238
239	if (addr & 1)
240		*dest = (val >> 16) & 0xFFFF;
241	else
242		*dest = val & 0xFFFF;
243
244	return;
245}
246
247/*
248 * Read a sequence of words from the EEPROM.
249 */
250static void
251lge_read_eeprom(sc, dest, off, cnt, swap)
252	struct lge_softc	*sc;
253	caddr_t			dest;
254	int			off;
255	int			cnt;
256	int			swap;
257{
258	int			i;
259	u_int16_t		word = 0, *ptr;
260
261	for (i = 0; i < cnt; i++) {
262		lge_eeprom_getword(sc, off + i, &word);
263		ptr = (u_int16_t *)(dest + (i * 2));
264		if (swap)
265			*ptr = ntohs(word);
266		else
267			*ptr = word;
268	}
269
270	return;
271}
272
273static int
274lge_miibus_readreg(dev, phy, reg)
275	device_t		dev;
276	int			phy, reg;
277{
278	struct lge_softc	*sc;
279	int			i;
280
281	sc = device_get_softc(dev);
282
283	/*
284	 * If we have a non-PCS PHY, pretend that the internal
285	 * autoneg stuff at PHY address 0 isn't there so that
286	 * the miibus code will find only the GMII PHY.
287	 */
288	if (sc->lge_pcs == 0 && phy == 0)
289		return(0);
290
291	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);
292
293	for (i = 0; i < LGE_TIMEOUT; i++)
294		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
295			break;
296
297	if (i == LGE_TIMEOUT) {
298		device_printf(sc->lge_dev, "PHY read timed out\n");
299		return(0);
300	}
301
302	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
303}
304
305static int
306lge_miibus_writereg(dev, phy, reg, data)
307	device_t		dev;
308	int			phy, reg, data;
309{
310	struct lge_softc	*sc;
311	int			i;
312
313	sc = device_get_softc(dev);
314
315	CSR_WRITE_4(sc, LGE_GMIICTL,
316	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);
317
318	for (i = 0; i < LGE_TIMEOUT; i++)
319		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
320			break;
321
322	if (i == LGE_TIMEOUT) {
323		device_printf(sc->lge_dev, "PHY write timed out\n");
324		return(0);
325	}
326
327	return(0);
328}
329
330static void
331lge_miibus_statchg(dev)
332	device_t		dev;
333{
334	struct lge_softc	*sc;
335	struct mii_data		*mii;
336
337	sc = device_get_softc(dev);
338	mii = device_get_softc(sc->lge_miibus);
339
340	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
341	switch (IFM_SUBTYPE(mii->mii_media_active)) {
342	case IFM_1000_T:
343	case IFM_1000_SX:
344		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
345		break;
346	case IFM_100_TX:
347		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
348		break;
349	case IFM_10_T:
350		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
351		break;
352	default:
353		/*
354		 * Choose something, even if it's wrong. Clearing
355		 * all the bits will hose autoneg on the internal
356		 * PHY.
357		 */
358		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
359		break;
360	}
361
362	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
363		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
364	} else {
365		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
366	}
367
368	return;
369}
370
371static void
372lge_setmulti(sc)
373	struct lge_softc	*sc;
374{
375	struct ifnet		*ifp;
376	struct ifmultiaddr	*ifma;
377	u_int32_t		h = 0, hashes[2] = { 0, 0 };
378
379	ifp = sc->lge_ifp;
380	LGE_LOCK_ASSERT(sc);
381
382	/* Make sure multicast hash table is enabled. */
383	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);
384
385	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
386		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
387		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
388		return;
389	}
390
391	/* first, zot all the existing hash bits */
392	CSR_WRITE_4(sc, LGE_MAR0, 0);
393	CSR_WRITE_4(sc, LGE_MAR1, 0);
394
395	/* now program new ones */
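	/*
	 * Each address hashes to one of 64 filter bits: the top 6 bits
	 * of the big-endian CRC of the address select a bit in LGE_MAR0
	 * (0-31) or LGE_MAR1 (32-63).
	 */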
396	IF_ADDR_LOCK(ifp);
397	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
398		if (ifma->ifma_addr->sa_family != AF_LINK)
399			continue;
400		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
401		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
402		if (h < 32)
403			hashes[0] |= (1 << h);
404		else
405			hashes[1] |= (1 << (h - 32));
406	}
407	IF_ADDR_UNLOCK(ifp);
408
409	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
410	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
411
412	return;
413}
414
415static void
416lge_reset(sc)
417	struct lge_softc	*sc;
418{
419	register int		i;
420
421	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);
422
423	for (i = 0; i < LGE_TIMEOUT; i++) {
424		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
425			break;
426	}
427
428	if (i == LGE_TIMEOUT)
429		device_printf(sc->lge_dev, "reset never completed\n");
430
431	/* Wait a little while for the chip to get its brains in order. */
432	DELAY(1000);
433
434	return;
435}
436
437/*
438 * Probe for a Level 1 chip. Check the PCI vendor and device
439 * IDs against our list and return a device name if we find a match.
440 */
441static int
442lge_probe(dev)
443	device_t		dev;
444{
445	struct lge_type		*t;
446
447	t = lge_devs;
448
449	while(t->lge_name != NULL) {
450		if ((pci_get_vendor(dev) == t->lge_vid) &&
451		    (pci_get_device(dev) == t->lge_did)) {
452			device_set_desc(dev, t->lge_name);
453			return(BUS_PROBE_DEFAULT);
454		}
455		t++;
456	}
457
458	return(ENXIO);
459}
460
461/*
462 * Attach the interface. Allocate softc structures, do ifmedia
463 * setup and ethernet/BPF attach.
464 */
465static int
466lge_attach(dev)
467	device_t		dev;
468{
469	u_char			eaddr[ETHER_ADDR_LEN];
470	struct lge_softc	*sc;
471	struct ifnet		*ifp = NULL;
472	int			error = 0, rid;
473
474	sc = device_get_softc(dev);
475	sc->lge_dev = dev;
476
477	mtx_init(&sc->lge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
478	    MTX_DEF);
479	callout_init_mtx(&sc->lge_stat_callout, &sc->lge_mtx, 0);
480
481	/*
482	 * Map control/status registers.
483	 */
484	pci_enable_busmaster(dev);
485
486	rid = LGE_RID;
487	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);
488
489	if (sc->lge_res == NULL) {
490		device_printf(dev, "couldn't map ports/memory\n");
491		error = ENXIO;
492		goto fail;
493	}
494
495	sc->lge_btag = rman_get_bustag(sc->lge_res);
496	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);
497
498	/* Allocate interrupt */
499	rid = 0;
500	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
501	    RF_SHAREABLE | RF_ACTIVE);
502
503	if (sc->lge_irq == NULL) {
504		device_printf(dev, "couldn't map interrupt\n");
505		error = ENXIO;
506		goto fail;
507	}
508
509	/* Reset the adapter. */
510	lge_reset(sc);
511
512	/*
513	 * Get station address from the EEPROM.
514	 */
515	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
516	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
517	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);
518
519	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
520	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
521
522	if (sc->lge_ldata == NULL) {
523		device_printf(dev, "no memory for list buffers!\n");
524		error = ENXIO;
525		goto fail;
526	}
527
528	/* Try to allocate memory for jumbo buffers. */
529	if (lge_alloc_jumbo_mem(sc)) {
530		device_printf(dev, "jumbo buffer allocation failed\n");
531		error = ENXIO;
532		goto fail;
533	}
534
535	ifp = sc->lge_ifp = if_alloc(IFT_ETHER);
536	if (ifp == NULL) {
537		device_printf(dev, "can not if_alloc()\n");
538		error = ENOSPC;
539		goto fail;
540	}
541	ifp->if_softc = sc;
542	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
543	ifp->if_mtu = ETHERMTU;
544	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
545	ifp->if_ioctl = lge_ioctl;
546	ifp->if_start = lge_start;
547	ifp->if_watchdog = lge_watchdog;
548	ifp->if_init = lge_init;
549	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
550	ifp->if_capabilities = IFCAP_RXCSUM;
551	ifp->if_capenable = ifp->if_capabilities;
552
553	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
554		sc->lge_pcs = 1;
555	else
556		sc->lge_pcs = 0;
557
558	/*
559	 * Do MII setup.
560	 */
561	if (mii_phy_probe(dev, &sc->lge_miibus,
562	    lge_ifmedia_upd, lge_ifmedia_sts)) {
563		device_printf(dev, "MII without any PHY!\n");
564		error = ENXIO;
565		goto fail;
566	}
567
568	/*
569	 * Call MI attach routine.
570	 */
571	ether_ifattach(ifp, eaddr);
572
573	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET | INTR_MPSAFE,
574	    NULL, lge_intr, sc, &sc->lge_intrhand);
575
576	if (error) {
577		ether_ifdetach(ifp);
578		device_printf(dev, "couldn't set up irq\n");
579		goto fail;
580	}
581	return (0);
582
583fail:
584	lge_free_jumbo_mem(sc);
585	if (sc->lge_ldata)
586		contigfree(sc->lge_ldata,
587		    sizeof(struct lge_list_data), M_DEVBUF);
588	if (ifp)
589		if_free(ifp);
590	if (sc->lge_irq)
591		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
592	if (sc->lge_res)
593		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
594	mtx_destroy(&sc->lge_mtx);
595	return(error);
596}
597
598static int
599lge_detach(dev)
600	device_t		dev;
601{
602	struct lge_softc	*sc;
603	struct ifnet		*ifp;
604
605	sc = device_get_softc(dev);
606	ifp = sc->lge_ifp;
607
608	LGE_LOCK(sc);
609	lge_reset(sc);
610	lge_stop(sc);
611	LGE_UNLOCK(sc);
612	callout_drain(&sc->lge_stat_callout);
613	ether_ifdetach(ifp);
614
615	bus_generic_detach(dev);
616	device_delete_child(dev, sc->lge_miibus);
617
618	bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
619	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
620	bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
621
622	contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
623	if_free(ifp);
624	lge_free_jumbo_mem(sc);
625	mtx_destroy(&sc->lge_mtx);
626
627	return(0);
628}
629
630/*
631 * Initialize the transmit descriptors.
632 */
633static int
634lge_list_tx_init(sc)
635	struct lge_softc	*sc;
636{
637	struct lge_list_data	*ld;
638	struct lge_ring_data	*cd;
639	int			i;
640
641	cd = &sc->lge_cdata;
642	ld = sc->lge_ldata;
643	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
644		ld->lge_tx_list[i].lge_mbuf = NULL;
645		ld->lge_tx_list[i].lge_ctl = 0;
646	}
647
648	cd->lge_tx_prod = cd->lge_tx_cons = 0;
649
650	return(0);
651}
652
653
654/*
655 * Initialize the RX descriptors and allocate mbufs for them. Note that
656 * we arrange the descriptors in a closed ring, so that the last descriptor
657 * points back to the first.
658 */
659static int
660lge_list_rx_init(sc)
661	struct lge_softc	*sc;
662{
663	struct lge_list_data	*ld;
664	struct lge_ring_data	*cd;
665	int			i;
666
667	ld = sc->lge_ldata;
668	cd = &sc->lge_cdata;
669
670	cd->lge_rx_prod = cd->lge_rx_cons = 0;
671
672	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
673
674	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
675		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
676			break;
677		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
678			return(ENOBUFS);
679	}
680
681	/* Clear possible 'rx command queue empty' interrupt. */
682	CSR_READ_4(sc, LGE_ISR);
683
684	return(0);
685}
686
687/*
688 * Initialize an RX descriptor and attach an MBUF cluster.
689 */
690static int
691lge_newbuf(sc, c, m)
692	struct lge_softc	*sc;
693	struct lge_rx_desc	*c;
694	struct mbuf		*m;
695{
696	struct mbuf		*m_new = NULL;
697	caddr_t			*buf = NULL;
698
699	if (m == NULL) {
700		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
701		if (m_new == NULL) {
702			device_printf(sc->lge_dev, "no memory for rx list "
703			    "-- packet dropped!\n");
704			return(ENOBUFS);
705		}
706
707		/* Allocate the jumbo buffer */
708		buf = lge_jalloc(sc);
709		if (buf == NULL) {
710#ifdef LGE_VERBOSE
711			device_printf(sc->lge_dev, "jumbo allocation failed "
712			    "-- packet dropped!\n");
713#endif
714			m_freem(m_new);
715			return(ENOBUFS);
716		}
717		/* Attach the buffer to the mbuf */
718		m_new->m_data = (void *)buf;
719		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
720		MEXTADD(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree,
721		    buf, (struct lge_softc *)sc, 0, EXT_NET_DRV);
722	} else {
723		m_new = m;
724		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
725		m_new->m_data = m_new->m_ext.ext_buf;
726	}
727
728	/*
729	 * Adjust alignment so packet payload begins on a
730	 * longword boundary. Mandatory for Alpha, useful on
731	 * x86 too.
732	 */
733	m_adj(m_new, ETHER_ALIGN);
734
735	c->lge_mbuf = m_new;
736	c->lge_fragptr_hi = 0;
737	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
738	c->lge_fraglen = m_new->m_len;
739	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
740	c->lge_sts = 0;
741
742	/*
743	 * Put this buffer in the RX command FIFO. To do this,
744	 * we just write the physical address of the descriptor
745	 * into the RX descriptor address registers. Note that
746	 * there are two registers, one high DWORD and one low
747	 * DWORD, which lets us specify a 64-bit address if
748	 * desired. We only use a 32-bit address for now.
749	 * Writing to the low DWORD register is what actually
750	 * causes the command to be issued, so we do that
751	 * last.
752	 */
753	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
754	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);
755
756	return(0);
757}
758
759static int
760lge_alloc_jumbo_mem(sc)
761	struct lge_softc	*sc;
762{
763	caddr_t			ptr;
764	register int		i;
765	struct lge_jpool_entry   *entry;
766
767	/* Grab a big chunk o' storage. */
768	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
769	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
770
771	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
772		device_printf(sc->lge_dev, "no memory for jumbo buffers!\n");
773		return(ENOBUFS);
774	}
775
776	SLIST_INIT(&sc->lge_jfree_listhead);
777	SLIST_INIT(&sc->lge_jinuse_listhead);
778
779	/*
780	 * Now divide it up into 9K pieces and save the addresses
781	 * in an array.
782	 */
783	ptr = sc->lge_cdata.lge_jumbo_buf;
784	for (i = 0; i < LGE_JSLOTS; i++) {
785		sc->lge_cdata.lge_jslots[i] = ptr;
786		ptr += LGE_JLEN;
787		entry = malloc(sizeof(struct lge_jpool_entry),
788		    M_DEVBUF, M_NOWAIT);
789		if (entry == NULL) {
790			device_printf(sc->lge_dev, "no memory for jumbo "
791			    "buffer queue!\n");
792			return(ENOBUFS);
793		}
794		entry->slot = i;
795		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
796		    entry, jpool_entries);
797	}
798
799	return(0);
800}
801
802static void
803lge_free_jumbo_mem(sc)
804	struct lge_softc	*sc;
805{
806	struct lge_jpool_entry	*entry;
807
808	if (sc->lge_cdata.lge_jumbo_buf == NULL)
809		return;
810
811	while ((entry = SLIST_FIRST(&sc->lge_jinuse_listhead))) {
812		device_printf(sc->lge_dev,
813		    "asked to free buffer that is in use!\n");
814		SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
815		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry,
816		    jpool_entries);
817	}
818	while (!SLIST_EMPTY(&sc->lge_jfree_listhead)) {
819		entry = SLIST_FIRST(&sc->lge_jfree_listhead);
820		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
821		free(entry, M_DEVBUF);
822	}
823
824	contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
825
826	return;
827}
828
829/*
830 * Allocate a jumbo buffer.
831 */
832static void *
833lge_jalloc(sc)
834	struct lge_softc	*sc;
835{
836	struct lge_jpool_entry   *entry;
837
838	entry = SLIST_FIRST(&sc->lge_jfree_listhead);
839
840	if (entry == NULL) {
841#ifdef LGE_VERBOSE
842		device_printf(sc->lge_dev, "no free jumbo buffers\n");
843#endif
844		return(NULL);
845	}
846
847	SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
848	SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
849	return(sc->lge_cdata.lge_jslots[entry->slot]);
850}
851
852/*
853 * Release a jumbo buffer.
854 */
855static void
856lge_jfree(buf, args)
857	void			*buf;
858	void			*args;
859{
860	struct lge_softc	*sc;
861	int		        i;
862	struct lge_jpool_entry   *entry;
863
864	/* Extract the softc struct pointer. */
865	sc = args;
866
867	if (sc == NULL)
868		panic("lge_jfree: can't find softc pointer!");
869
870	/* calculate the slot this buffer belongs to */
871	i = ((vm_offset_t)buf
872	     - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;
873
874	if ((i < 0) || (i >= LGE_JSLOTS))
875		panic("lge_jfree: asked to free buffer that we don't manage!");
876
877	entry = SLIST_FIRST(&sc->lge_jinuse_listhead);
878	if (entry == NULL)
879		panic("lge_jfree: buffer not in use!");
880	entry->slot = i;
881	SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
882	SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);
883
884	return;
885}
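
/*
 * Rough lifecycle of a jumbo buffer, as used by lge_newbuf(); this is an
 * illustrative sketch, not additional driver code:
 *
 *	buf = lge_jalloc(sc);				// take a free slot
 *	MEXTADD(m, buf, LGE_JUMBO_FRAMELEN, lge_jfree,	// attach to an mbuf
 *	    buf, sc, 0, EXT_NET_DRV);
 *	m_freem(m);					// mbuf teardown calls
 *							// lge_jfree(), returning
 *							// the slot to the pool
 */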
886
887/*
888 * A frame has been uploaded: pass the resulting mbuf chain up to
889 * the higher level protocols.
890 */
891static void
892lge_rxeof(sc, cnt)
893	struct lge_softc	*sc;
894	int			cnt;
895{
896	struct mbuf		*m;
897	struct ifnet		*ifp;
898	struct lge_rx_desc	*cur_rx;
899	int			c, i, total_len = 0;
900	u_int32_t		rxsts, rxctl;
901
902	ifp = sc->lge_ifp;
903
904	/* Find out how many frames were processed. */
905	c = cnt;
906	i = sc->lge_cdata.lge_rx_cons;
907
908	/* Suck them in. */
909	while(c) {
910		struct mbuf		*m0 = NULL;
911
912		cur_rx = &sc->lge_ldata->lge_rx_list[i];
913		rxctl = cur_rx->lge_ctl;
914		rxsts = cur_rx->lge_sts;
915		m = cur_rx->lge_mbuf;
916		cur_rx->lge_mbuf = NULL;
917		total_len = LGE_RXBYTES(cur_rx);
918		LGE_INC(i, LGE_RX_LIST_CNT);
919		c--;
920
921		/*
922		 * If an error occurs, update stats, clear the
923		 * status word and leave the mbuf cluster in place:
924		 * it should simply get re-used next time this descriptor
925		 * comes up in the ring.
926		 */
927		if (rxctl & LGE_RXCTL_ERRMASK) {
928			ifp->if_ierrors++;
929			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
930			continue;
931		}
932
933		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
934			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
935			    ifp, NULL);
936			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
937			if (m0 == NULL) {
938				device_printf(sc->lge_dev, "no receive buffers "
939				    "available -- packet dropped!\n");
940				ifp->if_ierrors++;
941				continue;
942			}
943			m = m0;
944		} else {
945			m->m_pkthdr.rcvif = ifp;
946			m->m_pkthdr.len = m->m_len = total_len;
947		}
948
949		ifp->if_ipackets++;
950
951		/* Do IP checksum checking. */
952		if (rxsts & LGE_RXSTS_ISIP)
953			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
954		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
955			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
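		/*
		 * For TCP/UDP frames whose hardware checksum verified
		 * clean, report a valid pseudo-header checksum
		 * (csum_data = 0xffff) so the stack can skip its own
		 * software check.
		 */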
956		if ((rxsts & LGE_RXSTS_ISTCP &&
957		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
958		    (rxsts & LGE_RXSTS_ISUDP &&
959		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
960			m->m_pkthdr.csum_flags |=
961			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
962			m->m_pkthdr.csum_data = 0xffff;
963		}
964
965		LGE_UNLOCK(sc);
966		(*ifp->if_input)(ifp, m);
967		LGE_LOCK(sc);
968	}
969
970	sc->lge_cdata.lge_rx_cons = i;
971
972	return;
973}
974
975static void
976lge_rxeoc(sc)
977	struct lge_softc	*sc;
978{
979	struct ifnet		*ifp;
980
981	ifp = sc->lge_ifp;
982	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
983	lge_init_locked(sc);
984	return;
985}
986
987/*
988 * A frame was downloaded to the chip. It's safe for us to clean up
989 * the list buffers.
990 */
991
992static void
993lge_txeof(sc)
994	struct lge_softc	*sc;
995{
996	struct lge_tx_desc	*cur_tx = NULL;
997	struct ifnet		*ifp;
998	u_int32_t		idx, txdone;
999
1000	ifp = sc->lge_ifp;
1001
1002	/* Clear the timeout timer. */
1003	ifp->if_timer = 0;
1004
1005	/*
1006	 * Go through our tx list and free mbufs for those
1007	 * frames that have been transmitted.
1008	 */
1009	idx = sc->lge_cdata.lge_tx_cons;
1010	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);
1011
1012	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
1013		cur_tx = &sc->lge_ldata->lge_tx_list[idx];
1014
1015		ifp->if_opackets++;
1016		if (cur_tx->lge_mbuf != NULL) {
1017			m_freem(cur_tx->lge_mbuf);
1018			cur_tx->lge_mbuf = NULL;
1019		}
1020		cur_tx->lge_ctl = 0;
1021
1022		txdone--;
1023		LGE_INC(idx, LGE_TX_LIST_CNT);
1024		ifp->if_timer = 0;
1025	}
1026
1027	sc->lge_cdata.lge_tx_cons = idx;
1028
1029	if (cur_tx != NULL)
1030		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1031
1032	return;
1033}
1034
1035static void
1036lge_tick(xsc)
1037	void			*xsc;
1038{
1039	struct lge_softc	*sc;
1040	struct mii_data		*mii;
1041	struct ifnet		*ifp;
1042
1043	sc = xsc;
1044	ifp = sc->lge_ifp;
1045	LGE_LOCK_ASSERT(sc);
1046
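	/*
	 * Collision counters are read through an index/value register
	 * pair: select the statistic in LGE_STATSIDX, then read it back
	 * from LGE_STATSVAL.
	 */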
1047	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
1048	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
1049	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
1050	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
1051
1052	if (!sc->lge_link) {
1053		mii = device_get_softc(sc->lge_miibus);
1054		mii_tick(mii);
1055		if (mii->mii_media_status & IFM_ACTIVE &&
1056		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1057			sc->lge_link++;
1058			if (bootverbose &&
1059		  	    (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
1060			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T))
1061				device_printf(sc->lge_dev, "gigabit link up\n");
1062			if (ifp->if_snd.ifq_head != NULL)
1063				lge_start_locked(ifp);
1064		}
1065	}
1066
1067	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);
1068
1069	return;
1070}
1071
1072static void
1073lge_intr(arg)
1074	void			*arg;
1075{
1076	struct lge_softc	*sc;
1077	struct ifnet		*ifp;
1078	u_int32_t		status;
1079
1080	sc = arg;
1081	ifp = sc->lge_ifp;
1082	LGE_LOCK(sc);
1083
1084	/* Suppress unwanted interrupts */
1085	if (!(ifp->if_flags & IFF_UP)) {
1086		lge_stop(sc);
1087		LGE_UNLOCK(sc);
1088		return;
1089	}
1090
1091	for (;;) {
1092		/*
1093		 * Reading the ISR register clears all interrupts, and
1094		 * clears the 'interrupts enabled' bit in the IMR
1095		 * register.
1096		 */
1097		status = CSR_READ_4(sc, LGE_ISR);
1098
1099		if ((status & LGE_INTRS) == 0)
1100			break;
1101
1102		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
1103			lge_txeof(sc);
1104
1105		if (status & LGE_ISR_RXDMA_DONE)
1106			lge_rxeof(sc, LGE_RX_DMACNT(status));
1107
1108		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
1109			lge_rxeoc(sc);
1110
1111		if (status & LGE_ISR_PHY_INTR) {
1112			sc->lge_link = 0;
1113			callout_stop(&sc->lge_stat_callout);
1114			lge_tick(sc);
1115		}
1116	}
1117
1118	/* Re-enable interrupts; the ISR read above cleared the enable bit. */
1119	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);
1120
1121	if (ifp->if_snd.ifq_head != NULL)
1122		lge_start_locked(ifp);
1123
1124	LGE_UNLOCK(sc);
1125	return;
1126}
1127
1128/*
1129 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1130 * pointers to the fragment pointers.
1131 */
1132static int
1133lge_encap(sc, m_head, txidx)
1134	struct lge_softc	*sc;
1135	struct mbuf		*m_head;
1136	u_int32_t		*txidx;
1137{
1138	struct lge_frag		*f = NULL;
1139	struct lge_tx_desc	*cur_tx;
1140	struct mbuf		*m;
1141	int			frag = 0, tot_len = 0;
1142
1143	/*
1144 	 * Start packing the mbufs in this chain into
1145	 * the fragment pointers. Stop when we run out
1146 	 * of fragments or hit the end of the mbuf chain.
1147	 */
1148	m = m_head;
1149	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
1150	frag = 0;
1151
1152	for (m = m_head; m != NULL; m = m->m_next) {
1153		if (m->m_len != 0) {
1154			tot_len += m->m_len;
1155			f = &cur_tx->lge_frags[frag];
1156			f->lge_fraglen = m->m_len;
1157			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
1158			f->lge_fragptr_hi = 0;
1159			frag++;
1160		}
1161	}
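	/*
	 * Note: the loop above walks the entire chain and assumes the
	 * caller never hands us more fragments than the descriptor's
	 * lge_frags[] array can describe.
	 */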
1162
1163	if (m != NULL)
1164		return(ENOBUFS);
1165
1166	cur_tx->lge_mbuf = m_head;
1167	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
1168	LGE_INC((*txidx), LGE_TX_LIST_CNT);
1169
1170	/* Queue for transmit */
1171	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));
1172
1173	return(0);
1174}
1175
1176/*
1177 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1178 * to the mbuf data regions directly in the transmit lists. We also save a
1179 * copy of the pointers since the transmit list fragment pointers are
1180 * physical addresses.
1181 */
1182
1183static void
1184lge_start(ifp)
1185	struct ifnet		*ifp;
1186{
1187	struct lge_softc	*sc;
1188
1189	sc = ifp->if_softc;
1190	LGE_LOCK(sc);
1191	lge_start_locked(ifp);
1192	LGE_UNLOCK(sc);
1193}
1194
1195static void
1196lge_start_locked(ifp)
1197	struct ifnet		*ifp;
1198{
1199	struct lge_softc	*sc;
1200	struct mbuf		*m_head = NULL;
1201	u_int32_t		idx;
1202
1203	sc = ifp->if_softc;
1204
1205	if (!sc->lge_link)
1206		return;
1207
1208	idx = sc->lge_cdata.lge_tx_prod;
1209
1210	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1211		return;
1212
1213	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
1214		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
1215			break;
1216
1217		IF_DEQUEUE(&ifp->if_snd, m_head);
1218		if (m_head == NULL)
1219			break;
1220
1221		if (lge_encap(sc, m_head, &idx)) {
1222			IF_PREPEND(&ifp->if_snd, m_head);
1223			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1224			break;
1225		}
1226
1227		/*
1228		 * If there's a BPF listener, bounce a copy of this frame
1229		 * to him.
1230		 */
1231		BPF_MTAP(ifp, m_head);
1232	}
1233
1234	sc->lge_cdata.lge_tx_prod = idx;
1235
1236	/*
1237	 * Set a timeout in case the chip goes out to lunch.
1238	 */
1239	ifp->if_timer = 5;
1240
1241	return;
1242}
1243
1244static void
1245lge_init(xsc)
1246	void			*xsc;
1247{
1248	struct lge_softc	*sc = xsc;
1249
1250	LGE_LOCK(sc);
1251	lge_init_locked(sc);
1252	LGE_UNLOCK(sc);
1253}
1254
1255static void
1256lge_init_locked(sc)
1257	struct lge_softc	*sc;
1258{
1259	struct ifnet		*ifp = sc->lge_ifp;
1260
1261	LGE_LOCK_ASSERT(sc);
1262	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1263		return;
1264
1265	/*
1266	 * Cancel pending I/O and free all RX/TX buffers.
1267	 */
1268	lge_stop(sc);
1269	lge_reset(sc);
1270
1271	/* Set MAC address */
1272	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&IF_LLADDR(sc->lge_ifp)[0]));
1273	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&IF_LLADDR(sc->lge_ifp)[4]));
1274
1275	/* Init circular RX list. */
1276	if (lge_list_rx_init(sc) == ENOBUFS) {
1277		device_printf(sc->lge_dev, "initialization failed: no "
1278		    "memory for rx buffers\n");
1279		lge_stop(sc);
1280		return;
1281	}
1282
1283	/*
1284	 * Init tx descriptors.
1285	 */
1286	lge_list_tx_init(sc);
1287
1288	/* Set initial value for MODE1 register. */
1289	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
1290	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
1291	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
1292	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);
1293
1294	/* If we want promiscuous mode, set the allframes bit. */
1295	if (ifp->if_flags & IFF_PROMISC) {
1296		CSR_WRITE_4(sc, LGE_MODE1,
1297		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
1298	} else {
1299		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
1300	}
1301
1302	/*
1303	 * Set the capture broadcast bit to capture broadcast frames.
1304	 */
1305	if (ifp->if_flags & IFF_BROADCAST) {
1306		CSR_WRITE_4(sc, LGE_MODE1,
1307		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
1308	} else {
1309		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
1310	}
1311
1312	/* Packet padding workaround? */
1313	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);
1314
1315	/* No error frames */
1316	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);
1317
1318	/* Receive large frames */
1319	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);
1320
1321	/* Workaround: disable RX/TX flow control */
1322	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
1323	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);
1324
1325	/* Make sure to strip CRC from received frames */
1326	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);
1327
1328	/* Turn off magic packet mode */
1329	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);
1330
1331	/* Turn off all VLAN stuff */
1332	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
1333	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);
1334
1335	/* Workaround: FIFO overflow */
1336	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
1337	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);
1338
1339	/*
1340	 * Load the multicast filter.
1341	 */
1342	lge_setmulti(sc);
1343
1344	/*
1345	 * Enable hardware checksum validation for all received IPv4
1346	 * packets, do not reject packets with bad checksums.
1347	 */
1348	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
1349	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
1350	    LGE_MODE2_RX_ERRCSUM);
1351
1352	/*
1353	 * Enable the delivery of PHY interrupts based on
1354	 * link/speed/duplex status changes.
1355	 */
1356	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);
1357
1358	/* Enable receiver and transmitter. */
1359	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
1360	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);
1361
1362	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
1363	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);
1364
1365	/*
1366	 * Enable interrupts.
1367	 */
1368	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
1369	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);
1370
1371	lge_ifmedia_upd_locked(ifp);
1372
1373	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1374	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1375
1376	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);
1377
1378	return;
1379}
1380
1381/*
1382 * Set media options.
1383 */
1384static int
1385lge_ifmedia_upd(ifp)
1386	struct ifnet		*ifp;
1387{
1388	struct lge_softc	*sc;
1389
1390	sc = ifp->if_softc;
1391	LGE_LOCK(sc);
1392	lge_ifmedia_upd_locked(ifp);
1393	LGE_UNLOCK(sc);
1394
1395	return(0);
1396}
1397
1398static void
1399lge_ifmedia_upd_locked(ifp)
1400	struct ifnet		*ifp;
1401{
1402	struct lge_softc	*sc;
1403	struct mii_data		*mii;
1404
1405	sc = ifp->if_softc;
1406
1407	LGE_LOCK_ASSERT(sc);
1408	mii = device_get_softc(sc->lge_miibus);
1409	sc->lge_link = 0;
1410	if (mii->mii_instance) {
1411		struct mii_softc	*miisc;
1412		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1413		    miisc = LIST_NEXT(miisc, mii_list))
1414			mii_phy_reset(miisc);
1415	}
1416	mii_mediachg(mii);
1417}
1418
1419/*
1420 * Report current media status.
1421 */
1422static void
1423lge_ifmedia_sts(ifp, ifmr)
1424	struct ifnet		*ifp;
1425	struct ifmediareq	*ifmr;
1426{
1427	struct lge_softc	*sc;
1428	struct mii_data		*mii;
1429
1430	sc = ifp->if_softc;
1431
1432	LGE_LOCK(sc);
1433	mii = device_get_softc(sc->lge_miibus);
1434	mii_pollstat(mii);
1435	LGE_UNLOCK(sc);
1436	ifmr->ifm_active = mii->mii_media_active;
1437	ifmr->ifm_status = mii->mii_media_status;
1438
1439	return;
1440}
1441
1442static int
1443lge_ioctl(ifp, command, data)
1444	struct ifnet		*ifp;
1445	u_long			command;
1446	caddr_t			data;
1447{
1448	struct lge_softc	*sc = ifp->if_softc;
1449	struct ifreq		*ifr = (struct ifreq *) data;
1450	struct mii_data		*mii;
1451	int			error = 0;
1452
1453	switch(command) {
1454	case SIOCSIFMTU:
1455		LGE_LOCK(sc);
1456		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
1457			error = EINVAL;
1458		else
1459			ifp->if_mtu = ifr->ifr_mtu;
1460		LGE_UNLOCK(sc);
1461		break;
1462	case SIOCSIFFLAGS:
1463		LGE_LOCK(sc);
1464		if (ifp->if_flags & IFF_UP) {
1465			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1466			    ifp->if_flags & IFF_PROMISC &&
1467			    !(sc->lge_if_flags & IFF_PROMISC)) {
1468				CSR_WRITE_4(sc, LGE_MODE1,
1469				    LGE_MODE1_SETRST_CTL1|
1470				    LGE_MODE1_RX_PROMISC);
1471			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1472			    !(ifp->if_flags & IFF_PROMISC) &&
1473			    sc->lge_if_flags & IFF_PROMISC) {
1474				CSR_WRITE_4(sc, LGE_MODE1,
1475				    LGE_MODE1_RX_PROMISC);
1476			} else {
1477				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1478				lge_init_locked(sc);
1479			}
1480		} else {
1481			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1482				lge_stop(sc);
1483		}
1484		sc->lge_if_flags = ifp->if_flags;
1485		LGE_UNLOCK(sc);
1486		error = 0;
1487		break;
1488	case SIOCADDMULTI:
1489	case SIOCDELMULTI:
1490		LGE_LOCK(sc);
1491		lge_setmulti(sc);
1492		LGE_UNLOCK(sc);
1493		error = 0;
1494		break;
1495	case SIOCGIFMEDIA:
1496	case SIOCSIFMEDIA:
1497		mii = device_get_softc(sc->lge_miibus);
1498		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1499		break;
1500	default:
1501		error = ether_ioctl(ifp, command, data);
1502		break;
1503	}
1504
1505	return(error);
1506}
1507
1508static void
1509lge_watchdog(ifp)
1510	struct ifnet		*ifp;
1511{
1512	struct lge_softc	*sc;
1513
1514	sc = ifp->if_softc;
1515
1516	LGE_LOCK(sc);
1517	ifp->if_oerrors++;
1518	if_printf(ifp, "watchdog timeout\n");
1519
1520	lge_stop(sc);
1521	lge_reset(sc);
1522	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1523	lge_init_locked(sc);
1524
1525	if (ifp->if_snd.ifq_head != NULL)
1526		lge_start_locked(ifp);
1527	LGE_UNLOCK(sc);
1528
1529	return;
1530}
1531
1532/*
1533 * Stop the adapter and free any mbufs allocated to the
1534 * RX and TX lists.
1535 */
1536static void
1537lge_stop(sc)
1538	struct lge_softc	*sc;
1539{
1540	register int		i;
1541	struct ifnet		*ifp;
1542
1543	LGE_LOCK_ASSERT(sc);
1544	ifp = sc->lge_ifp;
1545	ifp->if_timer = 0;
1546	callout_stop(&sc->lge_stat_callout);
1547	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1548
1549	/* Disable receiver and transmitter. */
1550	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1551	sc->lge_link = 0;
1552
1553	/*
1554	 * Free data in the RX lists.
1555	 */
1556	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1557		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1558			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1559			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1560		}
1561	}
1562	bzero((char *)&sc->lge_ldata->lge_rx_list,
1563		sizeof(sc->lge_ldata->lge_rx_list));
1564
1565	/*
1566	 * Free the TX list buffers.
1567	 */
1568	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1569		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1570			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1571			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1572		}
1573	}
1574
1575	bzero((char *)&sc->lge_ldata->lge_tx_list,
1576		sizeof(sc->lge_ldata->lge_tx_list));
1577
1578	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1579
1580	return;
1581}
1582
1583/*
1584 * Stop all chip I/O so that the kernel's probe routines don't
1585 * get confused by errant DMAs when rebooting.
1586 */
1587static int
1588lge_shutdown(dev)
1589	device_t		dev;
1590{
1591	struct lge_softc	*sc;
1592
1593	sc = device_get_softc(dev);
1594
1595	LGE_LOCK(sc);
1596	lge_reset(sc);
1597	lge_stop(sc);
1598	LGE_UNLOCK(sc);
1599
1600	return (0);
1601}
1602