if_gem.c revision 177560
/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 177560 2008-03-24 17:23:53Z marius $");

/*
 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);

#define	TRIES	10000

/*
 * The GEM hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware doesn't compensate the checksum for UDP datagrams, which
 * can yield a checksum of 0x0.  As a safeguard, UDP checksum offloading
 * is disabled by default.  It can be reactivated by setting the special
 * link option link0 with ifconfig(8).
 */
#define	GEM_CSUM_FEATURES	(CSUM_TCP)
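
/*
 * A minimal sketch (not code from this file) of how the link0 override is
 * typically folded into the enabled feature set; the actual handling lives
 * in gem_ioctl(), outside this excerpt:
 *
 *	sc->sc_csum_features = GEM_CSUM_FEATURES;
 *	if ((ifp->if_flags & IFF_LINK0) != 0)
 *		sc->sc_csum_features |= CSUM_UDP;
 */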

static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
		    uint32_t clr, uint32_t set);
static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static int	gem_disable_rx(struct gem_softc *sc);
static int	gem_disable_tx(struct gem_softc *sc);
static void	gem_eint(struct gem_softc *sc, u_int status);
static void	gem_init(void *xsc);
static void	gem_init_locked(struct gem_softc *sc);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
static int	gem_meminit(struct gem_softc *sc);
static void	gem_mifinit(struct gem_softc *sc);
static void	gem_reset(struct gem_softc *sc);
static int	gem_reset_rx(struct gem_softc *sc);
static void	gem_reset_rxdma(struct gem_softc *sc);
static int	gem_reset_tx(struct gem_softc *sc);
static u_int	gem_ringsize(u_int sz);
static void	gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *arg);
#endif
static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void	gem_rxdrain(struct gem_softc *sc);
static void	gem_setladrf(struct gem_softc *sc);
static void	gem_start(struct ifnet *ifp);
static void	gem_start_locked(struct ifnet *ifp);
static void	gem_stop(struct ifnet *ifp, int disable);
static void	gem_tick(void *arg);
static void	gem_tint(struct gem_softc *sc);
static __inline void gem_txcksum(struct gem_softc *sc, struct mbuf *m,
		    uint64_t *cflags);
static int	gem_watchdog(struct gem_softc *sc);

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))

148130803Smarcelint
149130803Smarcelgem_attach(struct gem_softc *sc)
150130803Smarcel{
151130803Smarcel	struct gem_txsoft *txs;
152130803Smarcel	struct ifnet *ifp;
153130803Smarcel	int error, i;
154130803Smarcel	uint32_t v;
155130803Smarcel
156130803Smarcel	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
157130803Smarcel	if (ifp == NULL)
158130803Smarcel		return (ENOSPC);
159130803Smarcel
160130803Smarcel	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
161130803Smarcel#ifdef GEM_RINT_TIMEOUT
162130803Smarcel	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
163130803Smarcel#endif
164130803Smarcel
165130803Smarcel	/* Make sure the chip is stopped. */
166130803Smarcel	ifp->if_softc = sc;
167130803Smarcel	gem_reset(sc);
168130803Smarcel
169130803Smarcel	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
170130803Smarcel	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
171130803Smarcel	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
172130803Smarcel	    NULL, &sc->sc_pdmatag);
173130803Smarcel	if (error)
174130803Smarcel		goto fail_ifnet;
175130803Smarcel
176130803Smarcel	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
177130803Smarcel	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
178130803Smarcel	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
179130803Smarcel	if (error)
180130803Smarcel		goto fail_ptag;
181130803Smarcel
182130803Smarcel	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
183130803Smarcel	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
184130803Smarcel	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
185130803Smarcel	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
186130803Smarcel	if (error)
187130803Smarcel		goto fail_rtag;
188130803Smarcel
189130803Smarcel	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
190130803Smarcel	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
191130803Smarcel	    sizeof(struct gem_control_data), 1,
192130803Smarcel	    sizeof(struct gem_control_data), 0,
193130803Smarcel	    NULL, NULL, &sc->sc_cdmatag);
194130803Smarcel	if (error)
195130803Smarcel		goto fail_ttag;
196130803Smarcel
197130803Smarcel	/*
198130803Smarcel	 * Allocate the control data structures, create and load the
199130803Smarcel	 * DMA map for it.
200130803Smarcel	 */
201130803Smarcel	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
202130803Smarcel	    (void **)&sc->sc_control_data,
203130803Smarcel	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
204130803Smarcel	    &sc->sc_cddmamap))) {
205130803Smarcel		device_printf(sc->sc_dev,
206130803Smarcel		    "unable to allocate control data, error = %d\n", error);
207130803Smarcel		goto fail_ctag;
208130803Smarcel	}
209130803Smarcel
210130803Smarcel	sc->sc_cddma = 0;
211130803Smarcel	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
212130803Smarcel	    sc->sc_control_data, sizeof(struct gem_control_data),
213130803Smarcel	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
214130803Smarcel		device_printf(sc->sc_dev,
215130803Smarcel		    "unable to load control data DMA map, error = %d\n",
216130803Smarcel		    error);
217130803Smarcel		goto fail_cmem;
218130803Smarcel	}
219130803Smarcel
220130803Smarcel	/*
221130803Smarcel	 * Initialize the transmit job descriptors.
222130803Smarcel	 */
223130803Smarcel	STAILQ_INIT(&sc->sc_txfreeq);
224130803Smarcel	STAILQ_INIT(&sc->sc_txdirtyq);
225130803Smarcel
226130803Smarcel	/*
227130803Smarcel	 * Create the transmit buffer DMA maps.
228130803Smarcel	 */
229130803Smarcel	error = ENOMEM;
230130803Smarcel	for (i = 0; i < GEM_TXQUEUELEN; i++) {
231130803Smarcel		txs = &sc->sc_txsoft[i];
232130803Smarcel		txs->txs_mbuf = NULL;
233130803Smarcel		txs->txs_ndescs = 0;
234130803Smarcel		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
235130803Smarcel		    &txs->txs_dmamap)) != 0) {
236130803Smarcel			device_printf(sc->sc_dev,
237130803Smarcel			    "unable to create TX DMA map %d, error = %d\n",
238130803Smarcel			    i, error);
239130803Smarcel			goto fail_txd;
240130803Smarcel		}
241130803Smarcel		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
242130803Smarcel	}
243130803Smarcel
244130803Smarcel	/*
245130803Smarcel	 * Create the receive buffer DMA maps.
246130803Smarcel	 */
247130803Smarcel	for (i = 0; i < GEM_NRXDESC; i++) {
248130803Smarcel		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
249130803Smarcel		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
250130803Smarcel			device_printf(sc->sc_dev,
251130803Smarcel			    "unable to create RX DMA map %d, error = %d\n",
252130803Smarcel			    i, error);
253130803Smarcel			goto fail_rxd;
254130803Smarcel		}
255130803Smarcel		sc->sc_rxsoft[i].rxs_mbuf = NULL;
256130803Smarcel	}
257130803Smarcel
258130803Smarcel	/* Bad things will happen when touching this register on ERI. */
259130803Smarcel	if (sc->sc_variant != GEM_SUN_ERI)
260130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
261130803Smarcel		    GEM_MII_DATAPATH_MII);
262130803Smarcel
263130803Smarcel	gem_mifinit(sc);
264130803Smarcel
265130803Smarcel	/*
266130803Smarcel	 * Look for an external PHY.
267130803Smarcel	 */
268130803Smarcel	error = ENXIO;
269130803Smarcel	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
270130803Smarcel	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
271130803Smarcel		v |= GEM_MIF_CONFIG_PHY_SEL;
272130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
273130803Smarcel		switch (sc->sc_variant) {
274130803Smarcel		case GEM_SUN_ERI:
275130803Smarcel			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
276130803Smarcel			break;
277130803Smarcel		default:
278130803Smarcel			sc->sc_phyad = -1;
279130803Smarcel			break;
280130803Smarcel		}
281130803Smarcel		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
282130803Smarcel		    gem_mediachange, gem_mediastatus);
283130803Smarcel	}
284130803Smarcel
285130803Smarcel	/*
286130803Smarcel	 * Fall back on an internal PHY if no external PHY was found.
287130803Smarcel	 */
288130803Smarcel	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
289130803Smarcel		v &= ~GEM_MIF_CONFIG_PHY_SEL;
290130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
291130803Smarcel		switch (sc->sc_variant) {
292130803Smarcel		case GEM_SUN_ERI:
293130803Smarcel		case GEM_APPLE_K2_GMAC:
294130803Smarcel			sc->sc_phyad = GEM_PHYAD_INTERNAL;
295130803Smarcel			break;
296130803Smarcel		case GEM_APPLE_GMAC:
297130803Smarcel			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
298130803Smarcel			break;
299130803Smarcel		default:
300130803Smarcel			sc->sc_phyad = -1;
301130803Smarcel			break;
302130803Smarcel		}
303130803Smarcel		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
304130803Smarcel		    gem_mediachange, gem_mediastatus);
305130803Smarcel	}
306130803Smarcel
307130803Smarcel	/*
308130803Smarcel	 * Try the external PCS SERDES if we didn't find any PHYs.
309130803Smarcel	 */
310130803Smarcel	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
311130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
312130803Smarcel		    GEM_MII_DATAPATH_SERDES);
313130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
314130803Smarcel		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
315130803Smarcel		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
316130803Smarcel		sc->sc_flags |= GEM_SERDES;
317130803Smarcel		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
318130803Smarcel		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
319130803Smarcel		    gem_mediachange, gem_mediastatus);
320130803Smarcel	}
321130803Smarcel
322130803Smarcel	if (error != 0) {
323130803Smarcel		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
324130803Smarcel		goto fail_rxd;
325130803Smarcel	}
326130803Smarcel	sc->sc_mii = device_get_softc(sc->sc_miibus);
327130803Smarcel
328130803Smarcel	/*
329130803Smarcel	 * From this point forward, the attachment cannot fail.  A failure
330130803Smarcel	 * before this point releases all resources that may have been
331130803Smarcel	 * allocated.
332130803Smarcel	 */
333130803Smarcel
334130803Smarcel	/* Get RX FIFO size. */
335130803Smarcel	sc->sc_rxfifosize = 64 *
336130803Smarcel	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);
337130803Smarcel
338130803Smarcel	/* Get TX FIFO size. */
339130803Smarcel	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
340130803Smarcel	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
341130803Smarcel	    sc->sc_rxfifosize / 1024, v / 16);
342130803Smarcel
343130803Smarcel	sc->sc_csum_features = GEM_CSUM_FEATURES;
344130803Smarcel	/* Initialize ifnet structure. */
345130803Smarcel	ifp->if_softc = sc;
346130803Smarcel	if_initname(ifp, device_get_name(sc->sc_dev),
347130803Smarcel	    device_get_unit(sc->sc_dev));
348130803Smarcel	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
349130803Smarcel	ifp->if_start = gem_start;
350130803Smarcel	ifp->if_ioctl = gem_ioctl;
351130803Smarcel	ifp->if_init = gem_init;
352130803Smarcel	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
353130803Smarcel	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
354130803Smarcel	IFQ_SET_READY(&ifp->if_snd);
355130803Smarcel
356130803Smarcel	/* Attach the interface. */
357130803Smarcel	ether_ifattach(ifp, sc->sc_enaddr);
358130803Smarcel
359130803Smarcel	/*
360130803Smarcel	 * Tell the upper layer(s) we support long frames/checksum offloads.
361130803Smarcel	 */
362130803Smarcel	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
363130803Smarcel	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
364130803Smarcel	ifp->if_hwassist |= sc->sc_csum_features;
365130803Smarcel	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
366130803Smarcel
367130803Smarcel	return (0);
368130803Smarcel
369130803Smarcel	/*
370130803Smarcel	 * Free any resources we've allocated during the failed attach
371130803Smarcel	 * attempt.  Do this in reverse order and fall through.
372130803Smarcel	 */
373130803Smarcel fail_rxd:
374130803Smarcel	for (i = 0; i < GEM_NRXDESC; i++)
375130803Smarcel		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
376130803Smarcel			bus_dmamap_destroy(sc->sc_rdmatag,
377130803Smarcel			    sc->sc_rxsoft[i].rxs_dmamap);
378130803Smarcel fail_txd:
379130803Smarcel	for (i = 0; i < GEM_TXQUEUELEN; i++)
380130803Smarcel		if (sc->sc_txsoft[i].txs_dmamap != NULL)
381130803Smarcel			bus_dmamap_destroy(sc->sc_tdmatag,
382130803Smarcel			    sc->sc_txsoft[i].txs_dmamap);
383130803Smarcel	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
384130803Smarcel fail_cmem:
385130803Smarcel	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
386130803Smarcel	    sc->sc_cddmamap);
387130803Smarcel fail_ctag:
388130803Smarcel	bus_dma_tag_destroy(sc->sc_cdmatag);
389130803Smarcel fail_ttag:
390130803Smarcel	bus_dma_tag_destroy(sc->sc_tdmatag);
391130803Smarcel fail_rtag:
392130803Smarcel	bus_dma_tag_destroy(sc->sc_rdmatag);
393130803Smarcel fail_ptag:
394130803Smarcel	bus_dma_tag_destroy(sc->sc_pdmatag);
395130803Smarcel fail_ifnet:
396	if_free(ifp);
397	return (error);
398}
399
400void
401gem_detach(struct gem_softc *sc)
402{
403	struct ifnet *ifp = sc->sc_ifp;
404	int i;
405
406	GEM_LOCK(sc);
407	gem_stop(ifp, 1);
408	GEM_UNLOCK(sc);
409	callout_drain(&sc->sc_tick_ch);
410#ifdef GEM_RINT_TIMEOUT
411	callout_drain(&sc->sc_rx_ch);
412#endif
413	ether_ifdetach(ifp);
414	if_free(ifp);
415	device_delete_child(sc->sc_dev, sc->sc_miibus);
416
417	for (i = 0; i < GEM_NRXDESC; i++)
418		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
419			bus_dmamap_destroy(sc->sc_rdmatag,
420			    sc->sc_rxsoft[i].rxs_dmamap);
421	for (i = 0; i < GEM_TXQUEUELEN; i++)
422		if (sc->sc_txsoft[i].txs_dmamap != NULL)
423			bus_dmamap_destroy(sc->sc_tdmatag,
424			    sc->sc_txsoft[i].txs_dmamap);
425	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
426	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
427	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
428	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
429	    sc->sc_cddmamap);
430	bus_dma_tag_destroy(sc->sc_cdmatag);
431	bus_dma_tag_destroy(sc->sc_tdmatag);
432	bus_dma_tag_destroy(sc->sc_rdmatag);
433	bus_dma_tag_destroy(sc->sc_pdmatag);
434}
435
436void
437gem_suspend(struct gem_softc *sc)
438{
439	struct ifnet *ifp = sc->sc_ifp;
440
441	GEM_LOCK(sc);
442	gem_stop(ifp, 0);
443	GEM_UNLOCK(sc);
444}
445
446void
447gem_resume(struct gem_softc *sc)
448{
449	struct ifnet *ifp = sc->sc_ifp;
450
451	GEM_LOCK(sc);
452	/*
453	 * On resume all registers have to be initialized again like
454	 * after power-on.
455	 */
456	sc->sc_flags &= ~GEM_INITED;
457	if (ifp->if_flags & IFF_UP)
458		gem_init_locked(sc);
459	GEM_UNLOCK(sc);
460}
461
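/*
 * Fill in the TX descriptor checksum control bits: the hardware needs the
 * byte offset at which its one's complement sum should start (the beginning
 * of the TCP/UDP header) and the offset at which the result is to be
 * stuffed (the start offset plus the csum_data offset supplied by the
 * stack).
 */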
462static __inline void
463gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
464{
465	char *p;
466	struct ip *ip;
467	struct mbuf *m0;
468	uint64_t offset, offset2;
469
470	m0 = m;
471	offset = sizeof(struct ip) + ETHER_HDR_LEN;
472	for(; m && m->m_len == 0; m = m->m_next)
473		;
474	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
475		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
476		    __func__);
477		/* Checksum will be corrupted. */
478		m = m0;
479		goto sendit;
480	}
481	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
482		if (m->m_len != ETHER_HDR_LEN) {
483			device_printf(sc->sc_dev,
484			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
485			/* Checksum will be corrupted. */
486			m = m0;
487			goto sendit;
488		}
489		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
490			;
491		if (m == NULL) {
492			/* Checksum will be corrupted. */
493			m = m0;
494			goto sendit;
495		}
496		ip = mtod(m, struct ip *);
497	} else {
498		p = mtod(m, uint8_t *);
499		p += ETHER_HDR_LEN;
500		ip = (struct ip *)p;
501	}
502	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
503
504 sendit:
505	offset2 = m->m_pkthdr.csum_data;
506	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
507	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
508	*cflags |= GEM_TD_CXSUM_ENABLE;
509}
510
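/*
 * Validate and fix up the RX checksum computed by the hardware.  The chip
 * sums from a fixed offset (ETHER_HDR_LEN + sizeof(struct ip), as
 * programmed into GEM_RX_CONFIG), so the words of any IP options have to be
 * subtracted from the sum again before it can be handed to the stack via
 * CSUM_DATA_VALID.
 */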
511static __inline void
512gem_rxcksum(struct mbuf *m, uint64_t flags)
513{
514	struct ether_header *eh;
515	struct ip *ip;
516	struct udphdr *uh;
517	uint16_t *opts;
518	int32_t hlen, len, pktlen;
519	uint32_t temp32;
520	uint16_t cksum;
521
522	pktlen = m->m_pkthdr.len;
523	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
524		return;
525	eh = mtod(m, struct ether_header *);
526	if (eh->ether_type != htons(ETHERTYPE_IP))
527		return;
528	ip = (struct ip *)(eh + 1);
529	if (ip->ip_v != IPVERSION)
530		return;
531
532	hlen = ip->ip_hl << 2;
533	pktlen -= sizeof(struct ether_header);
534	if (hlen < sizeof(struct ip))
535		return;
536	if (ntohs(ip->ip_len) < hlen)
537		return;
538	if (ntohs(ip->ip_len) != pktlen)
539		return;
540	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
541		return;	/* Cannot handle fragmented packet. */
542
543	switch (ip->ip_p) {
544	case IPPROTO_TCP:
545		if (pktlen < (hlen + sizeof(struct tcphdr)))
546			return;
547		break;
548	case IPPROTO_UDP:
549		if (pktlen < (hlen + sizeof(struct udphdr)))
550			return;
551		uh = (struct udphdr *)((uint8_t *)ip + hlen);
552		if (uh->uh_sum == 0)
553			return; /* no checksum */
554		break;
555	default:
556		return;
557	}
558
559	cksum = ~(flags & GEM_RD_CHECKSUM);
560	/* checksum fixup for IP options */
561	len = hlen - sizeof(struct ip);
562	if (len > 0) {
563		opts = (uint16_t *)(ip + 1);
564		for (; len > 0; len -= sizeof(uint16_t), opts++) {
565			temp32 = cksum - *opts;
566			temp32 = (temp32 >> 16) + (temp32 & 65535);
567			cksum = temp32 & 65535;
568		}
569	}
570	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
571	m->m_pkthdr.csum_data = cksum;
572}
573
574static void
575gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
576{
577	struct gem_softc *sc = xsc;
578
579	if (error != 0)
580		return;
581	if (nsegs != 1)
582		panic("%s: bad control buffer segment count", __func__);
583	sc->sc_cddma = segs[0].ds_addr;
584}
585
586static void
587gem_tick(void *arg)
588{
589	struct gem_softc *sc = arg;
590	struct ifnet *ifp;
591
592	GEM_LOCK_ASSERT(sc, MA_OWNED);
593
594	ifp = sc->sc_ifp;
595	/*
596	 * Unload collision counters.
597	 */
598	ifp->if_collisions +=
599	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
600	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT) +
601	    GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
602	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
603
604	/*
605	 * Then clear the hardware counters.
606	 */
607	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
608	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
609	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
610	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
611
612	mii_tick(sc->sc_mii);
613
614	if (gem_watchdog(sc) == EJUSTRETURN)
615		return;
616
617	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
618}
619
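/*
 * Poll a register until all bits in `clr' have cleared and all bits in
 * `set' have become set.  Returns 1 on success and 0 if the condition was
 * not met within TRIES iterations of 100us each (about one second).
 */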
620static int
621gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
622    uint32_t set)
623{
624	int i;
625	uint32_t reg;
626
627	for (i = TRIES; i--; DELAY(100)) {
628		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
629		if ((reg & clr) == 0 && (reg & set) == set)
630			return (1);
631	}
632	return (0);
633}
634
static void
gem_reset(struct gem_softc *sc)
{
639
640#ifdef GEM_DEBUG
641	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
642#endif
643	gem_reset_rx(sc);
644	gem_reset_tx(sc);
645
646	/* Do a full reset. */
647	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
648	GEM_BANK2_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
649	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
650		device_printf(sc->sc_dev, "cannot reset device\n");
651}
652
653static void
654gem_rxdrain(struct gem_softc *sc)
655{
656	struct gem_rxsoft *rxs;
657	int i;
658
659	for (i = 0; i < GEM_NRXDESC; i++) {
660		rxs = &sc->sc_rxsoft[i];
661		if (rxs->rxs_mbuf != NULL) {
662			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
663			    BUS_DMASYNC_POSTREAD);
664			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
665			m_freem(rxs->rxs_mbuf);
666			rxs->rxs_mbuf = NULL;
667		}
668	}
669}
670
671static void
672gem_stop(struct ifnet *ifp, int disable)
673{
674	struct gem_softc *sc = ifp->if_softc;
675	struct gem_txsoft *txs;
676
677#ifdef GEM_DEBUG
678	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
679#endif
680
681	callout_stop(&sc->sc_tick_ch);
682#ifdef GEM_RINT_TIMEOUT
683	callout_stop(&sc->sc_rx_ch);
684#endif
685
686	/* XXX should we reset these instead? */
687	gem_disable_tx(sc);
688	gem_disable_rx(sc);
689
690	/*
691	 * Release any queued transmit buffers.
692	 */
693	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
694		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
695		if (txs->txs_ndescs != 0) {
696			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
697			    BUS_DMASYNC_POSTWRITE);
698			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
699			if (txs->txs_mbuf != NULL) {
700				m_freem(txs->txs_mbuf);
701				txs->txs_mbuf = NULL;
702			}
703		}
704		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
705	}
706
707	if (disable)
708		gem_rxdrain(sc);
709
710	/*
711	 * Mark the interface down and cancel the watchdog timer.
712	 */
713	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
714	sc->sc_flags &= ~GEM_LINK;
715	sc->sc_wdog_timer = 0;
716}
717
718static int
719gem_reset_rx(struct gem_softc *sc)
720{
721
722	/*
723	 * Resetting while DMA is in progress can cause a bus hang, so we
724	 * disable DMA first.
725	 */
726	gem_disable_rx(sc);
727	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
728	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
729	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
730		device_printf(sc->sc_dev, "cannot disable RX DMA\n");
731
732	/* Finally, reset the ERX. */
733	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX);
734	GEM_BANK2_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
735	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
736	    0)) {
737		device_printf(sc->sc_dev, "cannot reset receiver\n");
738		return (1);
739	}
740	return (0);
741}
742
/*
 * Reset the receiver DMA engine.
 *
 * Intended for use in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW
 * etc. in order to reset the receiver DMA engine only and not do a full
 * reset, which among other things also takes the link down and clears
 * the FIFOs.
 */
750static void
751gem_reset_rxdma(struct gem_softc *sc)
752{
753	int i;
754
755	if (gem_reset_rx(sc) != 0)
756		return (gem_init_locked(sc));
757	for (i = 0; i < GEM_NRXDESC; i++)
758		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
759			GEM_UPDATE_RXDESC(sc, i);
760	sc->sc_rxptr = 0;
761	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
762	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
763
764	/* NOTE: we use only 32-bit DMA addresses here. */
765	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
766	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
767	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
768	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
769	    gem_ringsize(GEM_NRXDESC /* XXX */) |
770	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
771	    GEM_RX_CONFIG_CXM_START_SHFT) |
772	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
773	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
774	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
775	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
776	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
777	    (3 * sc->sc_rxfifosize / 256) |
778	    ((sc->sc_rxfifosize / 256) << 12));
779	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
780	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
781	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
782	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
783	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
784	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
785}
786
787static int
788gem_reset_tx(struct gem_softc *sc)
789{
790
791	/*
792	 * Resetting while DMA is in progress can cause a bus hang, so we
793	 * disable DMA first.
794	 */
795	gem_disable_tx(sc);
796	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
797	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
798	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
799		device_printf(sc->sc_dev, "cannot disable TX DMA\n");
800
801	/* Finally, reset the ETX. */
802	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX);
803	GEM_BANK2_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
804	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX,
805	    0)) {
806		device_printf(sc->sc_dev, "cannot reset transmitter\n");
807		return (1);
808	}
809	return (0);
810}
811
812static int
813gem_disable_rx(struct gem_softc *sc)
814{
815
816	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
817	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
818	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
819	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE,
820	    0));
821}
822
823static int
824gem_disable_tx(struct gem_softc *sc)
825{
826
827	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
828	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
829	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
830	return (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE,
831	    0));
832}
833
static int
gem_meminit(struct gem_softc *sc)
{
838	struct gem_rxsoft *rxs;
839	int error, i;
840
841	/*
842	 * Initialize the transmit descriptor ring.
843	 */
844	for (i = 0; i < GEM_NTXDESC; i++) {
845		sc->sc_txdescs[i].gd_flags = 0;
846		sc->sc_txdescs[i].gd_addr = 0;
847	}
848	sc->sc_txfree = GEM_MAXTXFREE;
849	sc->sc_txnext = 0;
850	sc->sc_txwin = 0;
851
852	/*
853	 * Initialize the receive descriptor and receive job
854	 * descriptor rings.
855	 */
856	for (i = 0; i < GEM_NRXDESC; i++) {
857		rxs = &sc->sc_rxsoft[i];
858		if (rxs->rxs_mbuf == NULL) {
859			if ((error = gem_add_rxbuf(sc, i)) != 0) {
860				device_printf(sc->sc_dev,
861				    "unable to allocate or map RX buffer %d, "
862				    "error = %d\n", i, error);
863				/*
864				 * XXX we should attempt to run with fewer
865				 * receive buffers instead of just failing.
866				 */
867				gem_rxdrain(sc);
868				return (1);
869			}
870		} else
871			GEM_INIT_RXDESC(sc, i);
872	}
873	sc->sc_rxptr = 0;
874	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
875	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
876
877	return (0);
878}
879
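/*
 * Translate a descriptor ring size (a power of two between 32 and 8192)
 * into the corresponding GEM_{RX,TX}_CONFIG ring size encoding, falling
 * back to the smallest ring when an invalid size is passed in.
 */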
880static u_int
881gem_ringsize(u_int sz)
882{
883
884	switch (sz) {
885	case 32:
886		return (GEM_RING_SZ_32);
887	case 64:
888		return (GEM_RING_SZ_64);
889	case 128:
890		return (GEM_RING_SZ_128);
891	case 256:
892		return (GEM_RING_SZ_256);
893	case 512:
894		return (GEM_RING_SZ_512);
895	case 1024:
896		return (GEM_RING_SZ_1024);
897	case 2048:
898		return (GEM_RING_SZ_2048);
899	case 4096:
900		return (GEM_RING_SZ_4096);
901	case 8192:
902		return (GEM_RING_SZ_8192);
903	default:
		printf("%s: invalid ring size %u\n", __func__, sz);
905		return (GEM_RING_SZ_32);
906	}
907}
908
909static void
910gem_init(void *xsc)
911{
912	struct gem_softc *sc = xsc;
913
914	GEM_LOCK(sc);
915	gem_init_locked(sc);
916	GEM_UNLOCK(sc);
917}
918
919/*
920 * Initialization of interface; set up initialization block
921 * and transmit/receive descriptor rings.
922 */
923static void
924gem_init_locked(struct gem_softc *sc)
925{
926	struct ifnet *ifp = sc->sc_ifp;
927	uint32_t v;
928
929	GEM_LOCK_ASSERT(sc, MA_OWNED);
930
931#ifdef GEM_DEBUG
932	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
933	    __func__);
934#endif
935	/*
936	 * Initialization sequence.  The numbered steps below correspond
937	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
938	 * Channel Engine manual (part of the PCIO manual).
939	 * See also the STP2002-STQ document from Sun Microsystems.
940	 */
941
942	/* step 1 & 2.  Reset the Ethernet Channel. */
943	gem_stop(sc->sc_ifp, 0);
944	gem_reset(sc);
945#ifdef GEM_DEBUG
946	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
947	    __func__);
948#endif
949
950	/* Re-initialize the MIF. */
951	gem_mifinit(sc);
952
953	/* step 3.  Setup data structures in host memory. */
954	if (gem_meminit(sc) != 0)
955		return;
956
957	/* step 4.  TX MAC registers & counters */
958	gem_init_regs(sc);
959
960	/* step 5.  RX MAC registers & counters */
961	gem_setladrf(sc);
962
963	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
964	/* NOTE: we use only 32-bit DMA addresses here. */
965	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
966	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
967
968	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
969	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
970#ifdef GEM_DEBUG
971	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
972	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
973#endif
974
975	/* step 8.  Global Configuration & Interrupt Mask */
976	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
977	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
978	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
979	    GEM_INTR_BERR
980#ifdef GEM_DEBUG
981	    | GEM_INTR_PCS | GEM_INTR_MIF
982#endif
983	    ));
984	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
985	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
986	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
987	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
988#ifdef GEM_DEBUG
989	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
990	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
991#else
992	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
993	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
994#endif
995
996	/* step 9.  ETX Configuration: use mostly default values. */
997
998	/* Enable DMA. */
999	v = gem_ringsize(GEM_NTXDESC /* XXX */);
1000	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG,
1001	    v | GEM_TX_CONFIG_TXDMA_EN |
1002	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));
1003
1004	/* step 10.  ERX Configuration */
1005
1006	/* Encode Receive Descriptor ring size. */
1007	v = gem_ringsize(GEM_NRXDESC /* XXX */);
1008	/* RX TCP/UDP checksum offset */
1009	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1010	    GEM_RX_CONFIG_CXM_START_SHFT);
1011
1012	/* Enable DMA. */
1013	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
1014	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
1015	    (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
1016
1017	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
1018	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
1019
1020	/*
1021	 * The following value is for an OFF Threshold of about 3/4 full
1022	 * and an ON Threshold of 1/4 full.
1023	 */
1024	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
1025	    (3 * sc->sc_rxfifosize / 256) |
1026	    ((sc->sc_rxfifosize / 256) << 12));
1027
1028	/* step 11.  Configure Media. */
1029
1030	/* step 12.  RX_MAC Configuration Register */
1031	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
1032	v |= GEM_MAC_RX_STRIP_CRC;
1033	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
1034	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
1035	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
1036		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1037	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
1038
1039	/* step 14.  Issue Transmit Pending command. */
1040
	/* step 15.  Give the receiver a swift kick. */
1042	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
1043
1044	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1045	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1046	sc->sc_ifflags = ifp->if_flags;
1047
1048	sc->sc_flags &= ~GEM_LINK;
1049	mii_mediachg(sc->sc_mii);
1050
1051	/* Start the one second timer. */
1052	sc->sc_wdog_timer = 0;
1053	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1054}
1055
1056static int
1057gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
1058{
1059	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1060	struct gem_txsoft *txs;
1061	struct mbuf *m;
1062	uint64_t cflags, flags;
1063	int error, nexttx, nsegs, seg;
1064
1065	/* Get a work queue entry. */
1066	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1067		/* Ran out of descriptors. */
1068		return (ENOBUFS);
1069	}
1070	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1071	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1072	if (error == EFBIG) {
1073		m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
1074		if (m == NULL) {
1075			m_freem(*m_head);
1076			*m_head = NULL;
1077			return (ENOBUFS);
1078		}
1079		*m_head = m;
1080		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1081		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
1082		    BUS_DMA_NOWAIT);
1083		if (error != 0) {
1084			m_freem(*m_head);
1085			*m_head = NULL;
1086			return (error);
1087		}
1088	} else if (error != 0)
1089		return (error);
1090	/* If nsegs is wrong then the stack is corrupt. */
1091	KASSERT(nsegs <= GEM_NTXSEGS,
1092	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1093	if (nsegs == 0) {
1094		m_freem(*m_head);
1095		*m_head = NULL;
1096		return (EIO);
1097	}
1098
1099	/*
1100	 * Ensure we have enough descriptors free to describe
1101	 * the packet.  Note, we always reserve one descriptor
1102	 * at the end of the ring as a termination point, in
1103	 * order to prevent wrap-around.
1104	 */
1105	if (nsegs > sc->sc_txfree - 1) {
1106		txs->txs_ndescs = 0;
1107		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1108		return (ENOBUFS);
1109	}
1110
1111	flags = cflags = 0;
1112	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
1113		gem_txcksum(sc, *m_head, &cflags);
1114
1115	txs->txs_ndescs = nsegs;
1116	txs->txs_firstdesc = sc->sc_txnext;
1117	nexttx = txs->txs_firstdesc;
1118	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1119#ifdef GEM_DEBUG
1120		CTR6(KTR_GEM,
1121		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1122		    __func__, seg, nexttx, txsegs[seg].ds_len,
1123		    txsegs[seg].ds_addr,
1124		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1125#endif
1126		sc->sc_txdescs[nexttx].gd_addr =
1127		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1128		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1129		    ("%s: segment size too large!", __func__));
1130		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1131		sc->sc_txdescs[nexttx].gd_flags =
1132		    GEM_DMA_WRITE(sc, flags | cflags);
1133		txs->txs_lastdesc = nexttx;
1134	}
1135
1136	/* Set EOP on the last descriptor. */
1137#ifdef GEM_DEBUG
1138	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1139	    __func__, seg, nexttx);
1140#endif
1141	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1142	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1143
1144	/* Lastly set SOP on the first descriptor. */
1145#ifdef GEM_DEBUG
1146	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1147	    __func__, seg, nexttx);
1148#endif
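	/*
	 * Request a completion interrupt only about once every
	 * GEM_NTXSEGS * 2 / 3 packets so that TX descriptors are reclaimed
	 * reasonably promptly without taking an interrupt for every packet.
	 */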
1149	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1150		sc->sc_txwin = 0;
1151		flags |= GEM_TD_INTERRUPT_ME;
1152		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1153		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1154		    GEM_TD_START_OF_PACKET);
1155	} else
1156		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1157		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1158
1159	/* Sync the DMA map. */
1160	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1161	    BUS_DMASYNC_PREWRITE);
1162
1163#ifdef GEM_DEBUG
1164	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1165	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1166	    txs->txs_ndescs);
1167#endif
1168	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1169	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1170	txs->txs_mbuf = *m_head;
1171
1172	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1173	sc->sc_txfree -= txs->txs_ndescs;
1174
1175	return (0);
1176}
1177
1178static void
1179gem_init_regs(struct gem_softc *sc)
1180{
1181	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1182
1183	/* These registers are not cleared on reset. */
1184	if ((sc->sc_flags & GEM_INITED) == 0) {
1185		/* magic values */
1186		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
1187		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
1188		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);
1189
1190		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1191		/* max frame and max burst size */
1192		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
1193		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1194
1195		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
1196		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
1197		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1198		/* dunno... */
1199		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);
1200		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
1201		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1202
1203		/* secondary MAC address: 0:0:0:0:0:0 */
1204		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
1205		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
1206		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);
1207
1208		/* MAC control address: 01:80:c2:00:00:01 */
1209		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
1210		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
1211		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);
1212
1213		/* MAC filter address: 0:0:0:0:0:0 */
1214		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
1215		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
1216		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
1217
1218		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
1219		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);
1220
1221		sc->sc_flags |= GEM_INITED;
1222	}
1223
1224	/* Counters need to be zeroed. */
1225	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
1226	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
1227	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
1228	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
1229	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
1230	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
1231	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
1232	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
1233	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
1234	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
1235	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
1236
1237	/* Set XOFF PAUSE time. */
1238	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1239
	/*
	 * Set the internal arbitration to "infinite" bursts of the
	 * maximum length of 31 * 64 bytes so DMA transfers aren't
	 * split up in cache line size chunks.  This greatly improves
	 * RX performance in particular.
	 * Enable silicon bug workarounds for the Apple variants.
	 */
1247	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
1248	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1249	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
1250	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1251
1252	/* Set the station address. */
1253	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
1254	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
1255	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
1256
1257	/* Enable MII outputs. */
1258	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
1259}
1260
1261static void
1262gem_start(struct ifnet *ifp)
1263{
1264	struct gem_softc *sc = ifp->if_softc;
1265
1266	GEM_LOCK(sc);
1267	gem_start_locked(ifp);
1268	GEM_UNLOCK(sc);
1269}
1270
1271static void
1272gem_start_locked(struct ifnet *ifp)
1273{
1274	struct gem_softc *sc = ifp->if_softc;
1275	struct mbuf *m;
1276	int ntx;
1277
1278	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1279	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1280		return;
1281
1282#ifdef GEM_DEBUG
1283	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1284	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1285	    sc->sc_txnext);
1286#endif
1287	ntx = 0;
1288	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1289		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1290		if (m == NULL)
1291			break;
1292		if (gem_load_txmbuf(sc, &m) != 0) {
1293			if (m == NULL)
1294				break;
1295			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1296			IFQ_DRV_PREPEND(&ifp->if_snd, m);
1297			break;
1298		}
1299		ntx++;
1300		/* Kick the transmitter. */
1301#ifdef GEM_DEBUG
1302		CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1303		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1304#endif
1305		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1306		GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
1307
1308		BPF_MTAP(ifp, m);
1309	}
1310
1311	if (ntx > 0) {
1312#ifdef GEM_DEBUG
1313		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1314		    device_get_name(sc->sc_dev), sc->sc_txnext);
1315#endif
1316
1317		/* Set a watchdog timer in case the chip flakes out. */
1318		sc->sc_wdog_timer = 5;
1319#ifdef GEM_DEBUG
1320		CTR3(KTR_GEM, "%s: %s: watchdog %d",
1321		    device_get_name(sc->sc_dev), __func__,
1322		    sc->sc_wdog_timer);
1323#endif
1324	}
1325}
1326
1327static void
1328gem_tint(struct gem_softc *sc)
1329{
1330	struct ifnet *ifp = sc->sc_ifp;
1331	struct gem_txsoft *txs;
1332	int txlast, progress;
1333#ifdef GEM_DEBUG
1334	int i;
1335
1336	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1337#endif
1338
1339	/*
1340	 * Go through our TX list and free mbufs for those
1341	 * frames that have been transmitted.
1342	 */
1343	progress = 0;
1344	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1345	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1346
1347#ifdef GEM_DEBUG
1348		if ((ifp->if_flags & IFF_DEBUG) != 0) {
1349			printf("    txsoft %p transmit chain:\n", txs);
1350			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1351				printf("descriptor %d: ", i);
1352				printf("gd_flags: 0x%016llx\t",
1353				    (long long)GEM_DMA_READ(sc,
1354				    sc->sc_txdescs[i].gd_flags));
1355				printf("gd_addr: 0x%016llx\n",
1356				    (long long)GEM_DMA_READ(sc,
1357				    sc->sc_txdescs[i].gd_addr));
1358				if (i == txs->txs_lastdesc)
1359					break;
1360			}
1361		}
1362#endif
1363
1364		/*
1365		 * In theory, we could harvest some descriptors before
1366		 * the ring is empty, but that's a bit complicated.
1367		 *
1368		 * GEM_TX_COMPLETION points to the last descriptor
1369		 * processed + 1.
1370		 */
1371		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
1372#ifdef GEM_DEBUG
1373		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1374		    "txs->txs_lastdesc = %d, txlast = %d",
1375		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1376#endif
1377		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1378			if ((txlast >= txs->txs_firstdesc) &&
1379			    (txlast <= txs->txs_lastdesc))
1380				break;
1381		} else {
1382			/* Ick -- this command wraps. */
1383			if ((txlast >= txs->txs_firstdesc) ||
1384			    (txlast <= txs->txs_lastdesc))
1385				break;
1386		}
1387
1388#ifdef GEM_DEBUG
1389		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
1390#endif
1391		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1392
1393		sc->sc_txfree += txs->txs_ndescs;
1394
1395		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1396		    BUS_DMASYNC_POSTWRITE);
1397		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1398		if (txs->txs_mbuf != NULL) {
1399			m_freem(txs->txs_mbuf);
1400			txs->txs_mbuf = NULL;
1401		}
1402
1403		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1404
1405		ifp->if_opackets++;
1406		progress = 1;
1407	}
1408
1409#ifdef GEM_DEBUG
1410	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
1411	    "GEM_TX_COMPLETION %x",
1412	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
1413	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
1414	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
1415	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
1416#endif
1417
1418	if (progress) {
1419		if (sc->sc_txfree == GEM_NTXDESC - 1)
1420			sc->sc_txwin = 0;
1421
1422		/*
1423		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
1424		 * and restart.
1425		 */
1426		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1427		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1428
1429		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1430		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1431			gem_start_locked(ifp);
1432	}
1433
1434#ifdef GEM_DEBUG
1435	CTR3(KTR_GEM, "%s: %s: watchdog %d",
1436	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1437#endif
1438}
1439
1440#ifdef GEM_RINT_TIMEOUT
1441static void
1442gem_rint_timeout(void *arg)
1443{
1444	struct gem_softc *sc = arg;
1445
1446	GEM_LOCK_ASSERT(sc, MA_OWNED);
1447	gem_rint(sc);
1448}
1449#endif
1450
1451static void
1452gem_rint(struct gem_softc *sc)
1453{
1454	struct ifnet *ifp = sc->sc_ifp;
1455	struct mbuf *m;
1456	uint64_t rxstat;
1457	uint32_t rxcomp;
1458
1459#ifdef GEM_RINT_TIMEOUT
1460	callout_stop(&sc->sc_rx_ch);
1461#endif
1462#ifdef GEM_DEBUG
1463	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1464#endif
1465
1466	/*
1467	 * Read the completion register once.  This limits
1468	 * how long the following loop can execute.
1469	 */
1470	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
1471
1472#ifdef GEM_DEBUG
1473	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
1474	    __func__, sc->sc_rxptr, rxcomp);
1475#endif
1476	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1477	for (; sc->sc_rxptr != rxcomp;) {
1478		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1479		rxstat = GEM_DMA_READ(sc,
1480		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1481
1482		if (rxstat & GEM_RD_OWN) {
1483#ifdef GEM_RINT_TIMEOUT
1484			/*
1485			 * The descriptor is still marked as owned, although
1486			 * it is supposed to have completed.  This has been
1487			 * observed on some machines.  Just exiting here
1488			 * might leave the packet sitting around until another
1489			 * one arrives to trigger a new interrupt, which is
1490			 * generally undesirable, so set up a timeout.
1491			 */
1492			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1493			    gem_rint_timeout, sc);
1494#endif
1495			m = NULL;
1496			goto kickit;
1497		}
1498
1499		if (rxstat & GEM_RD_BAD_CRC) {
1500			ifp->if_ierrors++;
1501			device_printf(sc->sc_dev, "receive error: CRC error\n");
1502			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1503			m = NULL;
1504			goto kickit;
1505		}
1506
1507#ifdef GEM_DEBUG
1508		if ((ifp->if_flags & IFF_DEBUG) != 0) {
1509			printf("    rxsoft %p descriptor %d: ",
1510			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1511			printf("gd_flags: 0x%016llx\t",
1512			    (long long)GEM_DMA_READ(sc,
1513			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1514			printf("gd_addr: 0x%016llx\n",
1515			    (long long)GEM_DMA_READ(sc,
1516			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1517		}
1518#endif
1519
1520		/*
1521		 * Allocate a new mbuf cluster.  If that fails, we are
1522		 * out of memory, and must drop the packet and recycle
1523		 * the buffer that's already attached to this descriptor.
1524		 */
1525		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1526			ifp->if_ierrors++;
1527			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1528			m = NULL;
1529		}
1530
1531 kickit:
1532		/*
1533		 * Update the RX kick register.  This register has to point
1534		 * to the descriptor after the last valid one (before the
1535		 * current batch) and must be incremented in multiples of
1536		 * 4 (because the DMA engine fetches/updates descriptors
1537		 * in batches of 4).
1538		 */
1539		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1540		if ((sc->sc_rxptr % 4) == 0) {
1541			GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1542			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
1543			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
1544			    GEM_NRXDESC_MASK);
1545		}
1546
1547		if (m == NULL) {
1548			if (rxstat & GEM_RD_OWN)
1549				break;
1550			continue;
1551		}
1552
1553		ifp->if_ipackets++;
1554		m->m_data += 2; /* We're already off by two */
1555		m->m_pkthdr.rcvif = ifp;
1556		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
1557
1558		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1559			gem_rxcksum(m, rxstat);
1560
1561		/* Pass it on. */
1562		GEM_UNLOCK(sc);
1563		(*ifp->if_input)(ifp, m);
1564		GEM_LOCK(sc);
1565	}
1566
1567#ifdef GEM_DEBUG
1568	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1569	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
1570#endif
1571}
1572
1573static int
1574gem_add_rxbuf(struct gem_softc *sc, int idx)
1575{
1576	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1577	struct mbuf *m;
1578	bus_dma_segment_t segs[1];
1579	int error, nsegs;
1580
1581	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1582	if (m == NULL)
1583		return (ENOBUFS);
1584	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1585
1586#ifdef GEM_DEBUG
1587	/* Bzero the packet to check DMA. */
1588	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1589#endif
1590
1591	if (rxs->rxs_mbuf != NULL) {
1592		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1593		    BUS_DMASYNC_POSTREAD);
1594		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1595	}
1596
1597	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1598	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1599	if (error != 0) {
1600		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
1602		m_freem(m);
1603		return (error);
1604	}
1605	/* If nsegs is wrong then the stack is corrupt. */
1606	KASSERT(nsegs == 1,
1607	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1608	rxs->rxs_mbuf = m;
1609	rxs->rxs_paddr = segs[0].ds_addr;
1610
1611	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1612	    BUS_DMASYNC_PREREAD);
1613
1614	GEM_INIT_RXDESC(sc, idx);
1615
1616	return (0);
1617}
1618
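/*
 * Handle error interrupts.  A GEM_INTR_RX_TAG_ERR only requires the
 * receiver DMA engine to be brought back up via gem_reset_rxdma();
 * everything else is just counted and reported.
 */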
1619static void
1620gem_eint(struct gem_softc *sc, u_int status)
1621{
1622
1623	sc->sc_ifp->if_ierrors++;
1624	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1625		gem_reset_rxdma(sc);
1626		return;
1627	}
1628
1629	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
1630}
1631
1632void
1633gem_intr(void *v)
1634{
1635	struct gem_softc *sc = v;
1636	uint32_t status, status2;
1637
1638	GEM_LOCK(sc);
1639	status = GEM_BANK1_READ_4(sc, GEM_STATUS);
1640
1641#ifdef GEM_DEBUG
1642	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1643	    device_get_name(sc->sc_dev), __func__, (status >> 19),
1644	    (u_int)status);
1645
1646	/*
1647	 * PCS interrupts must be cleared, otherwise no traffic is passed!
1648	 */
1649	if ((status & GEM_INTR_PCS) != 0) {
1650		status2 =
1651		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
1652		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
1653		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1654			device_printf(sc->sc_dev,
1655			    "%s: PCS link status changed\n", __func__);
1656	}
1657	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1658		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
1659		if ((status2 & GEM_MAC_PAUSED) != 0)
1660			device_printf(sc->sc_dev,
1661			    "%s: PAUSE received (PAUSE time %d slots)\n",
1662			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to non-PAUSE state\n", __func__);
1669	}
1670	if ((status & GEM_INTR_MIF) != 0)
1671		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
1672#endif
1673
1674	if ((status &
1675	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
1676		gem_eint(sc, status);
1677
1678	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1679		gem_rint(sc);
1680
1681	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1682		gem_tint(sc);
1683
1684	if (status & GEM_INTR_TX_MAC) {
1685		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
1686		if ((status2 &
1687		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
1688			device_printf(sc->sc_dev,
1689			    "MAC TX fault, status %x\n", status2);
1690		if ((status2 &
1691		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
1692			gem_init_locked(sc);
1693	}
1694	if (status & GEM_INTR_RX_MAC) {
1695		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
1696		/*
1697		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
1698		 * revisions, GEM_MAC_RX_OVERFLOW happens often due to a
1699		 * silicon bug, so handle it silently.  Moreover, it's
1700		 * likely that the receiver has hung, so reset it.
1701		 */
1702		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
1703			sc->sc_ifp->if_ierrors++;
1704			gem_reset_rxdma(sc);
1705		} else if ((status2 &
1706		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
1707			device_printf(sc->sc_dev,
1708			    "MAC RX fault, status %x\n", status2);
1709	}
1710	GEM_UNLOCK(sc);
1711}
1712
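/*
 * Watchdog handler: return 0 while the timer is disarmed or still counting
 * down; once it expires, report the timeout, reinitialize the chip and
 * return EJUSTRETURN to signal that the interface was reset.
 */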
1713static int
1714gem_watchdog(struct gem_softc *sc)
1715{
1716
1717	GEM_LOCK_ASSERT(sc, MA_OWNED);
1718
1719#ifdef GEM_DEBUG
1720	CTR4(KTR_GEM,
1721	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
1722	    __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
1723	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
1724	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
1725	CTR4(KTR_GEM,
1726	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
1727	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
1728	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
1729	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));
1730#endif
1731
1732	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1733		return (0);
1734
1735	if ((sc->sc_flags & GEM_LINK) != 0)
1736		device_printf(sc->sc_dev, "device timeout\n");
1737	else if (bootverbose)
1738		device_printf(sc->sc_dev, "device timeout (no link)\n");
1739	++sc->sc_ifp->if_oerrors;
1740
1741	/* Try to get more packets going. */
1742	gem_init_locked(sc);
1743	return (EJUSTRETURN);
1744}
1745
1746static void
1747gem_mifinit(struct gem_softc *sc)
1748{
1749
1750	/* Configure the MIF in frame mode. */
1751	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
1752	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1753}
1754
1755/*
1756 * MII interface
1757 *
1758 * The GEM MII interface supports at least three different operating modes:
1759 *
1760 * Bitbang mode is implemented using data, clock and output enable registers.
1761 *
1762 * Frame mode is implemented by loading a complete frame into the frame
1763 * register and polling the valid bit for completion.
1764 *
1765 * Polling mode uses the frame register but completion is indicated by
1766 * an interrupt.
1767 *
1768 */
1769int
1770gem_mii_readreg(device_t dev, int phy, int reg)
1771{
1772	struct gem_softc *sc;
1773	int n;
1774	uint32_t v;
1775
1776#ifdef GEM_DEBUG_PHY
1777	printf("%s: phy %d reg %d\n", __func__, phy, reg);
1778#endif
1779
1780	sc = device_get_softc(dev);
1781	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
1782		return (0);
1783
1784	if ((sc->sc_flags & GEM_SERDES) != 0) {
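		/*
		 * In SERDES mode there is no external MII bus; map the
		 * generic MII registers onto the equivalent PCS registers.
		 */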
1785		switch (reg) {
1786		case MII_BMCR:
1787			reg = GEM_MII_CONTROL;
1788			break;
1789		case MII_BMSR:
1790			reg = GEM_MII_STATUS;
1791			break;
1792		case MII_PHYIDR1:
1793		case MII_PHYIDR2:
1794			return (0);
1795		case MII_ANAR:
1796			reg = GEM_MII_ANAR;
1797			break;
1798		case MII_ANLPAR:
1799			reg = GEM_MII_ANLPAR;
1800			break;
1801		case MII_EXTSR:
1802			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
1803		default:
1804			device_printf(sc->sc_dev,
1805			    "%s: unhandled register %d\n", __func__, reg);
1806			return (0);
1807		}
1808		return (GEM_BANK1_READ_4(sc, reg));
1809	}
1810
1811	/* Construct the frame command. */
1812	v = GEM_MIF_FRAME_READ |
1813	    (phy << GEM_MIF_PHY_SHIFT) |
1814	    (reg << GEM_MIF_REG_SHIFT);
1815
1816	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
1817	for (n = 0; n < 100; n++) {
1818		DELAY(1);
1819		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
1820		if (v & GEM_MIF_FRAME_TA0)
1821			return (v & GEM_MIF_FRAME_DATA);
1822	}
1823
1824	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1825	return (0);
1826}
1827
1828int
1829gem_mii_writereg(device_t dev, int phy, int reg, int val)
1830{
1831	struct gem_softc *sc;
1832	int n;
1833	uint32_t v;
1834
1835#ifdef GEM_DEBUG_PHY
1836	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
1837#endif
1838
1839	sc = device_get_softc(dev);
1840	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
1841		return (0);
1842
1843	if ((sc->sc_flags & GEM_SERDES) != 0) {
1844		switch (reg) {
1845		case MII_BMCR:
1846			reg = GEM_MII_CONTROL;
1847			break;
1848		case MII_BMSR:
1849			reg = GEM_MII_STATUS;
1850			break;
1851		case MII_ANAR:
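			/*
			 * Updating the advertisement register: disable the
			 * PCS, write the new value and the serial link
			 * control, then re-enable the PCS.
			 */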
1852			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
1853			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
1854			    BUS_SPACE_BARRIER_WRITE);
1855			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
1856			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
1857			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
1858			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
1859			    GEM_MII_CONFIG_ENABLE);
1860			return (0);
1861		case MII_ANLPAR:
1862			reg = GEM_MII_ANLPAR;
1863			break;
1864		default:
1865			device_printf(sc->sc_dev,
1866			    "%s: unhandled register %d\n", __func__, reg);
1867			return (0);
1868		}
1869		GEM_BANK1_WRITE_4(sc, reg, val);
1870		return (0);
1871	}
1872
1873	/* Construct the frame command. */
1874	v = GEM_MIF_FRAME_WRITE |
1875	    (phy << GEM_MIF_PHY_SHIFT) |
1876	    (reg << GEM_MIF_REG_SHIFT) |
1877	    (val & GEM_MIF_FRAME_DATA);
1878
1879	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
1880	for (n = 0; n < 100; n++) {
1881		DELAY(1);
1882		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
1883		if (v & GEM_MIF_FRAME_TA0)
1884			return (1);
1885	}
1886
1887	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1888	return (0);
1889}
1890
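/*
 * MII status change callback: track the link state and reprogram the MAC
 * for the negotiated speed and duplex, re-enabling the RX and TX MACs when
 * the interface is running and the link is up.
 */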
1891void
1892gem_mii_statchg(device_t dev)
1893{
1894	struct gem_softc *sc;
1895	int gigabit;
1896	uint32_t rxcfg, txcfg, v;
1897
1898	sc = device_get_softc(dev);
1899
1900#ifdef GEM_DEBUG
1901	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1902		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
1903		    __func__, sc->sc_phyad);
1904#endif
1905
1906	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1907	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1908		sc->sc_flags |= GEM_LINK;
1909	else
1910		sc->sc_flags &= ~GEM_LINK;
1911
1912	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
1913	case IFM_1000_SX:
1914	case IFM_1000_LX:
1915	case IFM_1000_CX:
1916	case IFM_1000_T:
1917		gigabit = 1;
1918		break;
1919	default:
1920		gigabit = 0;
1921	}
1922
1923	/*
1924	 * The configuration done here corresponds to steps F) and G)
1925	 * and, as far as enabling the RX and TX MACs goes, also to step H)
1926	 * of the initialization sequence outlined in section 3.2.1 of
1927	 * the GEM Gigabit Ethernet ASIC Specification.
1928	 */
1929
1930	rxcfg = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
1931	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
1932	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
1933	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1934		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
1935	else if (gigabit != 0) {
1936		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
1937		txcfg |= GEM_MAC_TX_CARR_EXTEND;
1938	}
1939	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
1940	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
1941	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
1942		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
1943	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
1944	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, 0);
1945	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
1946	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
1947		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1948	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);
1949
1950	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
1951	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
1952#ifdef notyet
1953	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
1954	    IFM_ETH_RXPAUSE) != 0)
1955		v |= GEM_MAC_CC_RX_PAUSE;
1956	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
1957	    IFM_ETH_TXPAUSE) != 0)
1958		v |= GEM_MAC_CC_TX_PAUSE;
1959#endif
1960	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);
1961
1962	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
1963	    gigabit != 0)
1964		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
1965		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
1966	else
1967		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
1968		    GEM_MAC_SLOT_TIME_NORMAL);
1969
1970	/* XIF Configuration */
1971	v = GEM_MAC_XIF_LINK_LED;
1972	v |= GEM_MAC_XIF_TX_MII_ENA;
1973	if ((sc->sc_flags & GEM_SERDES) == 0) {
1974		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
1975		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
1976		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) &
1977		    IFM_FDX) == 0)
1978			/* External MII needs echo disable if half duplex. */
1979			v |= GEM_MAC_XIF_ECHO_DISABL;
1980		else
1981			/*
1982			 * Internal MII needs buffer enable.
1983			 * XXX buffer enable only makes sense for an
1984			 * external PHY.
1985			 */
1986			v |= GEM_MAC_XIF_MII_BUF_ENA;
1987	}
1988	if (gigabit != 0)
1989		v |= GEM_MAC_XIF_GMII_MODE;
1990	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1991		v |= GEM_MAC_XIF_FDPLX_LED;
1992	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);
1993
1994	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1995	    (sc->sc_flags & GEM_LINK) != 0) {
1996		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
1997		    txcfg | GEM_MAC_TX_ENABLE);
1998		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
1999		    rxcfg | GEM_MAC_RX_ENABLE);
2000	}
2001}
2002
2003int
2004gem_mediachange(struct ifnet *ifp)
2005{
2006	struct gem_softc *sc = ifp->if_softc;
2007	int error;
2008
2009	/* XXX add support for serial media. */
2010
2011	GEM_LOCK(sc);
2012	error = mii_mediachg(sc->sc_mii);
2013	GEM_UNLOCK(sc);
2014	return (error);
2015}
2016
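/* Report the current media status as obtained from the MII layer. */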
2017void
2018gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2019{
2020	struct gem_softc *sc = ifp->if_softc;
2021
2022	GEM_LOCK(sc);
2023	if ((ifp->if_flags & IFF_UP) == 0) {
2024		GEM_UNLOCK(sc);
2025		return;
2026	}
2027
2028	mii_pollstat(sc->sc_mii);
2029	ifmr->ifm_active = sc->sc_mii->mii_media_active;
2030	ifmr->ifm_status = sc->sc_mii->mii_media_status;
2031	GEM_UNLOCK(sc);
2032}
2033
2034static int
2035gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2036{
2037	struct gem_softc *sc = ifp->if_softc;
2038	struct ifreq *ifr = (struct ifreq *)data;
2039	int error;
2040
2041	error = 0;
2042	switch (cmd) {
2043	case SIOCSIFFLAGS:
2044		GEM_LOCK(sc);
2045		if ((ifp->if_flags & IFF_UP) != 0) {
2046			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2047			    ((ifp->if_flags ^ sc->sc_ifflags) &
2048			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2049				gem_setladrf(sc);
2050			else
2051				gem_init_locked(sc);
2052		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2053			gem_stop(ifp, 0);
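		/*
		 * The link0 flag is used as a knob to toggle UDP transmit
		 * checksum offloading on top of the default checksum
		 * features.
		 */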
2054		if ((ifp->if_flags & IFF_LINK0) != 0)
2055			sc->sc_csum_features |= CSUM_UDP;
2056		else
2057			sc->sc_csum_features &= ~CSUM_UDP;
2058		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2059			ifp->if_hwassist = sc->sc_csum_features;
2060		sc->sc_ifflags = ifp->if_flags;
2061		GEM_UNLOCK(sc);
2062		break;
2063	case SIOCADDMULTI:
2064	case SIOCDELMULTI:
2065		GEM_LOCK(sc);
2066		gem_setladrf(sc);
2067		GEM_UNLOCK(sc);
2068		break;
2069	case SIOCGIFMEDIA:
2070	case SIOCSIFMEDIA:
2071		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2072		break;
2073	case SIOCSIFCAP:
2074		GEM_LOCK(sc);
2075		ifp->if_capenable = ifr->ifr_reqcap;
2076		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2077			ifp->if_hwassist = sc->sc_csum_features;
2078		else
2079			ifp->if_hwassist = 0;
2080		GEM_UNLOCK(sc);
2081		break;
2082	default:
2083		error = ether_ioctl(ifp, cmd, data);
2084		break;
2085	}
2086
2087	return (error);
2088}
2089
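/*
 * Program the RX filter: promiscuous, promiscuous group (all multicast)
 * or a 256-bit hash filter derived from the multicast address list.
 */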
2090static void
2091gem_setladrf(struct gem_softc *sc)
2092{
2093	struct ifnet *ifp = sc->sc_ifp;
2094	struct ifmultiaddr *inm;
2095	int i;
2096	uint32_t hash[16];
2097	uint32_t crc, v;
2098
2099	GEM_LOCK_ASSERT(sc, MA_OWNED);
2100
2101	/* Get the current RX configuration. */
2102	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
2103
2104	/*
2105	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2106	 * and hash filter.  Depending on the case, the right bit will be
2107	 * enabled.
2108	 */
2109	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
2110	    GEM_MAC_RX_PROMISC_GRP);
2111
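	/*
	 * Commit the cleared filter bits and wait for the hash filter
	 * disable to take effect before reprogramming the hash table.
	 */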
2112	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
2113	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
2114	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER,
2115	    0))
2116		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");
2117
2118	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2119		v |= GEM_MAC_RX_PROMISCUOUS;
2120		goto chipit;
2121	}
2122	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2123		v |= GEM_MAC_RX_PROMISC_GRP;
2124		goto chipit;
2125	}
2126
2127	/*
2128	 * Set up multicast address filter by passing all multicast
2129	 * addresses through a crc generator, and then using the high
2130	 * order 8 bits as an index into the 256 bit logical address
2131	 * filter.  The high order 4 bits select the word, while the
2132	 * other 4 bits select the bit within the word (where bit 0
2133	 * is the MSB).
2134	 */
2135
2136	/* Clear the hash table. */
2137	memset(hash, 0, sizeof(hash));
2138
2139	IF_ADDR_LOCK(ifp);
2140	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2141		if (inm->ifma_addr->sa_family != AF_LINK)
2142			continue;
2143		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2144		    inm->ifma_addr), ETHER_ADDR_LEN);
2145
2146		/* We just want the 8 most significant bits. */
2147		crc >>= 24;
2148
2149		/* Set the corresponding bit in the filter. */
2150		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2151	}
2152	IF_ADDR_UNLOCK(ifp);
2153
2154	v |= GEM_MAC_RX_HASH_FILTER;
2155
2156	/* Now load the hash table into the chip (if we are using it). */
2157	for (i = 0; i < 16; i++)
2158		GEM_BANK1_WRITE_4(sc,
2159		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2160		    hash[i]);
2161
2162 chipit:
2163	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
2164}
2165