/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 169269 2007-05-04 19:15:28Z phk $");

/*
 * Driver for Sun GEM ethernet controllers.
 */

#if 0
#define	GEM_DEBUG
#endif

#if 0	/* XXX: In case of emergency, re-enable this. */
#define	GEM_RINT_TIMEOUT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/rman.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/gem/if_gemreg.h>
#include <dev/gem/if_gemvar.h>

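/*
 * Number of times gem_bitwait() polls a register, with a 100us delay per
 * iteration, i.e. a timeout of roughly one second.
 */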
#define TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_start_locked(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	gem_tick(void *);
static int	gem_watchdog(struct gem_softc *);
static void	gem_init(void *);
static void	gem_init_locked(struct gem_softc *);
static void	gem_init_regs(struct gem_softc *);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
    u_int32_t);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#ifdef GEM_RINT_TIMEOUT
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#endif

#define	GEM_NSEGS GEM_NTXDESC

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	gem_reset(sc);
	GEM_UNLOCK(sc);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
gem_suspend(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	GEM_UNLOCK(sc);
}

void
gem_resume(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;

	GEM_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_inited = 0;
	if (ifp->if_flags & IFF_UP)
		gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

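/*
 * gem_txdma_callback:
 *
 *	Callback for bus_dmamap_load_mbuf(); fills in the transmit
 *	descriptors for the mbuf chain just mapped.  If there are not
 *	enough free descriptors, txs_ndescs is set to -1 so that
 *	gem_load_txmbuf() can detect the failure.
 */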
static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	     seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)",  seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}

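/*
 * gem_tick:
 *
 *	One second timer: poll the PHY and check for a watchdog timeout.
 */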
static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	mii_tick(sc->sc_mii);

	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

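/*
 * gem_bitwait:
 *
 *	Poll register r until all bits in clr are clear and all bits in
 *	set are set, or until the poll limit is reached.  Returns 1 on
 *	success and 0 on timeout.
 */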
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_read_4(sc->sc_res[0], r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
#endif
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}


/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
#endif

	callout_stop(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_wdog_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_read_4(sc->sc_res[0], GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	sc->sc_txfree = GEM_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	GEM_LOCK(sc);
	gem_init_locked(sc);
	GEM_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init_locked(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t v;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(sc->sc_ifp, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	bus_write_4(sc->sc_res[0], GEM_INTMASK,
		      ~(GEM_INTR_TX_INTME|
			GEM_INTR_TX_EMPTY|
			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
			GEM_INTR_BERR));
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
			GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
		v|GEM_TX_CONFIG_TXDMA_EN|
		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    (   (sc->sc_rxfifosize / 256) << 12));
	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15.  Give the receiver a swift kick */
	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifflags = ifp->if_flags;
}

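/*
 * gem_load_txmbuf:
 *
 *	Map an mbuf chain for transmission and enqueue it on the hardware
 *	ring.  Returns 0 on success, -1 if no descriptors are available
 *	and a positive errno if the DMA mapping failed.
 */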
static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = m0;

	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}

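/*
 * gem_init_regs:
 *
 *	Set up the MAC: the parameters that survive a reset are programmed
 *	once, then the counters are zeroed and the station address and XIF
 *	configuration are written.
 */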
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);

		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) |
		    (0x2000 << 16));

		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);

		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
}

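/*
 * gem_start:
 *
 *	ifnet if_start entry point; takes the driver lock and hands off
 *	to gem_start_locked() to queue packets to the hardware.
 */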
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;

	GEM_LOCK(sc);
	gem_start_locked(ifp);
	GEM_UNLOCK(sc);
}

static void
gem_start_locked(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_write_4(sc->sc_res[0], GEM_TX_KICK,
			sc->sc_txnext);

		BPF_MTAP(ifp, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), sc->sc_wdog_timer);
#endif
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;


#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
		bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
		bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
		bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
		bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
				(txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
				(txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
		"GEM_TX_DATA_PTR %llx "
		"GEM_TX_COMPLETION %x",
		bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
		((long long) bus_read_4(sc->sc_res[0],
			GEM_TX_DATA_PTR_HI) << 32) |
			     bus_read_4(sc->sc_res[0],
			GEM_TX_DATA_PTR_LO),
		bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
#endif

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		gem_start_locked(ifp);

		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), sc->sc_wdog_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
static void
gem_rint_timeout(arg)
	void *arg;
{
	struct gem_softc *sc = (struct gem_softc *)arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
		sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
#endif
}


/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
	}

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
	    m, segs, &nsegs, BUS_DMA_NOWAIT);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (error != 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		m_freem(m);
		return (ENOBUFS);
	}
	rxs->rxs_paddr = segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}


static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}


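/*
 * gem_intr:
 *
 *	Interrupt service routine: dispatch to the error, transmit and
 *	receive handlers according to the status register, and report
 *	MAC faults.
 */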
void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	u_int32_t status;

	GEM_LOCK(sc);
	status = bus_read_4(sc->sc_res[0], GEM_STATUS);
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
		device_get_name(sc->sc_dev), (status>>19),
		(u_int)status);
#endif

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
		/*
		 * On some chip revisions GEM_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW)
			gem_init_locked(sc);
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
	}
	GEM_UNLOCK(sc);
}

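/*
 * gem_watchdog:
 *
 *	Called from gem_tick() once a second; returns EJUSTRETURN after
 *	reinitializing the chip on a transmit timeout, 0 otherwise.
 */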
static int
gem_watchdog(sc)
	struct gem_softc *sc;
{

	GEM_LOCK_ASSERT(sc, MA_OWNED);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
		"GEM_MAC_RX_CONFIG %x",
		bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
		bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
		bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
		"GEM_MAC_TX_CONFIG %x",
		bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
		bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
		bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	device_printf(sc->sc_dev, "device timeout\n");
	++sc->sc_ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_init_locked(sc);
	return (EJUSTRETURN);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
		GEM_MIF_FRAME_READ;

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE			|
	    (phy << GEM_MIF_PHY_SHIFT)		|
	    (reg << GEM_MIF_REG_SHIFT)		|
	    (val & GEM_MIF_FRAME_DATA);

	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

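/*
 * gem_mii_statchg:
 *
 *	Callback from the MII layer on a link status change; reprogram
 *	the TX MAC and XIF configuration to match the new media settings.
 */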
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance;
#endif
	u_int32_t v;

#ifdef GEM_DEBUG
	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
			sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
	 		/* half duplex -- disable echo */
	 		v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;
	int error;

	/* XXX Add support for serial media. */

	GEM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	GEM_UNLOCK(sc);
	return (error);
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	GEM_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		GEM_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	GEM_UNLOCK(sc);
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEM_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		GEM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		GEM_LOCK(sc);
		gem_setladrf(sc);
		GEM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	/* Try to get things going again */
	GEM_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		gem_start_locked(ifp);
	GEM_UNLOCK(sc);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	/* Get current RX configuration */
	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */
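	/*
	 * Example: a CRC whose top byte is 0xa5 yields index 0xa5;
	 * word hash[0xa] gets bit (15 - 0x5) set.
	 */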

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}
	IF_ADDR_UNLOCK(ifp);

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_write_4(sc->sc_res[0],
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
}