1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34/*
35 * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
36 */
37
38#if 0
39#define	GEM_DEBUG
40#endif
41
42#if 0	/* XXX: In case of emergency, re-enable this. */
43#define	GEM_RINT_TIMEOUT
44#endif
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/bus.h>
49#include <sys/callout.h>
50#include <sys/endian.h>
51#include <sys/mbuf.h>
52#include <sys/malloc.h>
53#include <sys/kernel.h>
54#include <sys/lock.h>
55#include <sys/module.h>
56#include <sys/mutex.h>
57#include <sys/socket.h>
58#include <sys/sockio.h>
59#include <sys/rman.h>
60
61#include <net/bpf.h>
62#include <net/ethernet.h>
63#include <net/if.h>
64#include <net/if_arp.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_vlan_var.h>
69
70#include <netinet/in.h>
71#include <netinet/in_systm.h>
72#include <netinet/ip.h>
73#include <netinet/tcp.h>
74#include <netinet/udp.h>
75
76#include <machine/bus.h>
77
78#include <dev/mii/mii.h>
79#include <dev/mii/miivar.h>
80
81#include <dev/gem/if_gemreg.h>
82#include <dev/gem/if_gemvar.h>
83
84CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
85CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
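/*
 * The ring sizes have to be powers of two between 32 and 8192 because
 * those are the only values gem_ringsize() can encode for the RX/TX
 * configuration registers.
 */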
86
87#define	GEM_TRIES	10000
88
/*
 * The hardware supports basic TCP/UDP checksum offloading.  However,
 * it does not compensate for UDP datagrams whose computed checksum
 * comes out as 0x0 (which UDP reserves to mean "no checksum").  As a
 * safeguard, UDP checksum offload is therefore disabled by default.
 * It can be re-enabled by setting the special link option link0 with
 * ifconfig(8).
 */
96#define	GEM_CSUM_FEATURES	(CSUM_TCP)
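
/*
 * Example (the interface name is illustrative): UDP TX checksum
 * offloading can be re-enabled at runtime as described above with
 *
 *	ifconfig gem0 link0
 */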
97
98static int	gem_add_rxbuf(struct gem_softc *sc, int idx);
99static int	gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r,
100		    uint32_t clr, uint32_t set);
101static void	gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
102		    int nsegs, int error);
103static int	gem_disable_rx(struct gem_softc *sc);
104static int	gem_disable_tx(struct gem_softc *sc);
105static void	gem_eint(struct gem_softc *sc, u_int status);
106static void	gem_init(void *xsc);
107static void	gem_init_locked(struct gem_softc *sc);
108static void	gem_init_regs(struct gem_softc *sc);
109static int	gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
110static int	gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head);
111static int	gem_meminit(struct gem_softc *sc);
112static void	gem_mifinit(struct gem_softc *sc);
113static void	gem_reset(struct gem_softc *sc);
114static int	gem_reset_rx(struct gem_softc *sc);
115static void	gem_reset_rxdma(struct gem_softc *sc);
116static int	gem_reset_tx(struct gem_softc *sc);
117static u_int	gem_ringsize(u_int sz);
118static void	gem_rint(struct gem_softc *sc);
119#ifdef GEM_RINT_TIMEOUT
120static void	gem_rint_timeout(void *arg);
121#endif
122static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
123static void	gem_rxdrain(struct gem_softc *sc);
124static void	gem_setladrf(struct gem_softc *sc);
125static void	gem_start(struct ifnet *ifp);
126static void	gem_start_locked(struct ifnet *ifp);
127static void	gem_stop(struct ifnet *ifp, int disable);
128static void	gem_tick(void *arg);
129static void	gem_tint(struct gem_softc *sc);
130static inline void gem_txkick(struct gem_softc *sc);
131static int	gem_watchdog(struct gem_softc *sc);
132
133devclass_t gem_devclass;
134DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
135MODULE_DEPEND(gem, miibus, 1, 1, 1);
136
137#ifdef GEM_DEBUG
138#include <sys/ktr.h>
139#define	KTR_GEM		KTR_SPARE2
140#endif
141
142#define	GEM_BANK1_BITWAIT(sc, r, clr, set)				\
143	gem_bitwait((sc), GEM_RES_BANK1, (r), (clr), (set))
144#define	GEM_BANK2_BITWAIT(sc, r, clr, set)				\
145	gem_bitwait((sc), GEM_RES_BANK2, (r), (clr), (set))
146
147int
148gem_attach(struct gem_softc *sc)
149{
150	struct gem_txsoft *txs;
151	struct ifnet *ifp;
152	int error, i, phy;
153	uint32_t v;
154
155	if (bootverbose)
156		device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
157
158	/* Set up ifnet structure. */
159	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
160	if (ifp == NULL)
161		return (ENOSPC);
162	sc->sc_csum_features = GEM_CSUM_FEATURES;
163	ifp->if_softc = sc;
164	if_initname(ifp, device_get_name(sc->sc_dev),
165	    device_get_unit(sc->sc_dev));
166	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
167	ifp->if_start = gem_start;
168	ifp->if_ioctl = gem_ioctl;
169	ifp->if_init = gem_init;
170	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
171	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
172	IFQ_SET_READY(&ifp->if_snd);
173
174	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
175#ifdef GEM_RINT_TIMEOUT
176	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
177#endif
178
179	/* Make sure the chip is stopped. */
180	gem_reset(sc);
181
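	/*
	 * DMA tags: a parent tag limited to 32-bit addresses, a tag for
	 * single-cluster RX buffers, a tag for TX mbuf chains of up to
	 * GEM_NTXSEGS segments and a page-aligned tag for the control
	 * data (the descriptor rings).
	 */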
182	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
183	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
184	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
185	    NULL, &sc->sc_pdmatag);
186	if (error != 0)
187		goto fail_ifnet;
188
189	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
190	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
191	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
192	if (error != 0)
193		goto fail_ptag;
194
195	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
196	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
197	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
198	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
199	if (error != 0)
200		goto fail_rtag;
201
202	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
203	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
204	    sizeof(struct gem_control_data), 1,
205	    sizeof(struct gem_control_data), 0,
206	    NULL, NULL, &sc->sc_cdmatag);
207	if (error != 0)
208		goto fail_ttag;
209
210	/*
211	 * Allocate the control data structures, create and load the
212	 * DMA map for it.
213	 */
214	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
215	    (void **)&sc->sc_control_data,
216	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
217	    &sc->sc_cddmamap)) != 0) {
218		device_printf(sc->sc_dev,
219		    "unable to allocate control data, error = %d\n", error);
220		goto fail_ctag;
221	}
222
223	sc->sc_cddma = 0;
224	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
225	    sc->sc_control_data, sizeof(struct gem_control_data),
226	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
227		device_printf(sc->sc_dev,
228		    "unable to load control data DMA map, error = %d\n",
229		    error);
230		goto fail_cmem;
231	}
232
233	/*
234	 * Initialize the transmit job descriptors.
235	 */
236	STAILQ_INIT(&sc->sc_txfreeq);
237	STAILQ_INIT(&sc->sc_txdirtyq);
238
239	/*
240	 * Create the transmit buffer DMA maps.
241	 */
242	error = ENOMEM;
243	for (i = 0; i < GEM_TXQUEUELEN; i++) {
244		txs = &sc->sc_txsoft[i];
245		txs->txs_mbuf = NULL;
246		txs->txs_ndescs = 0;
247		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
248		    &txs->txs_dmamap)) != 0) {
249			device_printf(sc->sc_dev,
250			    "unable to create TX DMA map %d, error = %d\n",
251			    i, error);
252			goto fail_txd;
253		}
254		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
255	}
256
257	/*
258	 * Create the receive buffer DMA maps.
259	 */
260	for (i = 0; i < GEM_NRXDESC; i++) {
261		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
262		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
263			device_printf(sc->sc_dev,
264			    "unable to create RX DMA map %d, error = %d\n",
265			    i, error);
266			goto fail_rxd;
267		}
268		sc->sc_rxsoft[i].rxs_mbuf = NULL;
269	}
270
	/* Bypass the PHY probe if we already know that a SERDES is used. */
272	if ((sc->sc_flags & GEM_SERDES) != 0)
273		goto serdes;
274
275	/* Bad things will happen when touching this register on ERI. */
276	if (sc->sc_variant != GEM_SUN_ERI) {
277		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
278		    GEM_MII_DATAPATH_MII);
279		GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
280		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
281	}
282
283	gem_mifinit(sc);
284
285	/*
286	 * Look for an external PHY.
287	 */
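	/*
	 * GEM_MIF_CONFIG_MDI1 is used here as an indication that a
	 * transceiver is present on the external MDIO bus; MDI0 (checked
	 * in the internal PHY fallback below) is the corresponding bit
	 * for the internal bus.
	 */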
288	error = ENXIO;
289	v = GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG);
290	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
291		v |= GEM_MIF_CONFIG_PHY_SEL;
292		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
293		GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
294		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
295		switch (sc->sc_variant) {
296		case GEM_SUN_ERI:
297			phy = GEM_PHYAD_EXTERNAL;
298			break;
299		default:
300			phy = MII_PHY_ANY;
301			break;
302		}
303		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
304		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
305		    MII_OFFSET_ANY, MIIF_DOPAUSE);
306	}
307
308	/*
309	 * Fall back on an internal PHY if no external PHY was found.
310	 * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be
311	 * trusted when the firmware has powered down the chip.
312	 */
313	if (error != 0 &&
314	    ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) {
315		v &= ~GEM_MIF_CONFIG_PHY_SEL;
316		GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG, v);
317		GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
318		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
319		switch (sc->sc_variant) {
320		case GEM_SUN_ERI:
321		case GEM_APPLE_K2_GMAC:
322			phy = GEM_PHYAD_INTERNAL;
323			break;
324		case GEM_APPLE_GMAC:
325			phy = GEM_PHYAD_EXTERNAL;
326			break;
327		default:
328			phy = MII_PHY_ANY;
329			break;
330		}
331		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
332		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy,
333		    MII_OFFSET_ANY, MIIF_DOPAUSE);
334	}
335
336	/*
337	 * Try the external PCS SERDES if we didn't find any PHYs.
338	 */
339	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
340 serdes:
341		GEM_BANK1_WRITE_4(sc, GEM_MII_DATAPATH_MODE,
342		    GEM_MII_DATAPATH_SERDES);
343		GEM_BANK1_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4,
344		    BUS_SPACE_BARRIER_WRITE);
345		GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
346		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
347		GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
348		    BUS_SPACE_BARRIER_WRITE);
349		GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE);
350		GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
351		    BUS_SPACE_BARRIER_WRITE);
352		sc->sc_flags |= GEM_SERDES;
353		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
354		    gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK,
355		    GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
356	}
357	if (error != 0) {
358		device_printf(sc->sc_dev, "attaching PHYs failed\n");
359		goto fail_rxd;
360	}
361	sc->sc_mii = device_get_softc(sc->sc_miibus);
362
363	/*
364	 * From this point forward, the attachment cannot fail.  A failure
365	 * before this point releases all resources that may have been
366	 * allocated.
367	 */
368
369	/* Get RX FIFO size. */
370	sc->sc_rxfifosize = 64 *
371	    GEM_BANK1_READ_4(sc, GEM_RX_FIFO_SIZE);
372
373	/* Get TX FIFO size. */
374	v = GEM_BANK1_READ_4(sc, GEM_TX_FIFO_SIZE);
375	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
376	    sc->sc_rxfifosize / 1024, v / 16);
377
378	/* Attach the interface. */
379	ether_ifattach(ifp, sc->sc_enaddr);
380
381	/*
382	 * Tell the upper layer(s) we support long frames/checksum offloads.
383	 */
384	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
385	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
386	ifp->if_hwassist |= sc->sc_csum_features;
387	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
388
389	return (0);
390
391	/*
392	 * Free any resources we've allocated during the failed attach
393	 * attempt.  Do this in reverse order and fall through.
394	 */
395 fail_rxd:
396	for (i = 0; i < GEM_NRXDESC; i++)
397		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
398			bus_dmamap_destroy(sc->sc_rdmatag,
399			    sc->sc_rxsoft[i].rxs_dmamap);
400 fail_txd:
401	for (i = 0; i < GEM_TXQUEUELEN; i++)
402		if (sc->sc_txsoft[i].txs_dmamap != NULL)
403			bus_dmamap_destroy(sc->sc_tdmatag,
404			    sc->sc_txsoft[i].txs_dmamap);
405	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
406 fail_cmem:
407	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
408	    sc->sc_cddmamap);
409 fail_ctag:
410	bus_dma_tag_destroy(sc->sc_cdmatag);
411 fail_ttag:
412	bus_dma_tag_destroy(sc->sc_tdmatag);
413 fail_rtag:
414	bus_dma_tag_destroy(sc->sc_rdmatag);
415 fail_ptag:
416	bus_dma_tag_destroy(sc->sc_pdmatag);
417 fail_ifnet:
418	if_free(ifp);
419	return (error);
420}
421
422void
423gem_detach(struct gem_softc *sc)
424{
425	struct ifnet *ifp = sc->sc_ifp;
426	int i;
427
428	ether_ifdetach(ifp);
429	GEM_LOCK(sc);
430	gem_stop(ifp, 1);
431	GEM_UNLOCK(sc);
432	callout_drain(&sc->sc_tick_ch);
433#ifdef GEM_RINT_TIMEOUT
434	callout_drain(&sc->sc_rx_ch);
435#endif
436	if_free(ifp);
437	device_delete_child(sc->sc_dev, sc->sc_miibus);
438
439	for (i = 0; i < GEM_NRXDESC; i++)
440		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
441			bus_dmamap_destroy(sc->sc_rdmatag,
442			    sc->sc_rxsoft[i].rxs_dmamap);
443	for (i = 0; i < GEM_TXQUEUELEN; i++)
444		if (sc->sc_txsoft[i].txs_dmamap != NULL)
445			bus_dmamap_destroy(sc->sc_tdmatag,
446			    sc->sc_txsoft[i].txs_dmamap);
447	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
448	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
449	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
450	    sc->sc_cddmamap);
451	bus_dma_tag_destroy(sc->sc_cdmatag);
452	bus_dma_tag_destroy(sc->sc_tdmatag);
453	bus_dma_tag_destroy(sc->sc_rdmatag);
454	bus_dma_tag_destroy(sc->sc_pdmatag);
455}
456
457void
458gem_suspend(struct gem_softc *sc)
459{
460	struct ifnet *ifp = sc->sc_ifp;
461
462	GEM_LOCK(sc);
463	gem_stop(ifp, 0);
464	GEM_UNLOCK(sc);
465}
466
467void
468gem_resume(struct gem_softc *sc)
469{
470	struct ifnet *ifp = sc->sc_ifp;
471
472	GEM_LOCK(sc);
473	/*
474	 * On resume all registers have to be initialized again like
475	 * after power-on.
476	 */
477	sc->sc_flags &= ~GEM_INITED;
478	if (ifp->if_flags & IFF_UP)
479		gem_init_locked(sc);
480	GEM_UNLOCK(sc);
481}
482
483static inline void
484gem_rxcksum(struct mbuf *m, uint64_t flags)
485{
486	struct ether_header *eh;
487	struct ip *ip;
488	struct udphdr *uh;
489	uint16_t *opts;
490	int32_t hlen, len, pktlen;
491	uint32_t temp32;
492	uint16_t cksum;
493
494	pktlen = m->m_pkthdr.len;
495	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
496		return;
497	eh = mtod(m, struct ether_header *);
498	if (eh->ether_type != htons(ETHERTYPE_IP))
499		return;
500	ip = (struct ip *)(eh + 1);
501	if (ip->ip_v != IPVERSION)
502		return;
503
504	hlen = ip->ip_hl << 2;
505	pktlen -= sizeof(struct ether_header);
506	if (hlen < sizeof(struct ip))
507		return;
508	if (ntohs(ip->ip_len) < hlen)
509		return;
510	if (ntohs(ip->ip_len) != pktlen)
511		return;
512	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
513		return;	/* Cannot handle fragmented packet. */
514
515	switch (ip->ip_p) {
516	case IPPROTO_TCP:
517		if (pktlen < (hlen + sizeof(struct tcphdr)))
518			return;
519		break;
520	case IPPROTO_UDP:
521		if (pktlen < (hlen + sizeof(struct udphdr)))
522			return;
523		uh = (struct udphdr *)((uint8_t *)ip + hlen);
524		if (uh->uh_sum == 0)
525			return; /* no checksum */
526		break;
527	default:
528		return;
529	}
530
531	cksum = ~(flags & GEM_RD_CHECKSUM);
532	/* checksum fixup for IP options */
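	/*
	 * The hardware starts summing at the fixed offset of
	 * ETHER_HDR_LEN + sizeof(struct ip) programmed via
	 * GEM_RX_CONFIG_CXM_START_SHFT, so any IP option bytes were
	 * included in its sum and have to be subtracted back out here.
	 */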
533	len = hlen - sizeof(struct ip);
534	if (len > 0) {
535		opts = (uint16_t *)(ip + 1);
536		for (; len > 0; len -= sizeof(uint16_t), opts++) {
537			temp32 = cksum - *opts;
538			temp32 = (temp32 >> 16) + (temp32 & 65535);
539			cksum = temp32 & 65535;
540		}
541	}
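	/*
	 * Only CSUM_DATA_VALID is set (not CSUM_PSEUDO_HDR), so the
	 * protocol layer will still fold the pseudo header into the
	 * checksum delivered in csum_data.
	 */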
542	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
543	m->m_pkthdr.csum_data = cksum;
544}
545
546static void
547gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
548{
549	struct gem_softc *sc = xsc;
550
551	if (error != 0)
552		return;
553	if (nsegs != 1)
554		panic("%s: bad control buffer segment count", __func__);
555	sc->sc_cddma = segs[0].ds_addr;
556}
557
558static void
559gem_tick(void *arg)
560{
561	struct gem_softc *sc = arg;
562	struct ifnet *ifp = sc->sc_ifp;
563	uint32_t v;
564
565	GEM_LOCK_ASSERT(sc, MA_OWNED);
566
567	/*
568	 * Unload collision and error counters.
569	 */
570	ifp->if_collisions +=
571	    GEM_BANK1_READ_4(sc, GEM_MAC_NORM_COLL_CNT) +
572	    GEM_BANK1_READ_4(sc, GEM_MAC_FIRST_COLL_CNT);
573	v = GEM_BANK1_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) +
574	    GEM_BANK1_READ_4(sc, GEM_MAC_LATE_COLL_CNT);
575	ifp->if_collisions += v;
576	ifp->if_oerrors += v;
577	ifp->if_ierrors +=
578	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) +
579	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) +
580	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) +
581	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CODE_VIOL);
582
583	/*
584	 * Then clear the hardware counters.
585	 */
586	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
587	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
588	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
589	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
590	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
591	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
592	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
593	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
594
595	mii_tick(sc->sc_mii);
596
597	if (gem_watchdog(sc) == EJUSTRETURN)
598		return;
599
600	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
601}
602
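/*
 * Poll a register until all bits in `clr' have cleared and all bits in
 * `set' are set.  With GEM_TRIES polls of 100us each this gives up after
 * roughly one second.  Returns 1 on success and 0 on timeout.
 */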
603static int
604gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
605    uint32_t set)
606{
607	int i;
608	uint32_t reg;
609
610	for (i = GEM_TRIES; i--; DELAY(100)) {
611		reg = GEM_BANKN_READ_M(bank, 4, sc, r);
612		if ((reg & clr) == 0 && (reg & set) == set)
613			return (1);
614	}
615	return (0);
616}
617
618static void
619gem_reset(struct gem_softc *sc)
620{
621
622#ifdef GEM_DEBUG
623	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
624#endif
625	gem_reset_rx(sc);
626	gem_reset_tx(sc);
627
628	/* Do a full reset. */
629	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX |
630	    (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
631	    GEM_RESET_CLSZ_SHFT : 0));
632	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
633	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
634	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
635		device_printf(sc->sc_dev, "cannot reset device\n");
636}
637
638static void
639gem_rxdrain(struct gem_softc *sc)
640{
641	struct gem_rxsoft *rxs;
642	int i;
643
644	for (i = 0; i < GEM_NRXDESC; i++) {
645		rxs = &sc->sc_rxsoft[i];
646		if (rxs->rxs_mbuf != NULL) {
647			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
648			    BUS_DMASYNC_POSTREAD);
649			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
650			m_freem(rxs->rxs_mbuf);
651			rxs->rxs_mbuf = NULL;
652		}
653	}
654}
655
656static void
657gem_stop(struct ifnet *ifp, int disable)
658{
659	struct gem_softc *sc = ifp->if_softc;
660	struct gem_txsoft *txs;
661
662#ifdef GEM_DEBUG
663	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
664#endif
665
666	callout_stop(&sc->sc_tick_ch);
667#ifdef GEM_RINT_TIMEOUT
668	callout_stop(&sc->sc_rx_ch);
669#endif
670
671	gem_reset_tx(sc);
672	gem_reset_rx(sc);
673
674	/*
675	 * Release any queued transmit buffers.
676	 */
677	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
678		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
679		if (txs->txs_ndescs != 0) {
680			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
681			    BUS_DMASYNC_POSTWRITE);
682			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
683			if (txs->txs_mbuf != NULL) {
684				m_freem(txs->txs_mbuf);
685				txs->txs_mbuf = NULL;
686			}
687		}
688		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
689	}
690
691	if (disable)
692		gem_rxdrain(sc);
693
694	/*
695	 * Mark the interface down and cancel the watchdog timer.
696	 */
697	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
698	sc->sc_flags &= ~GEM_LINK;
699	sc->sc_wdog_timer = 0;
700}
701
702static int
703gem_reset_rx(struct gem_softc *sc)
704{
705
706	/*
707	 * Resetting while DMA is in progress can cause a bus hang, so we
708	 * disable DMA first.
709	 */
710	(void)gem_disable_rx(sc);
711	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG, 0);
712	GEM_BANK1_BARRIER(sc, GEM_RX_CONFIG, 4,
713	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
714	if (!GEM_BANK1_BITWAIT(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
715		device_printf(sc->sc_dev, "cannot disable RX DMA\n");
716
717	/* Wait 5ms extra. */
718	DELAY(5000);
719
720	/* Reset the ERX. */
721	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_RX |
722	    (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
723	    GEM_RESET_CLSZ_SHFT : 0));
724	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
725	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
726	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_RX, 0)) {
727		device_printf(sc->sc_dev, "cannot reset receiver\n");
728		return (1);
729	}
730
731	/* Finally, reset RX MAC. */
732	GEM_BANK1_WRITE_4(sc, GEM_MAC_RXRESET, 1);
733	GEM_BANK1_BARRIER(sc, GEM_MAC_RXRESET, 4,
734	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
735	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RXRESET, 1, 0)) {
736		device_printf(sc->sc_dev, "cannot reset RX MAC\n");
737		return (1);
738	}
739
740	return (0);
741}
742
/*
 * Reset the receiver DMA engine.
 *
 * Intended for use in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW,
 * etc. in order to reset the receiver DMA engine only and not do a full
 * reset, which among other things also downs the link and clears the FIFOs.
 */
750static void
751gem_reset_rxdma(struct gem_softc *sc)
752{
753	int i;
754
755	if (gem_reset_rx(sc) != 0) {
756		sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
757		return (gem_init_locked(sc));
758	}
759	for (i = 0; i < GEM_NRXDESC; i++)
760		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
761			GEM_UPDATE_RXDESC(sc, i);
762	sc->sc_rxptr = 0;
763	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
764
765	/* NOTE: we use only 32-bit DMA addresses here. */
766	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
767	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
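	/*
	 * The kick register has to point to the descriptor after the last
	 * valid one; leaving the final 4 descriptors out keeps the value a
	 * multiple of 4, matching the batches in which the DMA engine
	 * fetches and updates descriptors (see gem_rint()).
	 */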
768	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
769	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
770	    gem_ringsize(GEM_NRXDESC /* XXX */) |
771	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
772	    GEM_RX_CONFIG_CXM_START_SHFT) |
773	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
774	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
	/* Adjusting for the SBus clock probably isn't worth the fuss. */
776	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
777	    ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
778	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
779	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
780	    (3 * sc->sc_rxfifosize / 256) |
781	    ((sc->sc_rxfifosize / 256) << 12));
782	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
783	    GEM_BANK1_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
784	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
785	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
786	/*
787	 * Clear the RX filter and reprogram it.  This will also set the
788	 * current RX MAC configuration and enable it.
789	 */
790	gem_setladrf(sc);
791}
792
793static int
794gem_reset_tx(struct gem_softc *sc)
795{
796
797	/*
798	 * Resetting while DMA is in progress can cause a bus hang, so we
799	 * disable DMA first.
800	 */
801	(void)gem_disable_tx(sc);
802	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, 0);
803	GEM_BANK1_BARRIER(sc, GEM_TX_CONFIG, 4,
804	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
805	if (!GEM_BANK1_BITWAIT(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
806		device_printf(sc->sc_dev, "cannot disable TX DMA\n");
807
808	/* Wait 5ms extra. */
809	DELAY(5000);
810
811	/* Finally, reset the ETX. */
812	GEM_BANK2_WRITE_4(sc, GEM_RESET, GEM_RESET_TX |
813	    (sc->sc_variant == GEM_SUN_ERI ? GEM_ERI_CACHE_LINE_SIZE <<
814	    GEM_RESET_CLSZ_SHFT : 0));
815	GEM_BANK2_BARRIER(sc, GEM_RESET, 4,
816	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
817	if (!GEM_BANK2_BITWAIT(sc, GEM_RESET, GEM_RESET_TX, 0)) {
818		device_printf(sc->sc_dev, "cannot reset transmitter\n");
819		return (1);
820	}
821	return (0);
822}
823
824static int
825gem_disable_rx(struct gem_softc *sc)
826{
827
828	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
829	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE);
830	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
831	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
832	if (GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
833		return (1);
834	device_printf(sc->sc_dev, "cannot disable RX MAC\n");
835	return (0);
836}
837
838static int
839gem_disable_tx(struct gem_softc *sc)
840{
841
842	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
843	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE);
844	GEM_BANK1_BARRIER(sc, GEM_MAC_TX_CONFIG, 4,
845	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
846	if (GEM_BANK1_BITWAIT(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
847		return (1);
848	device_printf(sc->sc_dev, "cannot disable TX MAC\n");
849	return (0);
850}
851
852static int
853gem_meminit(struct gem_softc *sc)
854{
855	struct gem_rxsoft *rxs;
856	int error, i;
857
858	GEM_LOCK_ASSERT(sc, MA_OWNED);
859
860	/*
861	 * Initialize the transmit descriptor ring.
862	 */
863	for (i = 0; i < GEM_NTXDESC; i++) {
864		sc->sc_txdescs[i].gd_flags = 0;
865		sc->sc_txdescs[i].gd_addr = 0;
866	}
867	sc->sc_txfree = GEM_MAXTXFREE;
868	sc->sc_txnext = 0;
869	sc->sc_txwin = 0;
870
871	/*
872	 * Initialize the receive descriptor and receive job
873	 * descriptor rings.
874	 */
875	for (i = 0; i < GEM_NRXDESC; i++) {
876		rxs = &sc->sc_rxsoft[i];
877		if (rxs->rxs_mbuf == NULL) {
878			if ((error = gem_add_rxbuf(sc, i)) != 0) {
879				device_printf(sc->sc_dev,
880				    "unable to allocate or map RX buffer %d, "
881				    "error = %d\n", i, error);
882				/*
883				 * XXX we should attempt to run with fewer
884				 * receive buffers instead of just failing.
885				 */
886				gem_rxdrain(sc);
887				return (1);
888			}
889		} else
890			GEM_INIT_RXDESC(sc, i);
891	}
892	sc->sc_rxptr = 0;
893
894	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
895
896	return (0);
897}
898
899static u_int
900gem_ringsize(u_int sz)
901{
902
903	switch (sz) {
904	case 32:
905		return (GEM_RING_SZ_32);
906	case 64:
907		return (GEM_RING_SZ_64);
908	case 128:
909		return (GEM_RING_SZ_128);
910	case 256:
911		return (GEM_RING_SZ_256);
912	case 512:
913		return (GEM_RING_SZ_512);
914	case 1024:
915		return (GEM_RING_SZ_1024);
916	case 2048:
917		return (GEM_RING_SZ_2048);
918	case 4096:
919		return (GEM_RING_SZ_4096);
920	case 8192:
921		return (GEM_RING_SZ_8192);
922	default:
923		printf("%s: invalid ring size %d\n", __func__, sz);
924		return (GEM_RING_SZ_32);
925	}
926}
927
928static void
929gem_init(void *xsc)
930{
931	struct gem_softc *sc = xsc;
932
933	GEM_LOCK(sc);
934	gem_init_locked(sc);
935	GEM_UNLOCK(sc);
936}
937
938/*
939 * Initialization of interface; set up initialization block
940 * and transmit/receive descriptor rings.
941 */
942static void
943gem_init_locked(struct gem_softc *sc)
944{
945	struct ifnet *ifp = sc->sc_ifp;
946	uint32_t v;
947
948	GEM_LOCK_ASSERT(sc, MA_OWNED);
949
950	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
951		return;
952
953#ifdef GEM_DEBUG
954	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
955	    __func__);
956#endif
957	/*
958	 * Initialization sequence.  The numbered steps below correspond
959	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
960	 * Channel Engine manual (part of the PCIO manual).
961	 * See also the STP2002-STQ document from Sun Microsystems.
962	 */
963
964	/* step 1 & 2.  Reset the Ethernet Channel. */
965	gem_stop(ifp, 0);
966	gem_reset(sc);
967#ifdef GEM_DEBUG
968	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
969	    __func__);
970#endif
971
972	if ((sc->sc_flags & GEM_SERDES) == 0)
973		/* Re-initialize the MIF. */
974		gem_mifinit(sc);
975
976	/* step 3.  Setup data structures in host memory. */
977	if (gem_meminit(sc) != 0)
978		return;
979
980	/* step 4.  TX MAC registers & counters */
981	gem_init_regs(sc);
982
983	/* step 5.  RX MAC registers & counters */
984
985	/* step 6 & 7.  Program Descriptor Ring Base Addresses. */
986	/* NOTE: we use only 32-bit DMA addresses here. */
987	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0);
988	GEM_BANK1_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
989
990	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
991	GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
992#ifdef GEM_DEBUG
993	CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx",
994	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
995#endif
996
997	/* step 8.  Global Configuration & Interrupt Mask */
998
999	/*
1000	 * Set the internal arbitration to "infinite" bursts of the
1001	 * maximum length of 31 * 64 bytes so DMA transfers aren't
1002	 * split up in cache line size chunks.  This greatly improves
1003	 * RX performance.
1004	 * Enable silicon bug workarounds for the Apple variants.
1005	 */
1006	GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
1007	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
1008	    ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
1009	    GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
1010	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
1011
1012	GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
1013	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
1014	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
1015	    GEM_INTR_BERR
1016#ifdef GEM_DEBUG
1017	    | GEM_INTR_PCS | GEM_INTR_MIF
1018#endif
1019	    ));
1020	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
1021	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
1022	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
1023	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
1024	    GEM_MAC_TX_PEAK_EXP);
1025#ifdef GEM_DEBUG
1026	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
1027	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
1028#else
1029	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
1030	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
1031#endif
1032
1033	/* step 9.  ETX Configuration: use mostly default values. */
1034
1035	/* Enable DMA. */
1036	v = gem_ringsize(GEM_NTXDESC);
1037	/* Set TX FIFO threshold and enable DMA. */
1038	v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
1039	    GEM_TX_CONFIG_TXFIFO_TH;
1040	GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
1041
1042	/* step 10.  ERX Configuration */
1043
1044	/* Encode Receive Descriptor ring size. */
1045	v = gem_ringsize(GEM_NRXDESC /* XXX */);
1046	/* RX TCP/UDP checksum offset */
1047	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1048	    GEM_RX_CONFIG_CXM_START_SHFT);
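	/* gem_rxcksum() relies on this fixed checksum start offset. */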
1049	/* Set RX FIFO threshold, set first byte offset and enable DMA. */
1050	GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
1051	    v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
1052	    (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
1053	    GEM_RX_CONFIG_RXDMA_EN);
1054
	/* Adjusting for the SBus clock probably isn't worth the fuss. */
1056	GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
1057	    ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
1058	    GEM_RX_BLANKING_TIME_SHIFT) | 6);
1059
1060	/*
1061	 * The following value is for an OFF Threshold of about 3/4 full
1062	 * and an ON Threshold of 1/4 full.
1063	 */
1064	GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
1065	    (3 * sc->sc_rxfifosize / 256) |
1066	    ((sc->sc_rxfifosize / 256) << 12));
1067
1068	/* step 11.  Configure Media. */
1069
1070	/* step 12.  RX_MAC Configuration Register */
1071	v = GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG);
1072	v &= ~GEM_MAC_RX_ENABLE;
1073	v |= GEM_MAC_RX_STRIP_CRC;
1074	sc->sc_mac_rxcfg = v;
1075	/*
1076	 * Clear the RX filter and reprogram it.  This will also set the
1077	 * current RX MAC configuration and enable it.
1078	 */
1079	gem_setladrf(sc);
1080
1081	/* step 13.  TX_MAC Configuration Register */
1082	v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
1083	v |= GEM_MAC_TX_ENABLE;
1084	(void)gem_disable_tx(sc);
1085	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, v);
1086
1087	/* step 14.  Issue Transmit Pending command. */
1088
1089	/* step 15.  Give the receiver a swift kick. */
1090	GEM_BANK1_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4);
1091
1092	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1093	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1094
1095	mii_mediachg(sc->sc_mii);
1096
1097	/* Start the one second timer. */
1098	sc->sc_wdog_timer = 0;
1099	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1100}
1101
1102static int
1103gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
1104{
1105	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1106	struct gem_txsoft *txs;
1107	struct ip *ip;
1108	struct mbuf *m;
1109	uint64_t cflags, flags;
1110	int error, nexttx, nsegs, offset, seg;
1111
1112	GEM_LOCK_ASSERT(sc, MA_OWNED);
1113
1114	/* Get a work queue entry. */
1115	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1116		/* Ran out of descriptors. */
1117		return (ENOBUFS);
1118	}
1119
1120	cflags = 0;
1121	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
1122		if (M_WRITABLE(*m_head) == 0) {
1123			m = m_dup(*m_head, M_NOWAIT);
1124			m_freem(*m_head);
1125			*m_head = m;
1126			if (m == NULL)
1127				return (ENOBUFS);
1128		}
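		/*
		 * Point the hardware at the start of the L4 header for
		 * checksumming and at the checksum field itself for
		 * stuffing; csum_data is the field's offset within the
		 * TCP/UDP header.
		 */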
1129		offset = sizeof(struct ether_header);
1130		m = m_pullup(*m_head, offset + sizeof(struct ip));
1131		if (m == NULL) {
1132			*m_head = NULL;
1133			return (ENOBUFS);
1134		}
1135		ip = (struct ip *)(mtod(m, caddr_t) + offset);
1136		offset += (ip->ip_hl << 2);
1137		cflags = offset << GEM_TD_CXSUM_STARTSHFT |
1138		    ((offset + m->m_pkthdr.csum_data) <<
1139		    GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE;
1140		*m_head = m;
1141	}
1142
1143	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1144	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1145	if (error == EFBIG) {
1146		m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
1147		if (m == NULL) {
1148			m_freem(*m_head);
1149			*m_head = NULL;
1150			return (ENOBUFS);
1151		}
1152		*m_head = m;
1153		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
1154		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
1155		    BUS_DMA_NOWAIT);
1156		if (error != 0) {
1157			m_freem(*m_head);
1158			*m_head = NULL;
1159			return (error);
1160		}
1161	} else if (error != 0)
1162		return (error);
1163	/* If nsegs is wrong then the stack is corrupt. */
1164	KASSERT(nsegs <= GEM_NTXSEGS,
1165	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1166	if (nsegs == 0) {
1167		m_freem(*m_head);
1168		*m_head = NULL;
1169		return (EIO);
1170	}
1171
1172	/*
1173	 * Ensure we have enough descriptors free to describe
1174	 * the packet.  Note, we always reserve one descriptor
1175	 * at the end of the ring as a termination point, in
1176	 * order to prevent wrap-around.
1177	 */
1178	if (nsegs > sc->sc_txfree - 1) {
1179		txs->txs_ndescs = 0;
1180		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1181		return (ENOBUFS);
1182	}
1183
1184	txs->txs_ndescs = nsegs;
1185	txs->txs_firstdesc = sc->sc_txnext;
1186	nexttx = txs->txs_firstdesc;
1187	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1188#ifdef GEM_DEBUG
1189		CTR6(KTR_GEM,
1190		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
1191		    __func__, seg, nexttx, txsegs[seg].ds_len,
1192		    txsegs[seg].ds_addr,
1193		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1194#endif
1195		sc->sc_txdescs[nexttx].gd_addr =
1196		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1197		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1198		    ("%s: segment size too large!", __func__));
1199		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1200		sc->sc_txdescs[nexttx].gd_flags =
1201		    GEM_DMA_WRITE(sc, flags | cflags);
1202		txs->txs_lastdesc = nexttx;
1203	}
1204
1205	/* Set EOP on the last descriptor. */
1206#ifdef GEM_DEBUG
1207	CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d",
1208	    __func__, seg, nexttx);
1209#endif
1210	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1211	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1212
1213	/* Lastly set SOP on the first descriptor. */
1214#ifdef GEM_DEBUG
1215	CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d",
1216	    __func__, seg, nexttx);
1217#endif
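	/*
	 * Request a TX completion interrupt only after roughly every
	 * GEM_NTXSEGS * 2/3 queued frames (counted in sc_txwin) rather
	 * than per packet; this keeps the interrupt rate down while
	 * gem_tint() still reclaims descriptors in batches.
	 */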
1218	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1219		sc->sc_txwin = 0;
1220		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1221		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1222		    GEM_TD_START_OF_PACKET);
1223	} else
1224		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1225		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1226
1227	/* Sync the DMA map. */
1228	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1229	    BUS_DMASYNC_PREWRITE);
1230
1231#ifdef GEM_DEBUG
1232	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1233	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
1234	    txs->txs_ndescs);
1235#endif
1236	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1237	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1238	txs->txs_mbuf = *m_head;
1239
1240	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1241	sc->sc_txfree -= txs->txs_ndescs;
1242
1243	return (0);
1244}
1245
1246static void
1247gem_init_regs(struct gem_softc *sc)
1248{
1249	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1250
1251	GEM_LOCK_ASSERT(sc, MA_OWNED);
1252
1253	/* These registers are not cleared on reset. */
1254	if ((sc->sc_flags & GEM_INITED) == 0) {
1255		/* magic values */
1256		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG0, 0);
1257		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
1258		GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);
1259
1260		/* min frame length */
1261		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1262		/* max frame length and max burst size */
1263		GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
1264		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1265
1266		/* more magic values */
1267		GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
1268		GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
1269		GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
1270		GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808);
1271
1272		/* random number seed */
1273		GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
1274		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);
1275
1276		/* secondary MAC address: 0:0:0:0:0:0 */
1277		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR3, 0);
1278		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR4, 0);
1279		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR5, 0);
1280
1281		/* MAC control address: 01:80:c2:00:00:01 */
1282		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001);
1283		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200);
1284		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180);
1285
1286		/* MAC filter address: 0:0:0:0:0:0 */
1287		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
1288		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
1289		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
1290		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
1291		GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);
1292
1293		sc->sc_flags |= GEM_INITED;
1294	}
1295
1296	/* Counters need to be zeroed. */
1297	GEM_BANK1_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0);
1298	GEM_BANK1_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0);
1299	GEM_BANK1_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0);
1300	GEM_BANK1_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0);
1301	GEM_BANK1_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0);
1302	GEM_BANK1_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0);
1303	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0);
1304	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0);
1305	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0);
1306	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0);
1307	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0);
1308
1309	/* Set XOFF PAUSE time. */
1310	GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1311
1312	/* Set the station address. */
1313	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
1314	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
1315	GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);
1316
1317	/* Enable MII outputs. */
1318	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
1319}
1320
1321static void
1322gem_start(struct ifnet *ifp)
1323{
1324	struct gem_softc *sc = ifp->if_softc;
1325
1326	GEM_LOCK(sc);
1327	gem_start_locked(ifp);
1328	GEM_UNLOCK(sc);
1329}
1330
1331static inline void
1332gem_txkick(struct gem_softc *sc)
1333{
1334
1335	/*
1336	 * Update the TX kick register.  This register has to point to the
1337	 * descriptor after the last valid one and for optimum performance
1338	 * should be incremented in multiples of 4 (the DMA engine fetches/
1339	 * updates descriptors in batches of 4).
1340	 */
1341#ifdef GEM_DEBUG
1342	CTR3(KTR_GEM, "%s: %s: kicking TX %d",
1343	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1344#endif
1345	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1346	GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
1347}
1348
1349static void
1350gem_start_locked(struct ifnet *ifp)
1351{
1352	struct gem_softc *sc = ifp->if_softc;
1353	struct mbuf *m;
1354	int kicked, ntx;
1355
1356	GEM_LOCK_ASSERT(sc, MA_OWNED);
1357
1358	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1359	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
1360		return;
1361
1362#ifdef GEM_DEBUG
1363	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1364	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
1365	    sc->sc_txnext);
1366#endif
1367	ntx = 0;
1368	kicked = 0;
1369	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1370		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1371		if (m == NULL)
1372			break;
1373		if (gem_load_txmbuf(sc, &m) != 0) {
1374			if (m == NULL)
1375				break;
1376			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1377			IFQ_DRV_PREPEND(&ifp->if_snd, m);
1378			break;
1379		}
1380		if ((sc->sc_txnext % 4) == 0) {
1381			gem_txkick(sc);
1382			kicked = 1;
1383		} else
1384			kicked = 0;
1385		ntx++;
1386		BPF_MTAP(ifp, m);
1387	}
1388
1389	if (ntx > 0) {
1390		if (kicked == 0)
1391			gem_txkick(sc);
1392#ifdef GEM_DEBUG
1393		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1394		    device_get_name(sc->sc_dev), sc->sc_txnext);
1395#endif
1396
1397		/* Set a watchdog timer in case the chip flakes out. */
1398		sc->sc_wdog_timer = 5;
1399#ifdef GEM_DEBUG
1400		CTR3(KTR_GEM, "%s: %s: watchdog %d",
1401		    device_get_name(sc->sc_dev), __func__,
1402		    sc->sc_wdog_timer);
1403#endif
1404	}
1405}
1406
1407static void
1408gem_tint(struct gem_softc *sc)
1409{
1410	struct ifnet *ifp = sc->sc_ifp;
1411	struct gem_txsoft *txs;
1412	int progress;
1413	uint32_t txlast;
1414#ifdef GEM_DEBUG
1415	int i;
1416
1417	GEM_LOCK_ASSERT(sc, MA_OWNED);
1418
1419	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1420#endif
1421
1422	/*
1423	 * Go through our TX list and free mbufs for those
1424	 * frames that have been transmitted.
1425	 */
1426	progress = 0;
1427	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1428	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1429#ifdef GEM_DEBUG
1430		if ((ifp->if_flags & IFF_DEBUG) != 0) {
1431			printf("    txsoft %p transmit chain:\n", txs);
1432			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1433				printf("descriptor %d: ", i);
1434				printf("gd_flags: 0x%016llx\t",
1435				    (long long)GEM_DMA_READ(sc,
1436				    sc->sc_txdescs[i].gd_flags));
1437				printf("gd_addr: 0x%016llx\n",
1438				    (long long)GEM_DMA_READ(sc,
1439				    sc->sc_txdescs[i].gd_addr));
1440				if (i == txs->txs_lastdesc)
1441					break;
1442			}
1443		}
1444#endif
1445
1446		/*
1447		 * In theory, we could harvest some descriptors before
1448		 * the ring is empty, but that's a bit complicated.
1449		 *
1450		 * GEM_TX_COMPLETION points to the last descriptor
1451		 * processed + 1.
1452		 */
1453		txlast = GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION);
1454#ifdef GEM_DEBUG
1455		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1456		    "txs->txs_lastdesc = %d, txlast = %d",
1457		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1458#endif
1459		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1460			if ((txlast >= txs->txs_firstdesc) &&
1461			    (txlast <= txs->txs_lastdesc))
1462				break;
1463		} else {
1464			/* Ick -- this command wraps. */
1465			if ((txlast >= txs->txs_firstdesc) ||
1466			    (txlast <= txs->txs_lastdesc))
1467				break;
1468		}
1469
1470#ifdef GEM_DEBUG
1471		CTR1(KTR_GEM, "%s: releasing a descriptor", __func__);
1472#endif
1473		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1474
1475		sc->sc_txfree += txs->txs_ndescs;
1476
1477		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1478		    BUS_DMASYNC_POSTWRITE);
1479		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1480		if (txs->txs_mbuf != NULL) {
1481			m_freem(txs->txs_mbuf);
1482			txs->txs_mbuf = NULL;
1483		}
1484
1485		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1486
1487		ifp->if_opackets++;
1488		progress = 1;
1489	}
1490
1491#ifdef GEM_DEBUG
1492	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx "
1493	    "GEM_TX_COMPLETION %x",
1494	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_STATE_MACHINE),
1495	    ((long long)GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) |
1496	    GEM_BANK1_READ_4(sc, GEM_TX_DATA_PTR_LO),
1497	    GEM_BANK1_READ_4(sc, GEM_TX_COMPLETION));
1498#endif
1499
1500	if (progress) {
1501		if (sc->sc_txfree == GEM_NTXDESC - 1)
1502			sc->sc_txwin = 0;
1503
1504		/*
1505		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
1506		 * and restart.
1507		 */
1508		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			sc->sc_wdog_timer = 0;
1511		gem_start_locked(ifp);
1512	}
1513
1514#ifdef GEM_DEBUG
1515	CTR3(KTR_GEM, "%s: %s: watchdog %d",
1516	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1517#endif
1518}
1519
1520#ifdef GEM_RINT_TIMEOUT
1521static void
1522gem_rint_timeout(void *arg)
1523{
1524	struct gem_softc *sc = arg;
1525
1526	GEM_LOCK_ASSERT(sc, MA_OWNED);
1527
1528	gem_rint(sc);
1529}
1530#endif
1531
1532static void
1533gem_rint(struct gem_softc *sc)
1534{
1535	struct ifnet *ifp = sc->sc_ifp;
1536	struct mbuf *m;
1537	uint64_t rxstat;
1538	uint32_t rxcomp;
1539
1540	GEM_LOCK_ASSERT(sc, MA_OWNED);
1541
1542#ifdef GEM_RINT_TIMEOUT
1543	callout_stop(&sc->sc_rx_ch);
1544#endif
1545#ifdef GEM_DEBUG
1546	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1547#endif
1548
1549	/*
1550	 * Read the completion register once.  This limits
1551	 * how long the following loop can execute.
1552	 */
1553	rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
1554#ifdef GEM_DEBUG
1555	CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
1556	    __func__, sc->sc_rxptr, rxcomp);
1557#endif
1558	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1559	for (; sc->sc_rxptr != rxcomp;) {
1560		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
1561		rxstat = GEM_DMA_READ(sc,
1562		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
1563
1564		if (rxstat & GEM_RD_OWN) {
1565#ifdef GEM_RINT_TIMEOUT
1566			/*
1567			 * The descriptor is still marked as owned, although
1568			 * it is supposed to have completed.  This has been
1569			 * observed on some machines.  Just exiting here
1570			 * might leave the packet sitting around until another
1571			 * one arrives to trigger a new interrupt, which is
1572			 * generally undesirable, so set up a timeout.
1573			 */
1574			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1575			    gem_rint_timeout, sc);
1576#endif
1577			m = NULL;
1578			goto kickit;
1579		}
1580
1581		if (rxstat & GEM_RD_BAD_CRC) {
1582			ifp->if_ierrors++;
1583			device_printf(sc->sc_dev, "receive error: CRC error\n");
1584			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1585			m = NULL;
1586			goto kickit;
1587		}
1588
1589#ifdef GEM_DEBUG
1590		if ((ifp->if_flags & IFF_DEBUG) != 0) {
1591			printf("    rxsoft %p descriptor %d: ",
1592			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
1593			printf("gd_flags: 0x%016llx\t",
1594			    (long long)GEM_DMA_READ(sc,
1595			    sc->sc_rxdescs[sc->sc_rxptr].gd_flags));
1596			printf("gd_addr: 0x%016llx\n",
1597			    (long long)GEM_DMA_READ(sc,
1598			    sc->sc_rxdescs[sc->sc_rxptr].gd_addr));
1599		}
1600#endif
1601
1602		/*
1603		 * Allocate a new mbuf cluster.  If that fails, we are
1604		 * out of memory, and must drop the packet and recycle
1605		 * the buffer that's already attached to this descriptor.
1606		 */
1607		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
1608			ifp->if_iqdrops++;
1609			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
1610			m = NULL;
1611		}
1612
1613 kickit:
1614		/*
1615		 * Update the RX kick register.  This register has to point
1616		 * to the descriptor after the last valid one (before the
1617		 * current batch) and for optimum performance should be
1618		 * incremented in multiples of 4 (the DMA engine fetches/
1619		 * updates descriptors in batches of 4).
1620		 */
1621		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
1622		if ((sc->sc_rxptr % 4) == 0) {
1623			GEM_CDSYNC(sc,
1624			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1625			GEM_BANK1_WRITE_4(sc, GEM_RX_KICK,
1626			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
1627			    GEM_NRXDESC_MASK);
1628		}
1629
1630		if (m == NULL) {
1631			if (rxstat & GEM_RD_OWN)
1632				break;
1633			continue;
1634		}
1635
1636		ifp->if_ipackets++;
1637		m->m_data += ETHER_ALIGN; /* first byte offset */
1638		m->m_pkthdr.rcvif = ifp;
1639		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
1640
1641		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1642			gem_rxcksum(m, rxstat);
1643
1644		/* Pass it on. */
1645		GEM_UNLOCK(sc);
1646		(*ifp->if_input)(ifp, m);
1647		GEM_LOCK(sc);
1648	}
1649
1650#ifdef GEM_DEBUG
1651	CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
1652	    sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
1653#endif
1654}
1655
1656static int
1657gem_add_rxbuf(struct gem_softc *sc, int idx)
1658{
1659	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1660	struct mbuf *m;
1661	bus_dma_segment_t segs[1];
1662	int error, nsegs;
1663
1664	GEM_LOCK_ASSERT(sc, MA_OWNED);
1665
1666	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1667	if (m == NULL)
1668		return (ENOBUFS);
1669	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1670
1671#ifdef GEM_DEBUG
1672	/* Bzero the packet to check DMA. */
1673	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1674#endif
1675
1676	if (rxs->rxs_mbuf != NULL) {
1677		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1678		    BUS_DMASYNC_POSTREAD);
1679		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1680	}
1681
1682	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1683	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1684	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
1687		m_freem(m);
1688		return (error);
1689	}
1690	/* If nsegs is wrong then the stack is corrupt. */
1691	KASSERT(nsegs == 1,
1692	    ("%s: too many DMA segments (%d)", __func__, nsegs));
1693	rxs->rxs_mbuf = m;
1694	rxs->rxs_paddr = segs[0].ds_addr;
1695
1696	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1697	    BUS_DMASYNC_PREREAD);
1698
1699	GEM_INIT_RXDESC(sc, idx);
1700
1701	return (0);
1702}
1703
1704static void
1705gem_eint(struct gem_softc *sc, u_int status)
1706{
1707
1708	sc->sc_ifp->if_ierrors++;
1709	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
1710		gem_reset_rxdma(sc);
1711		return;
1712	}
1713
1714	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
1715	if ((status & GEM_INTR_BERR) != 0) {
1716		if ((sc->sc_flags & GEM_PCI) != 0)
1717			printf(", PCI bus error 0x%x\n",
1718			    GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
1719		else
1720			printf(", SBus error 0x%x\n",
1721			    GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
1722	}
1723}
1724
1725void
1726gem_intr(void *v)
1727{
1728	struct gem_softc *sc = v;
1729	uint32_t status, status2;
1730
1731	GEM_LOCK(sc);
1732	status = GEM_BANK1_READ_4(sc, GEM_STATUS);
1733
1734#ifdef GEM_DEBUG
1735	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1736	    device_get_name(sc->sc_dev), __func__,
1737	    (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);
1738
1739	/*
1740	 * PCS interrupts must be cleared, otherwise no traffic is passed!
1741	 */
1742	if ((status & GEM_INTR_PCS) != 0) {
1743		status2 =
1744		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS) |
1745		    GEM_BANK1_READ_4(sc, GEM_MII_INTERRUP_STATUS);
1746		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
1747			device_printf(sc->sc_dev,
1748			    "%s: PCS link status changed\n", __func__);
1749	}
1750	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
1751		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_STATUS);
1752		if ((status2 & GEM_MAC_PAUSED) != 0)
1753			device_printf(sc->sc_dev,
1754			    "%s: PAUSE received (PAUSE time %d slots)\n",
1755			    __func__, GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to PAUSE state\n", __func__);
		if ((status2 & GEM_MAC_RESUME) != 0)
			device_printf(sc->sc_dev,
			    "%s: transitioned to non-PAUSE state\n", __func__);
1762	}
1763	if ((status & GEM_INTR_MIF) != 0)
1764		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
1765#endif
1766
1767	if (__predict_false(status &
1768	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
1769		gem_eint(sc, status);
1770
1771	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1772		gem_rint(sc);
1773
1774	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1775		gem_tint(sc);
1776
1777	if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
1778		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
1779		if ((status2 &
1780		    ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
1781		    GEM_MAC_TX_PEAK_EXP)) != 0)
1782			device_printf(sc->sc_dev,
1783			    "MAC TX fault, status %x\n", status2);
1784		if ((status2 &
1785		    (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
1786			sc->sc_ifp->if_oerrors++;
1787			sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1788			gem_init_locked(sc);
1789		}
1790	}
1791	if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
1792		status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
		 * silicon bug, so handle it silently.  Moreover, it's
		 * likely that the receiver has hung, so we reset it.
		 */
1799		if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) {
1800			sc->sc_ifp->if_ierrors++;
1801			gem_reset_rxdma(sc);
1802		} else if ((status2 &
1803		    ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0)
1804			device_printf(sc->sc_dev,
1805			    "MAC RX fault, status %x\n", status2);
1806	}
1807	GEM_UNLOCK(sc);
1808}
1809
1810static int
1811gem_watchdog(struct gem_softc *sc)
1812{
1813	struct ifnet *ifp = sc->sc_ifp;
1814
1815	GEM_LOCK_ASSERT(sc, MA_OWNED);
1816
1817#ifdef GEM_DEBUG
1818	CTR4(KTR_GEM,
1819	    "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x",
1820	    __func__, GEM_BANK1_READ_4(sc, GEM_RX_CONFIG),
1821	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS),
1822	    GEM_BANK1_READ_4(sc, GEM_MAC_RX_CONFIG));
1823	CTR4(KTR_GEM,
1824	    "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x",
1825	    __func__, GEM_BANK1_READ_4(sc, GEM_TX_CONFIG),
1826	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS),
1827	    GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG));
1828#endif
1829
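	/*
	 * sc_wdog_timer == 0 means the watchdog is disarmed; otherwise
	 * count it down and only act once it expires on this call.
	 */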
1830	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1831		return (0);
1832
1833	if ((sc->sc_flags & GEM_LINK) != 0)
1834		device_printf(sc->sc_dev, "device timeout\n");
1835	else if (bootverbose)
1836		device_printf(sc->sc_dev, "device timeout (no link)\n");
1837	++ifp->if_oerrors;
1838
1839	/* Try to get more packets going. */
1840	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1841	gem_init_locked(sc);
1842	gem_start_locked(ifp);
1843	return (EJUSTRETURN);
1844}
1845
1846static void
1847gem_mifinit(struct gem_softc *sc)
1848{
1849
1850	/* Configure the MIF in frame mode. */
1851	GEM_BANK1_WRITE_4(sc, GEM_MIF_CONFIG,
1852	    GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
1853	GEM_BANK1_BARRIER(sc, GEM_MIF_CONFIG, 4,
1854	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1855}
1856
1857/*
1858 * MII interface
1859 *
1860 * The MII interface supports at least three different operating modes:
1861 *
1862 * Bitbang mode is implemented using data, clock and output enable registers.
1863 *
1864 * Frame mode is implemented by loading a complete frame into the frame
1865 * register and polling the valid bit for completion.
1866 *
1867 * Polling mode uses the frame register but completion is indicated by
1868 * an interrupt.
 */
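/*
 * gem_mifinit() above puts the MIF into frame mode; the routines below
 * poll GEM_MIF_FRAME_TA0 for completion rather than relying on MIF
 * interrupts.
 */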
1871int
1872gem_mii_readreg(device_t dev, int phy, int reg)
1873{
1874	struct gem_softc *sc;
1875	int n;
1876	uint32_t v;
1877
1878#ifdef GEM_DEBUG_PHY
1879	printf("%s: phy %d reg %d\n", __func__, phy, reg);
1880#endif
1881
1882	sc = device_get_softc(dev);
1883	if ((sc->sc_flags & GEM_SERDES) != 0) {
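		/*
		 * The SERDES/PCS has no conventional PHY; map the generic
		 * MII registers onto the corresponding on-chip PCS
		 * registers.
		 */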
1884		switch (reg) {
1885		case MII_BMCR:
1886			reg = GEM_MII_CONTROL;
1887			break;
1888		case MII_BMSR:
1889			reg = GEM_MII_STATUS;
1890			break;
1891		case MII_PHYIDR1:
1892		case MII_PHYIDR2:
1893			return (0);
1894		case MII_ANAR:
1895			reg = GEM_MII_ANAR;
1896			break;
1897		case MII_ANLPAR:
1898			reg = GEM_MII_ANLPAR;
1899			break;
1900		case MII_EXTSR:
1901			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
1902		default:
1903			device_printf(sc->sc_dev,
1904			    "%s: unhandled register %d\n", __func__, reg);
1905			return (0);
1906		}
1907		return (GEM_BANK1_READ_4(sc, reg));
1908	}
1909
1910	/* Construct the frame command. */
1911	v = GEM_MIF_FRAME_READ |
1912	    (phy << GEM_MIF_PHY_SHIFT) |
1913	    (reg << GEM_MIF_REG_SHIFT);
1914
1915	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
1916	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
1917	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1918	for (n = 0; n < 100; n++) {
1919		DELAY(1);
1920		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
1921		if (v & GEM_MIF_FRAME_TA0)
1922			return (v & GEM_MIF_FRAME_DATA);
1923	}
1924
1925	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
1926	return (0);
1927}
1928
1929int
1930gem_mii_writereg(device_t dev, int phy, int reg, int val)
1931{
1932	struct gem_softc *sc;
1933	int n;
1934	uint32_t v;
1935
1936#ifdef GEM_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
1938#endif
1939
1940	sc = device_get_softc(dev);
1941	if ((sc->sc_flags & GEM_SERDES) != 0) {
1942		switch (reg) {
1943		case MII_BMSR:
1944			reg = GEM_MII_STATUS;
1945			break;
1946		case MII_BMCR:
1947			reg = GEM_MII_CONTROL;
1948			if ((val & GEM_MII_CONTROL_RESET) == 0)
1949				break;
1950			GEM_BANK1_WRITE_4(sc, GEM_MII_CONTROL, val);
1951			GEM_BANK1_BARRIER(sc, GEM_MII_CONTROL, 4,
1952			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1953			if (!GEM_BANK1_BITWAIT(sc, GEM_MII_CONTROL,
1954			    GEM_MII_CONTROL_RESET, 0))
1955				device_printf(sc->sc_dev,
1956				    "cannot reset PCS\n");
1957			/* FALLTHROUGH */
1958		case MII_ANAR:
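			/*
			 * Program the advertised capabilities and the
			 * SerDes link control with the PCS disabled, then
			 * re-enable the PCS so the new settings take
			 * effect.
			 */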
1959			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG, 0);
1960			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
1961			    BUS_SPACE_BARRIER_WRITE);
1962			GEM_BANK1_WRITE_4(sc, GEM_MII_ANAR, val);
1963			GEM_BANK1_BARRIER(sc, GEM_MII_ANAR, 4,
1964			    BUS_SPACE_BARRIER_WRITE);
1965			GEM_BANK1_WRITE_4(sc, GEM_MII_SLINK_CONTROL,
1966			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
1967			GEM_BANK1_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4,
1968			    BUS_SPACE_BARRIER_WRITE);
1969			GEM_BANK1_WRITE_4(sc, GEM_MII_CONFIG,
1970			    GEM_MII_CONFIG_ENABLE);
1971			GEM_BANK1_BARRIER(sc, GEM_MII_CONFIG, 4,
1972			    BUS_SPACE_BARRIER_WRITE);
1973			return (0);
1974		case MII_ANLPAR:
1975			reg = GEM_MII_ANLPAR;
1976			break;
1977		default:
1978			device_printf(sc->sc_dev,
1979			    "%s: unhandled register %d\n", __func__, reg);
1980			return (0);
1981		}
1982		GEM_BANK1_WRITE_4(sc, reg, val);
1983		GEM_BANK1_BARRIER(sc, reg, 4,
1984		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1985		return (0);
1986	}
1987
1988	/* Construct the frame command. */
1989	v = GEM_MIF_FRAME_WRITE |
1990	    (phy << GEM_MIF_PHY_SHIFT) |
1991	    (reg << GEM_MIF_REG_SHIFT) |
1992	    (val & GEM_MIF_FRAME_DATA);
1993
1994	GEM_BANK1_WRITE_4(sc, GEM_MIF_FRAME, v);
1995	GEM_BANK1_BARRIER(sc, GEM_MIF_FRAME, 4,
1996	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1997	for (n = 0; n < 100; n++) {
1998		DELAY(1);
1999		v = GEM_BANK1_READ_4(sc, GEM_MIF_FRAME);
2000		if (v & GEM_MIF_FRAME_TA0)
2001			return (1);
2002	}
2003
2004	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
2005	return (0);
2006}
2007
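/*
 * MII status change callback: track the link state and reprogram the MAC
 * (duplex, carrier extension, pause and XIF settings) to match the
 * currently negotiated media.
 */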
2008void
2009gem_mii_statchg(device_t dev)
2010{
2011	struct gem_softc *sc;
2012	int gigabit;
2013	uint32_t rxcfg, txcfg, v;
2014
2015	sc = device_get_softc(dev);
2016
2017	GEM_LOCK_ASSERT(sc, MA_OWNED);
2018
2019#ifdef GEM_DEBUG
2020	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
2021		device_printf(sc->sc_dev, "%s: status change\n", __func__);
2022#endif
2023
2024	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
2025	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
2026		sc->sc_flags |= GEM_LINK;
2027	else
2028		sc->sc_flags &= ~GEM_LINK;
2029
2030	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
2031	case IFM_1000_SX:
2032	case IFM_1000_LX:
2033	case IFM_1000_CX:
2034	case IFM_1000_T:
2035		gigabit = 1;
2036		break;
2037	default:
2038		gigabit = 0;
2039	}
2040
	/*
	 * The configuration done here corresponds to steps F) and G) and,
	 * as far as enabling of the RX and TX MAC goes, also to step H)
	 * of the initialization sequence outlined in section 3.2.1 of
	 * the GEM Gigabit Ethernet ASIC Specification.
	 */
2047
2048	rxcfg = sc->sc_mac_rxcfg;
2049	rxcfg &= ~GEM_MAC_RX_CARR_EXTEND;
2050	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
2051	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2052		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
2053	else if (gigabit != 0) {
2054		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
2055		txcfg |= GEM_MAC_TX_CARR_EXTEND;
2056	}
2057	(void)gem_disable_tx(sc);
2058	GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg);
2059	(void)gem_disable_rx(sc);
2060	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg);
2061
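	/*
	 * Program the MAC control (flow control) configuration according
	 * to the negotiated pause options.
	 */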
2062	v = GEM_BANK1_READ_4(sc, GEM_MAC_CONTROL_CONFIG) &
2063	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
2064	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2065	    IFM_ETH_RXPAUSE) != 0)
2066		v |= GEM_MAC_CC_RX_PAUSE;
2067	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2068	    IFM_ETH_TXPAUSE) != 0)
2069		v |= GEM_MAC_CC_TX_PAUSE;
2070	GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v);
2071
2072	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
2073	    gigabit != 0)
2074		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
2075		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
2076	else
2077		GEM_BANK1_WRITE_4(sc, GEM_MAC_SLOT_TIME,
2078		    GEM_MAC_SLOT_TIME_NORMAL);
2079
2080	/* XIF Configuration */
2081	v = GEM_MAC_XIF_LINK_LED;
2082	v |= GEM_MAC_XIF_TX_MII_ENA;
2083	if ((sc->sc_flags & GEM_SERDES) == 0) {
2084		if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
2085		    GEM_MIF_CONFIG_PHY_SEL) != 0) {
2086			/* External MII needs echo disable if half duplex. */
2087			if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
2088			    IFM_FDX) == 0)
2089				v |= GEM_MAC_XIF_ECHO_DISABL;
2090		} else
			/*
			 * Internal MII needs buffer enable.
			 * XXX buffer enable only makes sense for an
			 * external PHY.
			 */
2096			v |= GEM_MAC_XIF_MII_BUF_ENA;
2097	}
2098	if (gigabit != 0)
2099		v |= GEM_MAC_XIF_GMII_MODE;
2100	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
2101		v |= GEM_MAC_XIF_FDPLX_LED;
2102	GEM_BANK1_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v);
2103
2104	sc->sc_mac_rxcfg = rxcfg;
2105	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2106	    (sc->sc_flags & GEM_LINK) != 0) {
2107		GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG,
2108		    txcfg | GEM_MAC_TX_ENABLE);
2109		GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG,
2110		    rxcfg | GEM_MAC_RX_ENABLE);
2111	}
2112}
2113
2114int
2115gem_mediachange(struct ifnet *ifp)
2116{
2117	struct gem_softc *sc = ifp->if_softc;
2118	int error;
2119
2120	/* XXX add support for serial media. */
2121
2122	GEM_LOCK(sc);
2123	error = mii_mediachg(sc->sc_mii);
2124	GEM_UNLOCK(sc);
2125	return (error);
2126}
2127
2128void
2129gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2130{
2131	struct gem_softc *sc = ifp->if_softc;
2132
2133	GEM_LOCK(sc);
2134	if ((ifp->if_flags & IFF_UP) == 0) {
2135		GEM_UNLOCK(sc);
2136		return;
2137	}
2138
2139	mii_pollstat(sc->sc_mii);
2140	ifmr->ifm_active = sc->sc_mii->mii_media_active;
2141	ifmr->ifm_status = sc->sc_mii->mii_media_status;
2142	GEM_UNLOCK(sc);
2143}
2144
2145static int
2146gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2147{
2148	struct gem_softc *sc = ifp->if_softc;
2149	struct ifreq *ifr = (struct ifreq *)data;
2150	int error;
2151
2152	error = 0;
2153	switch (cmd) {
2154	case SIOCSIFFLAGS:
2155		GEM_LOCK(sc);
2156		if ((ifp->if_flags & IFF_UP) != 0) {
2157			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2158			    ((ifp->if_flags ^ sc->sc_ifflags) &
2159			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
2160				gem_setladrf(sc);
2161			else
2162				gem_init_locked(sc);
2163		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2164			gem_stop(ifp, 0);
2165		if ((ifp->if_flags & IFF_LINK0) != 0)
2166			sc->sc_csum_features |= CSUM_UDP;
2167		else
2168			sc->sc_csum_features &= ~CSUM_UDP;
2169		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2170			ifp->if_hwassist = sc->sc_csum_features;
2171		sc->sc_ifflags = ifp->if_flags;
2172		GEM_UNLOCK(sc);
2173		break;
2174	case SIOCADDMULTI:
2175	case SIOCDELMULTI:
2176		GEM_LOCK(sc);
2177		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2178			gem_setladrf(sc);
2179		GEM_UNLOCK(sc);
2180		break;
2181	case SIOCGIFMEDIA:
2182	case SIOCSIFMEDIA:
2183		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2184		break;
2185	case SIOCSIFCAP:
2186		GEM_LOCK(sc);
2187		ifp->if_capenable = ifr->ifr_reqcap;
2188		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2189			ifp->if_hwassist = sc->sc_csum_features;
2190		else
2191			ifp->if_hwassist = 0;
2192		GEM_UNLOCK(sc);
2193		break;
2194	default:
2195		error = ether_ioctl(ifp, cmd, data);
2196		break;
2197	}
2198
2199	return (error);
2200}
2201
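/*
 * Set up RX MAC filtering: either one of the promiscuous modes or the
 * multicast hash filter, and re-enable the RX MAC afterwards.
 */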
2202static void
2203gem_setladrf(struct gem_softc *sc)
2204{
2205	struct ifnet *ifp = sc->sc_ifp;
2206	struct ifmultiaddr *inm;
2207	int i;
2208	uint32_t hash[16];
2209	uint32_t crc, v;
2210
2211	GEM_LOCK_ASSERT(sc, MA_OWNED);
2212
2213	/*
2214	 * Turn off the RX MAC and the hash filter as required by the Sun GEM
2215	 * programming restrictions.
2216	 */
2217	v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER;
2218	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
2219	GEM_BANK1_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
2220	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
2221	if (!GEM_BANK1_BITWAIT(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER |
2222	    GEM_MAC_RX_ENABLE, 0))
2223		device_printf(sc->sc_dev,
2224		    "cannot disable RX MAC or hash filter\n");
2225
2226	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP);
2227	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2228		v |= GEM_MAC_RX_PROMISCUOUS;
2229		goto chipit;
2230	}
2231	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2232		v |= GEM_MAC_RX_PROMISC_GRP;
2233		goto chipit;
2234	}
2235
	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */
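	/*
	 * For example, a CRC whose top byte is 0x2b selects word
	 * 0x2b >> 4 == 2 and bit 15 - (0x2b & 15) == 4, i.e.
	 * hash[2] |= 1 << 4.
	 */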
2244
2245	/* Clear the hash table. */
2246	memset(hash, 0, sizeof(hash));
2247
2248	if_maddr_rlock(ifp);
2249	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2250		if (inm->ifma_addr->sa_family != AF_LINK)
2251			continue;
2252		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2253		    inm->ifma_addr), ETHER_ADDR_LEN);
2254
2255		/* We just want the 8 most significant bits. */
2256		crc >>= 24;
2257
2258		/* Set the corresponding bit in the filter. */
2259		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2260	}
2261	if_maddr_runlock(ifp);
2262
2263	v |= GEM_MAC_RX_HASH_FILTER;
2264
2265	/* Now load the hash table into the chip (if we are using it). */
2266	for (i = 0; i < 16; i++)
2267		GEM_BANK1_WRITE_4(sc,
2268		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2269		    hash[i]);
2270
2271 chipit:
2272	sc->sc_mac_rxcfg = v;
2273	GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
2274}
2275