if_gem.c revision 170847
1/*-
2 * Copyright (C) 2001 Eduardo Horvath.
3 * Copyright (c) 2001-2003 Thomas Moestl
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 170847 2007-06-16 23:27:59Z marius $");
32
33/*
34 * Driver for Sun GEM ethernet controllers.
35 */
36
37#if 0
38#define	GEM_DEBUG
39#endif
40
41#if 0	/* XXX: In case of emergency, re-enable this. */
42#define	GEM_RINT_TIMEOUT
43#endif
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/bus.h>
48#include <sys/callout.h>
49#include <sys/endian.h>
50#include <sys/mbuf.h>
51#include <sys/malloc.h>
52#include <sys/kernel.h>
53#include <sys/lock.h>
54#include <sys/module.h>
55#include <sys/mutex.h>
56#include <sys/socket.h>
57#include <sys/sockio.h>
58#include <sys/rman.h>
59
60#include <net/bpf.h>
61#include <net/ethernet.h>
62#include <net/if.h>
63#include <net/if_arp.h>
64#include <net/if_dl.h>
65#include <net/if_media.h>
66#include <net/if_types.h>
67#include <net/if_vlan_var.h>
68
69#include <netinet/in.h>
70#include <netinet/in_systm.h>
71#include <netinet/ip.h>
72#include <netinet/tcp.h>
73#include <netinet/udp.h>
74
75#include <machine/bus.h>
76
77#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h>
79
80#include <dev/gem/if_gemreg.h>
81#include <dev/gem/if_gemvar.h>
82
83#define TRIES	10000
84/*
85 * The GEM hardware supports basic TCP/UDP checksum offloading. However,
86 * the hardware doesn't compensate the checksum for UDP datagrams, which
87 * can yield a checksum of 0x0. As a safeguard, UDP checksum offload is
88 * disabled by default. It can be reactivated by setting the special link
89 * option link0 with ifconfig(8).
90 */
91#define	GEM_CSUM_FEATURES	(CSUM_TCP)
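/*
 * For example, assuming the first instance attaches as gem0, UDP transmit
 * checksum offloading can be re-enabled at run time with
 *
 *	ifconfig gem0 link0
 *
 * which causes gem_ioctl() to add CSUM_UDP to sc_csum_features.
 */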
92
93static void	gem_start(struct ifnet *);
94static void	gem_start_locked(struct ifnet *);
95static void	gem_stop(struct ifnet *, int);
96static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
97static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
98static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
99static __inline void gem_rxcksum(struct mbuf *, uint64_t);
100static void	gem_tick(void *);
101static int	gem_watchdog(struct gem_softc *);
102static void	gem_init(void *);
103static void	gem_init_locked(struct gem_softc *);
104static void	gem_init_regs(struct gem_softc *);
105static int	gem_ringsize(int sz);
106static int	gem_meminit(struct gem_softc *);
107static struct mbuf *gem_defrag(struct mbuf *, int, int);
108static int	gem_load_txmbuf(struct gem_softc *, struct mbuf **);
109static void	gem_mifinit(struct gem_softc *);
110static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
111    u_int32_t);
112static int	gem_reset_rx(struct gem_softc *);
113static int	gem_reset_tx(struct gem_softc *);
114static int	gem_disable_rx(struct gem_softc *);
115static int	gem_disable_tx(struct gem_softc *);
116static void	gem_rxdrain(struct gem_softc *);
117static int	gem_add_rxbuf(struct gem_softc *, int);
118static void	gem_setladrf(struct gem_softc *);
119
120struct mbuf	*gem_get(struct gem_softc *, int, int);
121static void	gem_eint(struct gem_softc *, u_int);
122static void	gem_rint(struct gem_softc *);
123#ifdef GEM_RINT_TIMEOUT
124static void	gem_rint_timeout(void *);
125#endif
126static void	gem_tint(struct gem_softc *);
127#ifdef notyet
128static void	gem_power(int, void *);
129#endif
130
131devclass_t gem_devclass;
132DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
133MODULE_DEPEND(gem, miibus, 1, 1, 1);
134
135#ifdef GEM_DEBUG
136#include <sys/ktr.h>
137#define	KTR_GEM		KTR_CT2
138#endif
139
140#define	GEM_NSEGS GEM_NTXDESC
141
142/*
143 * gem_attach:
144 *
145 *	Attach a Gem interface to the system.
146 */
147int
148gem_attach(sc)
149	struct gem_softc *sc;
150{
151	struct ifnet *ifp;
152	struct mii_softc *child;
153	int i, error;
154	u_int32_t v;
155
156	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
157	if (ifp == NULL)
158		return (ENOSPC);
159
160	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
161#ifdef GEM_RINT_TIMEOUT
162	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
163#endif
164
165	/* Make sure the chip is stopped. */
166	ifp->if_softc = sc;
167	GEM_LOCK(sc);
168	gem_stop(ifp, 0);
169	gem_reset(sc);
170	GEM_UNLOCK(sc);
171
172	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
173	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
174	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
175	    &sc->sc_pdmatag);
176	if (error)
177		goto fail_ifnet;
178
179	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
180	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
181	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
182	if (error)
183		goto fail_ptag;
184
185	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
186	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
187	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
188	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
189	if (error)
190		goto fail_rtag;
191
192	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
193	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
194	    sizeof(struct gem_control_data), 1,
195	    sizeof(struct gem_control_data), 0,
196	    NULL, NULL, &sc->sc_cdmatag);
197	if (error)
198		goto fail_ttag;
199
200	/*
201	 * Allocate the control data structures, and create and load the
202	 * DMA map for it.
203	 */
204	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
205	    (void **)&sc->sc_control_data,
206	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
207	    &sc->sc_cddmamap))) {
208		device_printf(sc->sc_dev, "unable to allocate control data,"
209		    " error = %d\n", error);
210		goto fail_ctag;
211	}
212
213	sc->sc_cddma = 0;
214	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
215	    sc->sc_control_data, sizeof(struct gem_control_data),
216	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
217		device_printf(sc->sc_dev, "unable to load control data DMA "
218		    "map, error = %d\n", error);
219		goto fail_cmem;
220	}
221
222	/*
223	 * Initialize the transmit job descriptors.
224	 */
225	STAILQ_INIT(&sc->sc_txfreeq);
226	STAILQ_INIT(&sc->sc_txdirtyq);
227
228	/*
229	 * Create the transmit buffer DMA maps.
230	 */
231	error = ENOMEM;
232	for (i = 0; i < GEM_TXQUEUELEN; i++) {
233		struct gem_txsoft *txs;
234
235		txs = &sc->sc_txsoft[i];
236		txs->txs_mbuf = NULL;
237		txs->txs_ndescs = 0;
238		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
239		    &txs->txs_dmamap)) != 0) {
240			device_printf(sc->sc_dev, "unable to create tx DMA map "
241			    "%d, error = %d\n", i, error);
242			goto fail_txd;
243		}
244		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
245	}
246
247	/*
248	 * Create the receive buffer DMA maps.
249	 */
250	for (i = 0; i < GEM_NRXDESC; i++) {
251		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
252		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
253			device_printf(sc->sc_dev, "unable to create rx DMA map "
254			    "%d, error = %d\n", i, error);
255			goto fail_rxd;
256		}
257		sc->sc_rxsoft[i].rxs_mbuf = NULL;
258	}
259
260	gem_mifinit(sc);
261
262	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
263	    gem_mediastatus)) != 0) {
264		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
265		goto fail_rxd;
266	}
267	sc->sc_mii = device_get_softc(sc->sc_miibus);
268
269	/*
270	 * From this point forward, the attachment cannot fail.  A failure
271	 * before this point releases all resources that may have been
272	 * allocated.
273	 */
274
275	/* Get RX FIFO size */
276	sc->sc_rxfifosize = 64 *
277	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);
278
279	/* Get TX FIFO size */
280	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
281	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
282	    sc->sc_rxfifosize / 1024, v / 16);
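	/*
	 * (The FIFO size registers appear to count 64-byte units, hence the
	 * conversions to kB above: v / 16 == 64 * v / 1024.)
	 */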
283
284	sc->sc_csum_features = GEM_CSUM_FEATURES;
285	/* Initialize ifnet structure. */
286	ifp->if_softc = sc;
287	if_initname(ifp, device_get_name(sc->sc_dev),
288	    device_get_unit(sc->sc_dev));
289	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
290	ifp->if_start = gem_start;
291	ifp->if_ioctl = gem_ioctl;
292	ifp->if_init = gem_init;
293	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
294	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
295	IFQ_SET_READY(&ifp->if_snd);
296	/*
297	 * Walk along the list of attached MII devices and
298	 * establish an `MII instance' to `phy number'
299	 * mapping. We'll use this mapping in media change
300	 * requests to determine which phy to use to program
301	 * the MIF configuration register.
302	 */
303	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
304	     child = LIST_NEXT(child, mii_list)) {
305		/*
306		 * Note: we support just two PHYs: the built-in
307		 * internal device and an external on the MII
308		 * connector.
309		 */
310		if (child->mii_phy > 1 || child->mii_inst > 1) {
311			device_printf(sc->sc_dev, "cannot accommodate "
312			    "MII device %s at phy %d, instance %d\n",
313			    device_get_name(child->mii_dev),
314			    child->mii_phy, child->mii_inst);
315			continue;
316		}
317
318		sc->sc_phys[child->mii_inst] = child->mii_phy;
319	}
320
321	/*
322	 * Now select and activate the PHY we will use.
323	 *
324	 * The order of preference is External (MDI1),
325	 * Internal (MDI0), Serial Link (no MII).
326	 */
327	if (sc->sc_phys[1]) {
328#ifdef GEM_DEBUG
329		printf("using external phy\n");
330#endif
331		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
332	} else {
333#ifdef GEM_DEBUG
334		printf("using internal phy\n");
335#endif
336		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
337	}
338	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG,
339	    sc->sc_mif_config);
340	/* Attach the interface. */
341	ether_ifattach(ifp, sc->sc_enaddr);
342
343#ifdef notyet
344	/*
345	 * Add a suspend hook to make sure we come back up after a
346	 * resume.
347	 */
348	sc->sc_powerhook = powerhook_establish(gem_power, sc);
349	if (sc->sc_powerhook == NULL)
350		device_printf(sc->sc_dev, "WARNING: unable to establish power "
351		    "hook\n");
352#endif
353
354	/*
355	 * Tell the upper layer(s) we support long frames/checksum offloads.
356	 */
357	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
358	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
359	ifp->if_hwassist |= sc->sc_csum_features;
360	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
361
362	return (0);
363
364	/*
365	 * Free any resources we've allocated during the failed attach
366	 * attempt.  Do this in reverse order and fall through.
367	 */
368fail_rxd:
369	for (i = 0; i < GEM_NRXDESC; i++) {
370		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
371			bus_dmamap_destroy(sc->sc_rdmatag,
372			    sc->sc_rxsoft[i].rxs_dmamap);
373	}
374fail_txd:
375	for (i = 0; i < GEM_TXQUEUELEN; i++) {
376		if (sc->sc_txsoft[i].txs_dmamap != NULL)
377			bus_dmamap_destroy(sc->sc_tdmatag,
378			    sc->sc_txsoft[i].txs_dmamap);
379	}
380	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
381fail_cmem:
382	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
383	    sc->sc_cddmamap);
384fail_ctag:
385	bus_dma_tag_destroy(sc->sc_cdmatag);
386fail_ttag:
387	bus_dma_tag_destroy(sc->sc_tdmatag);
388fail_rtag:
389	bus_dma_tag_destroy(sc->sc_rdmatag);
390fail_ptag:
391	bus_dma_tag_destroy(sc->sc_pdmatag);
392fail_ifnet:
393	if_free(ifp);
394	return (error);
395}
396
397void
398gem_detach(sc)
399	struct gem_softc *sc;
400{
401	struct ifnet *ifp = sc->sc_ifp;
402	int i;
403
404	GEM_LOCK(sc);
405	gem_stop(ifp, 1);
406	GEM_UNLOCK(sc);
407	callout_drain(&sc->sc_tick_ch);
408#ifdef GEM_RINT_TIMEOUT
409	callout_drain(&sc->sc_rx_ch);
410#endif
411	ether_ifdetach(ifp);
412	if_free(ifp);
413	device_delete_child(sc->sc_dev, sc->sc_miibus);
414
415	for (i = 0; i < GEM_NRXDESC; i++) {
416		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
417			bus_dmamap_destroy(sc->sc_rdmatag,
418			    sc->sc_rxsoft[i].rxs_dmamap);
419	}
420	for (i = 0; i < GEM_TXQUEUELEN; i++) {
421		if (sc->sc_txsoft[i].txs_dmamap != NULL)
422			bus_dmamap_destroy(sc->sc_tdmatag,
423			    sc->sc_txsoft[i].txs_dmamap);
424	}
425	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
426	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
427	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
428	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
429	    sc->sc_cddmamap);
430	bus_dma_tag_destroy(sc->sc_cdmatag);
431	bus_dma_tag_destroy(sc->sc_tdmatag);
432	bus_dma_tag_destroy(sc->sc_rdmatag);
433	bus_dma_tag_destroy(sc->sc_pdmatag);
434}
435
436void
437gem_suspend(sc)
438	struct gem_softc *sc;
439{
440	struct ifnet *ifp = sc->sc_ifp;
441
442	GEM_LOCK(sc);
443	gem_stop(ifp, 0);
444	GEM_UNLOCK(sc);
445}
446
447void
448gem_resume(sc)
449	struct gem_softc *sc;
450{
451	struct ifnet *ifp = sc->sc_ifp;
452
453	GEM_LOCK(sc);
454	/*
455	 * On resume all registers have to be initialized again like
456	 * after power-on.
457	 */
458	sc->sc_inited = 0;
459	if (ifp->if_flags & IFF_UP)
460		gem_init_locked(sc);
461	GEM_UNLOCK(sc);
462}
463
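/*
 * gem_txcksum:
 *
 *	Fill in the TX checksum offload fields of the descriptor flags.  The
 *	start offset points at the beginning of the L4 header (Ethernet plus
 *	IP header, including options) and the stuff offset at the checksum
 *	field itself, computed from the offset the stack left in
 *	m_pkthdr.csum_data.
 */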
464static __inline void
465gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
466{
467	struct ip *ip;
468	uint64_t offset, offset2;
469	char *p;
470
471	offset = sizeof(struct ip) + ETHER_HDR_LEN;
472	for (; m && m->m_len == 0; m = m->m_next)
473		;
474	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
475		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
476		    __func__);
477		/* checksum will be corrupted */
478		goto sendit;
479	}
480	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
481		if (m->m_len != ETHER_HDR_LEN) {
482			device_printf(sc->sc_dev,
483			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
484			/* checksum will be corrupted */
485			goto sendit;
486		}
487		for (m = m->m_next; m && m->m_len == 0; m = m->m_next)
488			;
489		if (m == NULL) {
490			/* checksum will be corrupted */
491			goto sendit;
492		}
493		ip = mtod(m, struct ip *);
494	} else {
495		p = mtod(m, uint8_t *);
496		p += ETHER_HDR_LEN;
497		ip = (struct ip *)p;
498	}
499	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
500
501sendit:
502	offset2 = m->m_pkthdr.csum_data;
503	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
504	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
505	*cflags |= GEM_TD_CXSUM_ENABLE;
506}
507
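/*
 * gem_rxcksum:
 *
 *	Propagate the hardware RX checksum to the stack.  The chip leaves a
 *	16-bit checksum of the payload in the descriptor flags; this routine
 *	checks that the frame is an unfragmented IPv4 TCP or UDP packet,
 *	adjusts the sum for any IP options and hands it to the stack via
 *	csum_data with CSUM_DATA_VALID set.
 */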
508static __inline void
509gem_rxcksum(struct mbuf *m, uint64_t flags)
510{
511	struct ether_header *eh;
512	struct ip *ip;
513	struct udphdr *uh;
514	int32_t hlen, len, pktlen;
515	uint16_t cksum, *opts;
516	uint32_t temp32;
517
518	pktlen = m->m_pkthdr.len;
519	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
520		return;
521	eh = mtod(m, struct ether_header *);
522	if (eh->ether_type != htons(ETHERTYPE_IP))
523		return;
524	ip = (struct ip *)(eh + 1);
525	if (ip->ip_v != IPVERSION)
526		return;
527
528	hlen = ip->ip_hl << 2;
529	pktlen -= sizeof(struct ether_header);
530	if (hlen < sizeof(struct ip))
531		return;
532	if (ntohs(ip->ip_len) < hlen)
533		return;
534	if (ntohs(ip->ip_len) != pktlen)
535		return;
536	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
537		return;	/* can't handle fragmented packet */
538
539	switch (ip->ip_p) {
540	case IPPROTO_TCP:
541		if (pktlen < (hlen + sizeof(struct tcphdr)))
542			return;
543		break;
544	case IPPROTO_UDP:
545		if (pktlen < (hlen + sizeof(struct udphdr)))
546			return;
547		uh = (struct udphdr *)((uint8_t *)ip + hlen);
548		if (uh->uh_sum == 0)
549			return; /* no checksum */
550		break;
551	default:
552		return;
553	}
554
555	cksum = ~(flags & GEM_RD_CHECKSUM);
556	/* checksum fixup for IP options */
557	len = hlen - sizeof(struct ip);
558	if (len > 0) {
559		opts = (uint16_t *)(ip + 1);
560		for (; len > 0; len -= sizeof(uint16_t), opts++) {
561			temp32 = cksum - *opts;
562			temp32 = (temp32 >> 16) + (temp32 & 65535);
563			cksum = temp32 & 65535;
564		}
565	}
566	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
567	m->m_pkthdr.csum_data = cksum;
568}
569
570static void
571gem_cddma_callback(xsc, segs, nsegs, error)
572	void *xsc;
573	bus_dma_segment_t *segs;
574	int nsegs;
575	int error;
576{
577	struct gem_softc *sc = (struct gem_softc *)xsc;
578
579	if (error != 0)
580		return;
581	if (nsegs != 1) {
582		/* can't happen... */
583		panic("gem_cddma_callback: bad control buffer segment count");
584	}
585	sc->sc_cddma = segs[0].ds_addr;
586}
587
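/*
 * gem_tick:
 *
 *	Once-a-second housekeeping: fold the hardware collision counters
 *	into if_collisions, poll the PHY and run the watchdog.
 */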
588static void
589gem_tick(arg)
590	void *arg;
591{
592	struct gem_softc *sc = arg;
593	struct ifnet *ifp;
594
595	GEM_LOCK_ASSERT(sc, MA_OWNED);
596
597	ifp = sc->sc_ifp;
598	/*
599	 * Unload collision counters
600	 */
601	ifp->if_collisions +=
602	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
603	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
604	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
605	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
606
607	/*
608	 * then clear the hardware counters.
609	 */
610	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
611	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
612	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
613	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
614
615	mii_tick(sc->sc_mii);
616
617	if (gem_watchdog(sc) == EJUSTRETURN)
618		return;
619
620	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
621}
622
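/*
 * gem_bitwait:
 *
 *	Poll register `r' until all bits in `clr' read back as zero and all
 *	bits in `set' read back as one, or until TRIES iterations (100us
 *	apart) have elapsed.  Returns non-zero on success and zero on
 *	timeout.
 */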
623static int
624gem_bitwait(sc, r, clr, set)
625	struct gem_softc *sc;
626	bus_addr_t r;
627	u_int32_t clr;
628	u_int32_t set;
629{
630	int i;
631	u_int32_t reg;
632
633	for (i = TRIES; i--; DELAY(100)) {
634		reg = bus_read_4(sc->sc_res[0], r);
635		if ((reg & clr) == 0 && (reg & set) == set)
636			return (1);
637	}
638	return (0);
639}
640
641void
642gem_reset(sc)
643	struct gem_softc *sc;
644{
645
646#ifdef GEM_DEBUG
647	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
648#endif
649	gem_reset_rx(sc);
650	gem_reset_tx(sc);
651
652	/* Do a full reset */
653	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
654	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
655		device_printf(sc->sc_dev, "cannot reset device\n");
656}
657
658
659/*
660 * gem_rxdrain:
661 *
662 *	Drain the receive queue.
663 */
664static void
665gem_rxdrain(sc)
666	struct gem_softc *sc;
667{
668	struct gem_rxsoft *rxs;
669	int i;
670
671	for (i = 0; i < GEM_NRXDESC; i++) {
672		rxs = &sc->sc_rxsoft[i];
673		if (rxs->rxs_mbuf != NULL) {
674			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
675			    BUS_DMASYNC_POSTREAD);
676			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
677			m_freem(rxs->rxs_mbuf);
678			rxs->rxs_mbuf = NULL;
679		}
680	}
681}
682
683/*
684 * Reset the whole thing.
685 */
686static void
687gem_stop(ifp, disable)
688	struct ifnet *ifp;
689	int disable;
690{
691	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
692	struct gem_txsoft *txs;
693
694#ifdef GEM_DEBUG
695	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
696#endif
697
698	callout_stop(&sc->sc_tick_ch);
699#ifdef GEM_RINT_TIMEOUT
700	callout_stop(&sc->sc_rx_ch);
701#endif
702
703	/* XXX - Should we reset these instead? */
704	gem_disable_tx(sc);
705	gem_disable_rx(sc);
706
707	/*
708	 * Release any queued transmit buffers.
709	 */
710	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
711		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
712		if (txs->txs_ndescs != 0) {
713			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
714			    BUS_DMASYNC_POSTWRITE);
715			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
716			if (txs->txs_mbuf != NULL) {
717				m_freem(txs->txs_mbuf);
718				txs->txs_mbuf = NULL;
719			}
720		}
721		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
722	}
723
724	if (disable)
725		gem_rxdrain(sc);
726
727	/*
728	 * Mark the interface down and cancel the watchdog timer.
729	 */
730	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
731	sc->sc_wdog_timer = 0;
732}
733
734/*
735 * Reset the receiver
736 */
737int
738gem_reset_rx(sc)
739	struct gem_softc *sc;
740{
741
742	/*
743	 * Resetting while DMA is in progress can cause a bus hang, so we
744	 * disable DMA first.
745	 */
746	gem_disable_rx(sc);
747	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
748	/* Wait till it finishes */
749	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
750		device_printf(sc->sc_dev, "cannot disable rx dma\n");
751
752	/* Wait 5ms extra. */
753	DELAY(5000);
754
755	/* Finally, reset the ERX */
756	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
757	/* Wait till it finishes */
758	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
759		device_printf(sc->sc_dev, "cannot reset receiver\n");
760		return (1);
761	}
762	return (0);
763}
764
765
766/*
767 * Reset the transmitter
768 */
769static int
770gem_reset_tx(sc)
771	struct gem_softc *sc;
772{
773	int i;
774
775	/*
776	 * Resetting while DMA is in progress can cause a bus hang, so we
777	 * disable DMA first.
778	 */
779	gem_disable_tx(sc);
780	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
781	/* Wait till it finishes */
782	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
783		device_printf(sc->sc_dev, "cannot disable tx dma\n");
784
785	/* Wait 5ms extra. */
786	DELAY(5000);
787
788	/* Finally, reset the ETX */
789	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
790	/* Wait till it finishes */
791	for (i = TRIES; i--; DELAY(100))
792		if ((bus_read_4(sc->sc_res[0], GEM_RESET) & GEM_RESET_TX) == 0)
793			break;
794	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
795		device_printf(sc->sc_dev, "cannot reset transmitter\n");
796		return (1);
797	}
798	return (0);
799}
800
801/*
802 * disable receiver.
803 */
804static int
805gem_disable_rx(sc)
806	struct gem_softc *sc;
807{
808	u_int32_t cfg;
809
810	/* Flip the enable bit */
811	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
812	cfg &= ~GEM_MAC_RX_ENABLE;
813	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
814
815	/* Wait for it to finish */
816	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
817}
818
819/*
820 * disable transmitter.
821 */
822static int
823gem_disable_tx(sc)
824	struct gem_softc *sc;
825{
826	u_int32_t cfg;
827
828	/* Flip the enable bit */
829	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
830	cfg &= ~GEM_MAC_TX_ENABLE;
831	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
832
833	/* Wait for it to finish */
834	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
835}
836
837/*
838 * Initialize the transmit and receive descriptor rings.
839 */
840static int
841gem_meminit(sc)
842	struct gem_softc *sc;
843{
844	struct gem_rxsoft *rxs;
845	int i, error;
846
847	/*
848	 * Initialize the transmit descriptor ring.
849	 */
850	for (i = 0; i < GEM_NTXDESC; i++) {
851		sc->sc_txdescs[i].gd_flags = 0;
852		sc->sc_txdescs[i].gd_addr = 0;
853	}
854	sc->sc_txfree = GEM_MAXTXFREE;
855	sc->sc_txnext = 0;
856	sc->sc_txwin = 0;
857
858	/*
859	 * Initialize the receive descriptor and receive job
860	 * descriptor rings.
861	 */
862	for (i = 0; i < GEM_NRXDESC; i++) {
863		rxs = &sc->sc_rxsoft[i];
864		if (rxs->rxs_mbuf == NULL) {
865			if ((error = gem_add_rxbuf(sc, i)) != 0) {
866				device_printf(sc->sc_dev, "unable to "
867				    "allocate or map rx buffer %d, error = "
868				    "%d\n", i, error);
869				/*
870				 * XXX Should attempt to run with fewer receive
871				 * XXX buffers instead of just failing.
872				 */
873				gem_rxdrain(sc);
874				return (1);
875			}
876		} else
877			GEM_INIT_RXDESC(sc, i);
878	}
879	sc->sc_rxptr = 0;
880	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
881	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
882
883	return (0);
884}
885
886static int
887gem_ringsize(sz)
888	int sz;
889{
890	int v = 0;
891
892	switch (sz) {
893	case 32:
894		v = GEM_RING_SZ_32;
895		break;
896	case 64:
897		v = GEM_RING_SZ_64;
898		break;
899	case 128:
900		v = GEM_RING_SZ_128;
901		break;
902	case 256:
903		v = GEM_RING_SZ_256;
904		break;
905	case 512:
906		v = GEM_RING_SZ_512;
907		break;
908	case 1024:
909		v = GEM_RING_SZ_1024;
910		break;
911	case 2048:
912		v = GEM_RING_SZ_2048;
913		break;
914	case 4096:
915		v = GEM_RING_SZ_4096;
916		break;
917	case 8192:
918		v = GEM_RING_SZ_8192;
919		break;
920	default:
921		printf("gem: invalid Receive Descriptor ring size\n");
922		break;
923	}
924	return (v);
925}
926
927static void
928gem_init(xsc)
929	void *xsc;
930{
931	struct gem_softc *sc = (struct gem_softc *)xsc;
932
933	GEM_LOCK(sc);
934	gem_init_locked(sc);
935	GEM_UNLOCK(sc);
936}
937
938/*
939 * Initialization of interface; set up initialization block
940 * and transmit/receive descriptor rings.
941 */
942static void
943gem_init_locked(sc)
944	struct gem_softc *sc;
945{
946	struct ifnet *ifp = sc->sc_ifp;
947	u_int32_t v;
948
949	GEM_LOCK_ASSERT(sc, MA_OWNED);
950
951#ifdef GEM_DEBUG
952	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
953	    __func__);
954#endif
955	/*
956	 * Initialization sequence. The numbered steps below correspond
957	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
958	 * Channel Engine manual (part of the PCIO manual).
959	 * See also the STP2002-STQ document from Sun Microsystems.
960	 */
961
962	/* step 1 & 2. Reset the Ethernet Channel */
963	gem_stop(sc->sc_ifp, 0);
964	gem_reset(sc);
965#ifdef GEM_DEBUG
966	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
967	    __func__);
968#endif
969
970	/* Re-initialize the MIF */
971	gem_mifinit(sc);
972
973	/* step 3. Setup data structures in host memory */
974	gem_meminit(sc);
975
976	/* step 4. TX MAC registers & counters */
977	gem_init_regs(sc);
978
979	/* step 5. RX MAC registers & counters */
980	gem_setladrf(sc);
981
982	/* step 6 & 7. Program Descriptor Ring Base Addresses */
983	/* NOTE: we use only 32-bit DMA addresses here. */
984	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
985	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
986
987	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
988	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
989#ifdef GEM_DEBUG
990	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
991	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
992#endif
993
994	/* step 8. Global Configuration & Interrupt Mask */
995	bus_write_4(sc->sc_res[0], GEM_INTMASK,
996		      ~(GEM_INTR_TX_INTME|
997			GEM_INTR_TX_EMPTY|
998			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
999			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
1000			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
1001			GEM_INTR_BERR));
1002	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
1003			GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
1004	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK, 0xffff); /* XXXX */
1005	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 0); /* XXXX */
1006
1007	/* step 9. ETX Configuration: use mostly default values */
1008
1009	/* Enable DMA */
1010	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1011	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
1012		v|GEM_TX_CONFIG_TXDMA_EN|
1013		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
1014
1015	/* step 10. ERX Configuration */
1016
1017	/* Encode Receive Descriptor ring size */
1018	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
1019	/* Rx TCP/UDP checksum offset */
1020	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
1021	    GEM_RX_CONFIG_CXM_START_SHFT);
1022
1023	/* Enable DMA */
1024	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
1025		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
1026		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
1027	/*
1028	 * The following value is for an OFF Threshold of about 3/4 full
1029	 * and an ON Threshold of 1/4 full.
1030	 */
1031	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1032	    (3 * sc->sc_rxfifosize / 256) |
1033	    (   (sc->sc_rxfifosize / 256) << 12));
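	/*
	 * (The thresholds are in 64-byte units, so sc_rxfifosize / 256
	 * corresponds to a quarter of the FIFO.)
	 */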
1034	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, (6<<12)|6);
1035
1036	/* step 11. Configure Media */
1037	mii_mediachg(sc->sc_mii);
1038
1039	/* step 12. RX_MAC Configuration Register */
1040	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
1041	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
1042	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
1043
1044	/* step 14. Issue Transmit Pending command */
1045
1046	/* step 15.  Give the receiver a swift kick */
1047	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
1048
1049	/* Start the one second timer. */
1050	sc->sc_wdog_timer = 0;
1051	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
1052
1053	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1054	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1055	sc->sc_ifflags = ifp->if_flags;
1056}
1057
1058/*
1059 * This is a copy of ath_defrag() from ath(4).
1060 *
1061 * Defragment an mbuf chain, returning at most maxfrags separate
1062 * mbufs+clusters.  If this is not possible, NULL is returned and
1063 * the original mbuf chain is left in its present (potentially
1064 * modified) state.  We use two techniques: collapsing consecutive
1065 * mbufs and replacing consecutive mbufs by a cluster.
1066 */
1067static struct mbuf *
1068gem_defrag(m0, how, maxfrags)
1069	struct mbuf *m0;
1070	int how;
1071	int maxfrags;
1072{
1073	struct mbuf *m, *n, *n2, **prev;
1074	u_int curfrags;
1075
1076	/*
1077	 * Calculate the current number of frags.
1078	 */
1079	curfrags = 0;
1080	for (m = m0; m != NULL; m = m->m_next)
1081		curfrags++;
1082	/*
1083	 * First, try to collapse mbufs.  Note that we always collapse
1084	 * towards the front so we don't need to deal with moving the
1085	 * pkthdr.  This may be suboptimal if the first mbuf has much
1086	 * less data than the following.
1087	 */
1088	m = m0;
1089again:
1090	for (;;) {
1091		n = m->m_next;
1092		if (n == NULL)
1093			break;
1094		if ((m->m_flags & M_RDONLY) == 0 &&
1095		    n->m_len < M_TRAILINGSPACE(m)) {
1096			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1097				n->m_len);
1098			m->m_len += n->m_len;
1099			m->m_next = n->m_next;
1100			m_free(n);
1101			if (--curfrags <= maxfrags)
1102				return (m0);
1103		} else
1104			m = n;
1105	}
1106	KASSERT(maxfrags > 1,
1107		("maxfrags %u, but normal collapse failed", maxfrags));
1108	/*
1109	 * Collapse consecutive mbufs to a cluster.
1110	 */
1111	prev = &m0->m_next;		/* NB: not the first mbuf */
1112	while ((n = *prev) != NULL) {
1113		if ((n2 = n->m_next) != NULL &&
1114		    n->m_len + n2->m_len < MCLBYTES) {
1115			m = m_getcl(how, MT_DATA, 0);
1116			if (m == NULL)
1117				goto bad;
1118			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1119			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1120				n2->m_len);
1121			m->m_len = n->m_len + n2->m_len;
1122			m->m_next = n2->m_next;
1123			*prev = m;
1124			m_free(n);
1125			m_free(n2);
1126			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
1127				return (m0);
1128			/*
1129			 * Still not there, try the normal collapse
1130			 * again before we allocate another cluster.
1131			 */
1132			goto again;
1133		}
1134		prev = &n->m_next;
1135	}
1136	/*
1137	 * No place where we can collapse to a cluster; punt.
1138	 * This can occur if, for example, you request 2 frags
1139	 * but the packet requires that both be clusters (we
1140	 * never reallocate the first mbuf to avoid moving the
1141	 * packet header).
1142	 */
1143bad:
1144	return (NULL);
1145}
1146
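/*
 * gem_load_txmbuf:
 *
 *	DMA-map an outgoing mbuf chain, defragmenting it to at most
 *	GEM_NTXSEGS clusters if necessary, and fill in the corresponding TX
 *	descriptors.  Returns 0 on success and an errno otherwise; when the
 *	chain had to be dropped, *m_head is freed and set to NULL, which the
 *	caller uses to decide whether to requeue the packet.
 */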
1147static int
1148gem_load_txmbuf(sc, m_head)
1149	struct gem_softc *sc;
1150	struct mbuf **m_head;
1151{
1152	struct gem_txsoft *txs;
1153	bus_dma_segment_t txsegs[GEM_NTXSEGS];
1154	struct mbuf *m;
1155	uint64_t flags, cflags;
1156	int error, nexttx, nsegs, seg;
1157
1158	/* Get a work queue entry. */
1159	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1160		/* Ran out of descriptors. */
1161		return (ENOBUFS);
1162	}
1163	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1164	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1165	if (error == EFBIG) {
1166		m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
1167		if (m == NULL) {
1168			m_freem(*m_head);
1169			*m_head = NULL;
1170			return (ENOBUFS);
1171		}
1172		*m_head = m;
1173		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
1174		    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1175		if (error != 0) {
1176			m_freem(*m_head);
1177			*m_head = NULL;
1178			return (error);
1179		}
1180	} else if (error != 0)
1181		return (error);
1182	if (nsegs == 0) {
1183		m_freem(*m_head);
1184		*m_head = NULL;
1185		return (EIO);
1186	}
1187
1188	/*
1189	 * Ensure we have enough descriptors free to describe
1190	 * the packet.  Note, we always reserve one descriptor
1191	 * at the end of the ring as a termination point, to
1192	 * prevent wrap-around.
1193	 */
1194	if (nsegs > sc->sc_txfree - 1) {
1195		txs->txs_ndescs = 0;
1196		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1197		return (ENOBUFS);
1198	}
1199
1200	flags = cflags = 0;
1201	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
1202		gem_txcksum(sc, *m_head, &cflags);
1203
1204	txs->txs_ndescs = nsegs;
1205	txs->txs_firstdesc = sc->sc_txnext;
1206	nexttx = txs->txs_firstdesc;
1207	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
1208#ifdef	GEM_DEBUG
1209		CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
1210		    "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
1211		    txsegs[seg].ds_len, txsegs[seg].ds_addr,
1212		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
1213#endif
1214		sc->sc_txdescs[nexttx].gd_addr =
1215		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
1216		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
1217		    ("%s: segment size too large!", __func__));
1218		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
1219		sc->sc_txdescs[nexttx].gd_flags =
1220		    GEM_DMA_WRITE(sc, flags | cflags);
1221		txs->txs_lastdesc = nexttx;
1222	}
1223
1224	/* set EOP on the last descriptor */
1225#ifdef	GEM_DEBUG
1226	CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
1227	    nexttx);
1228#endif
1229	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
1230	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
1231
1232	/* Lastly set SOP on the first descriptor */
1233#ifdef	GEM_DEBUG
1234	CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
1235	    nexttx);
1236#endif
1237	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1238		sc->sc_txwin = 0;
1239		flags |= GEM_TD_INTERRUPT_ME;
1240		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1241		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
1242		    GEM_TD_START_OF_PACKET);
1243	} else
1244		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
1245		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
1246
1247	/* Sync the DMA map. */
1248	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);
1249
1250#ifdef GEM_DEBUG
1251	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
1252	    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
1253#endif
1254	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1255	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1256	txs->txs_mbuf = *m_head;
1257
1258	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1259	sc->sc_txfree -= txs->txs_ndescs;
1260
1261	return (0);
1262}
1263
1264static void
1265gem_init_regs(sc)
1266	struct gem_softc *sc;
1267{
1268	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
1269	u_int32_t v;
1270
1271	/* These regs are not cleared on reset */
1272	if (!sc->sc_inited) {
1273
1274		/* Wooo.  Magic values. */
1275		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
1276		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
1277		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);
1278
1279		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1280		/* Max frame and max burst size */
1281		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
1282		    (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) |
1283		    (0x2000 << 16));
1284
1285		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
1286		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
1287		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
1288		/* Dunno.... */
1289		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
1290		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
1291		    ((laddr[5]<<8)|laddr[4])&0x3ff);
1292
1293		/* Secondary MAC addr set to 0:0:0:0:0:0 */
1294		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
1295		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
1296		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);
1297
1298		/* MAC control addr set to 01:80:c2:00:00:01 */
1299		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
1300		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
1301		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);
1302
1303		/* MAC filter addr set to 0:0:0:0:0:0 */
1304		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
1305		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
1306		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);
1307
1308		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
1309		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);
1310
1311		sc->sc_inited = 1;
1312	}
1313
1314	/* Counters need to be zeroed */
1315	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
1316	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
1317	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
1318	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
1319	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
1320	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
1321	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
1322	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
1323	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
1324	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
1325	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);
1326
1327	/* Un-pause stuff */
1328#if 0
1329	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
1330#else
1331	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0);
1332#endif
1333
1334	/*
1335	 * Set the station address.
1336	 */
1337	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
1338	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
1339	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
1340
1341	/*
1342	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
1343	 */
1344	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
1345	v = GEM_MAC_XIF_TX_MII_ENA;
1346	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
1347		v |= GEM_MAC_XIF_FDPLX_LED;
1348		if (sc->sc_flags & GEM_GIGABIT)
1349			v |= GEM_MAC_XIF_GMII_MODE;
1350	}
1351	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
1352}
1353
1354static void
1355gem_start(ifp)
1356	struct ifnet *ifp;
1357{
1358	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1359
1360	GEM_LOCK(sc);
1361	gem_start_locked(ifp);
1362	GEM_UNLOCK(sc);
1363}
1364
1365static void
1366gem_start_locked(ifp)
1367	struct ifnet *ifp;
1368{
1369	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1370	struct mbuf *m;
1371	int firsttx, ntx = 0, txmfail;
1372
1373	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1374	    IFF_DRV_RUNNING)
1375		return;
1376
1377	firsttx = sc->sc_txnext;
1378#ifdef GEM_DEBUG
1379	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
1380	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree, firsttx);
1381#endif
1382	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
1383		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1384		if (m == NULL)
1385			break;
1386		txmfail = gem_load_txmbuf(sc, &m);
1387		if (txmfail != 0) {
1388			if (m == NULL)
1389				break;
1390			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1391			IFQ_DRV_PREPEND(&ifp->if_snd, m);
1392			break;
1393		}
1394		ntx++;
1395		/* Kick the transmitter. */
1396#ifdef	GEM_DEBUG
1397		CTR3(KTR_GEM, "%s: %s: kicking tx %d",
1398		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
1399#endif
1400		bus_write_4(sc->sc_res[0], GEM_TX_KICK,
1401			sc->sc_txnext);
1402
1403		BPF_MTAP(ifp, m);
1404	}
1405
1406	if (ntx > 0) {
1407		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1408
1409#ifdef GEM_DEBUG
1410		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1411		    device_get_name(sc->sc_dev), firsttx);
1412#endif
1413
1414		/* Set a watchdog timer in case the chip flakes out. */
1415		sc->sc_wdog_timer = 5;
1416#ifdef GEM_DEBUG
1417		CTR3(KTR_GEM, "%s: %s: watchdog %d",
1418		    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1419#endif
1420	}
1421}
1422
1423/*
1424 * Transmit interrupt.
1425 */
1426static void
1427gem_tint(sc)
1428	struct gem_softc *sc;
1429{
1430	struct ifnet *ifp = sc->sc_ifp;
1431	struct gem_txsoft *txs;
1432	int txlast;
1433	int progress = 0;
1434
1435
1436#ifdef GEM_DEBUG
1437	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1438#endif
1439
1440	/*
1441	 * Go through our Tx list and free mbufs for those
1442	 * frames that have been transmitted.
1443	 */
1444	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1445	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1446
1447#ifdef GEM_DEBUG
1448		if (ifp->if_flags & IFF_DEBUG) {
1449			int i;
1450			printf("    txsoft %p transmit chain:\n", txs);
1451			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1452				printf("descriptor %d: ", i);
1453				printf("gd_flags: 0x%016llx\t", (long long)
1454					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1455				printf("gd_addr: 0x%016llx\n", (long long)
1456					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1457				if (i == txs->txs_lastdesc)
1458					break;
1459			}
1460		}
1461#endif
1462
1463		/*
1464		 * In theory, we could harvest some descriptors before
1465		 * the ring is empty, but that's a bit complicated.
1466		 *
1467		 * GEM_TX_COMPLETION points to the last descriptor
1468		 * processed +1.
1469		 */
1470		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
1471#ifdef GEM_DEBUG
1472		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
1473		    "txs->txs_lastdesc = %d, txlast = %d",
1474		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1475#endif
1476		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1477			if ((txlast >= txs->txs_firstdesc) &&
1478				(txlast <= txs->txs_lastdesc))
1479				break;
1480		} else {
1481			/* Ick -- this command wraps */
1482			if ((txlast >= txs->txs_firstdesc) ||
1483				(txlast <= txs->txs_lastdesc))
1484				break;
1485		}
1486
1487#ifdef GEM_DEBUG
1488		CTR1(KTR_GEM, "%s: releasing a desc", __func__);
1489#endif
1490		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1491
1492		sc->sc_txfree += txs->txs_ndescs;
1493
1494		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1495		    BUS_DMASYNC_POSTWRITE);
1496		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1497		if (txs->txs_mbuf != NULL) {
1498			m_freem(txs->txs_mbuf);
1499			txs->txs_mbuf = NULL;
1500		}
1501
1502		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1503
1504		ifp->if_opackets++;
1505		progress = 1;
1506	}
1507
1508#ifdef GEM_DEBUG
1509	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
1510		"GEM_TX_DATA_PTR %llx "
1511		"GEM_TX_COMPLETION %x",
1512		__func__,
1513		bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
1514		((long long) bus_read_4(sc->sc_res[0],
1515			GEM_TX_DATA_PTR_HI) << 32) |
1516			     bus_read_4(sc->sc_res[0],
1517			GEM_TX_DATA_PTR_LO),
1518		bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
1519#endif
1520
1521	if (progress) {
1522		if (sc->sc_txfree == GEM_NTXDESC - 1)
1523			sc->sc_txwin = 0;
1524
1525		/* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
1526		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1527		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
1528
1529		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1530		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1531			gem_start_locked(ifp);
1532	}
1533
1534#ifdef GEM_DEBUG
1535	CTR3(KTR_GEM, "%s: %s: watchdog %d",
1536	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
1537#endif
1538}
1539
1540#ifdef GEM_RINT_TIMEOUT
1541static void
1542gem_rint_timeout(arg)
1543	void *arg;
1544{
1545	struct gem_softc *sc = (struct gem_softc *)arg;
1546
1547	GEM_LOCK_ASSERT(sc, MA_OWNED);
1548	gem_rint(sc);
1549}
1550#endif
1551
1552/*
1553 * Receive interrupt.
1554 */
1555static void
1556gem_rint(sc)
1557	struct gem_softc *sc;
1558{
1559	struct ifnet *ifp = sc->sc_ifp;
1560	struct gem_rxsoft *rxs;
1561	struct mbuf *m;
1562	u_int64_t rxstat;
1563	u_int32_t rxcomp;
1564	int i, len, progress = 0;
1565
1566#ifdef GEM_RINT_TIMEOUT
1567	callout_stop(&sc->sc_rx_ch);
1568#endif
1569#ifdef GEM_DEBUG
1570	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
1571#endif
1572
1573	/*
1574	 * Read the completion register once.  This limits
1575	 * how long the following loop can execute.
1576	 */
1577	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);
1578
1579#ifdef GEM_DEBUG
1580	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
1581	    __func__, sc->sc_rxptr, rxcomp);
1582#endif
1583	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1584	for (i = sc->sc_rxptr; i != rxcomp;
1585	     i = GEM_NEXTRX(i)) {
1586		rxs = &sc->sc_rxsoft[i];
1587
1588		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1589
1590		if (rxstat & GEM_RD_OWN) {
1591#ifdef GEM_RINT_TIMEOUT
1592			/*
1593			 * The descriptor is still marked as owned, although
1594			 * it is supposed to have completed. This has been
1595			 * observed on some machines. Just exiting here
1596			 * might leave the packet sitting around until another
1597			 * one arrives to trigger a new interrupt, which is
1598			 * generally undesirable, so set up a timeout.
1599			 */
1600			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1601			    gem_rint_timeout, sc);
1602#endif
1603			break;
1604		}
1605
1606		progress++;
1607		ifp->if_ipackets++;
1608
1609		if (rxstat & GEM_RD_BAD_CRC) {
1610			ifp->if_ierrors++;
1611			device_printf(sc->sc_dev, "receive error: CRC error\n");
1612			GEM_INIT_RXDESC(sc, i);
1613			continue;
1614		}
1615
1616#ifdef GEM_DEBUG
1617		if (ifp->if_flags & IFF_DEBUG) {
1618			printf("    rxsoft %p descriptor %d: ", rxs, i);
1619			printf("gd_flags: 0x%016llx\t", (long long)
1620				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1621			printf("gd_addr: 0x%016llx\n", (long long)
1622				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1623		}
1624#endif
1625
1626		/*
1627		 * No errors; receive the packet.
1628		 */
1629		len = GEM_RD_BUFLEN(rxstat);
1630
1631		/*
1632		 * Allocate a new mbuf cluster.  If that fails, we are
1633		 * out of memory, and must drop the packet and recycle
1634		 * the buffer that's already attached to this descriptor.
1635		 */
1636		m = rxs->rxs_mbuf;
1637		if (gem_add_rxbuf(sc, i) != 0) {
1638			ifp->if_ierrors++;
1639			GEM_INIT_RXDESC(sc, i);
1640			continue;
1641		}
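		/*
		 * The chip was told to start DMA two bytes into the buffer
		 * (FBOFF is set to 2 in gem_init_locked()), so step m_data
		 * past that pad to reach the Ethernet header.
		 */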
1642		m->m_data += 2; /* We're already off by two */
1643
1644		m->m_pkthdr.rcvif = ifp;
1645		m->m_pkthdr.len = m->m_len = len;
1646
1647		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1648			gem_rxcksum(m, rxstat);
1649
1650		/* Pass it on. */
1651		GEM_UNLOCK(sc);
1652		(*ifp->if_input)(ifp, m);
1653		GEM_LOCK(sc);
1654	}
1655
1656	if (progress) {
1657		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1658		/* Update the receive pointer. */
1659		if (i == sc->sc_rxptr) {
1660			device_printf(sc->sc_dev, "rint: ring wrap\n");
1661		}
1662		sc->sc_rxptr = i;
1663		bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_PREVRX(i));
1664	}
1665
1666#ifdef GEM_DEBUG
1667	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1668		sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
1669#endif
1670}
1671
1672
1673/*
1674 * gem_add_rxbuf:
1675 *
1676 *	Add a receive buffer to the indicated descriptor.
1677 */
1678static int
1679gem_add_rxbuf(sc, idx)
1680	struct gem_softc *sc;
1681	int idx;
1682{
1683	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1684	struct mbuf *m;
1685	bus_dma_segment_t segs[1];
1686	int error, nsegs;
1687
1688	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1689	if (m == NULL)
1690		return (ENOBUFS);
1691	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1692
1693#ifdef GEM_DEBUG
1694	/* bzero the packet to check dma */
1695	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1696#endif
1697
1698	if (rxs->rxs_mbuf != NULL) {
1699		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1700		    BUS_DMASYNC_POSTREAD);
1701		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1702	}
1703
1704	rxs->rxs_mbuf = m;
1705
1706	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1707	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1708	/* If nsegs is wrong then the stack is corrupt. */
1709	KASSERT(nsegs == 1, ("Too many segments returned!"));
1710	if (error != 0) {
1711		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
1712		    "%d\n", idx, error);
1713		m_freem(m);
1714		return (ENOBUFS);
1715	}
1716	rxs->rxs_paddr = segs[0].ds_addr;
1717
1718	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
1719
1720	GEM_INIT_RXDESC(sc, idx);
1721
1722	return (0);
1723}
1724
1725
1726static void
1727gem_eint(sc, status)
1728	struct gem_softc *sc;
1729	u_int status;
1730{
1731
1732	if ((status & GEM_INTR_MIF) != 0) {
1733		device_printf(sc->sc_dev, "XXXlink status changed\n");
1734		return;
1735	}
1736
1737	device_printf(sc->sc_dev, "status=%x\n", status);
1738}
1739
1740
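/*
 * gem_intr:
 *
 *	Interrupt service routine.  Reads GEM_STATUS once and dispatches to
 *	the error, TX and RX handlers as indicated by the status bits.
 */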
1741void
1742gem_intr(v)
1743	void *v;
1744{
1745	struct gem_softc *sc = (struct gem_softc *)v;
1746	u_int32_t status;
1747
1748	GEM_LOCK(sc);
1749	status = bus_read_4(sc->sc_res[0], GEM_STATUS);
1750#ifdef GEM_DEBUG
1751	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
1752		device_get_name(sc->sc_dev), __func__, (status>>19),
1753		(u_int)status);
1754#endif
1755
1756	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1757		gem_eint(sc, status);
1758
1759	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1760		gem_tint(sc);
1761
1762	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1763		gem_rint(sc);
1764
1765	/* We should eventually do more than just print out error stats. */
1766	if (status & GEM_INTR_TX_MAC) {
1767		int txstat = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
1768		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
1769			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
1770			    txstat);
1771		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
1772			gem_init_locked(sc);
1773	}
1774	if (status & GEM_INTR_RX_MAC) {
1775		int rxstat = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
1776		/*
1777		 * On some chip revisions GEM_MAC_RX_OVERFLOW happens often
1778		 * due to a silicon bug, so handle it silently.
1779		 */
1780		if (rxstat & GEM_MAC_RX_OVERFLOW)
1781			gem_init_locked(sc);
1782		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1783			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
1784			    rxstat);
1785	}
1786	GEM_UNLOCK(sc);
1787}
1788
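/*
 * gem_watchdog:
 *
 *	Per-second watchdog, called from gem_tick() with the driver lock
 *	held.  When the timer armed when transmissions are queued expires,
 *	the chip is reinitialized and EJUSTRETURN is returned so that
 *	gem_tick() does not rearm the callout (gem_init_locked() already
 *	restarted it).
 */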
1789static int
1790gem_watchdog(sc)
1791	struct gem_softc *sc;
1792{
1793
1794	GEM_LOCK_ASSERT(sc, MA_OWNED);
1795
1796#ifdef GEM_DEBUG
1797	CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1798		"GEM_MAC_RX_CONFIG %x", __func__,
1799		bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1800		bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1801		bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
1802	CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
1803		"GEM_MAC_TX_CONFIG %x", __func__,
1804		bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1805		bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1806		bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
1807#endif
1808
1809	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1810		return (0);
1811
1812	device_printf(sc->sc_dev, "device timeout\n");
1813	++sc->sc_ifp->if_oerrors;
1814
1815	/* Try to get more packets going. */
1816	gem_init_locked(sc);
1817	return (EJUSTRETURN);
1818}
1819
1820/*
1821 * Initialize the MII Management Interface
1822 */
1823static void
1824gem_mifinit(sc)
1825	struct gem_softc *sc;
1826{
1827
1828	/* Configure the MIF in frame mode */
1829	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
1830	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1831	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, sc->sc_mif_config);
1832}
1833
1834/*
1835 * MII interface
1836 *
1837 * The GEM MII interface supports at least three different operating modes:
1838 *
1839 * Bitbang mode is implemented using data, clock and output enable registers.
1840 *
1841 * Frame mode is implemented by loading a complete frame into the frame
1842 * register and polling the valid bit for completion.
1843 *
1844 * Polling mode uses the frame register but completion is indicated by
1845 * an interrupt.
1846 *
1847 */
1848int
1849gem_mii_readreg(dev, phy, reg)
1850	device_t dev;
1851	int phy, reg;
1852{
1853	struct gem_softc *sc = device_get_softc(dev);
1854	int n;
1855	u_int32_t v;
1856
1857#ifdef GEM_DEBUG_PHY
1858	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1859#endif
1860
1861#if 0
1862	/* Select the desired PHY in the MIF configuration register */
1863	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
1864	/* Clear PHY select bit */
1865	v &= ~GEM_MIF_CONFIG_PHY_SEL;
1866	if (phy == GEM_PHYAD_EXTERNAL)
1867		/* Set PHY select bit to get at external device */
1868		v |= GEM_MIF_CONFIG_PHY_SEL;
1869	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
1870#endif
1871
1872	/* Construct the frame command */
1873	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
1874		GEM_MIF_FRAME_READ;
1875
1876	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
1877	for (n = 0; n < 100; n++) {
1878		DELAY(1);
1879		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
1880		if (v & GEM_MIF_FRAME_TA0)
1881			return (v & GEM_MIF_FRAME_DATA);
1882	}
1883
1884	device_printf(sc->sc_dev, "mii_read timeout\n");
1885	return (0);
1886}
1887
1888int
1889gem_mii_writereg(dev, phy, reg, val)
1890	device_t dev;
1891	int phy, reg, val;
1892{
1893	struct gem_softc *sc = device_get_softc(dev);
1894	int n;
1895	u_int32_t v;
1896
1897#ifdef GEM_DEBUG_PHY
1898	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
1899#endif
1900
1901#if 0
1902	/* Select the desired PHY in the MIF configuration register */
1903	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
1904	/* Clear PHY select bit */
1905	v &= ~GEM_MIF_CONFIG_PHY_SEL;
1906	if (phy == GEM_PHYAD_EXTERNAL)
1907		/* Set PHY select bit to get at external device */
1908		v |= GEM_MIF_CONFIG_PHY_SEL;
1909	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
1910#endif
1911	/* Construct the frame command */
1912	v = GEM_MIF_FRAME_WRITE			|
1913	    (phy << GEM_MIF_PHY_SHIFT)		|
1914	    (reg << GEM_MIF_REG_SHIFT)		|
1915	    (val & GEM_MIF_FRAME_DATA);
1916
1917	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
1918	for (n = 0; n < 100; n++) {
1919		DELAY(1);
1920		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
1921		if (v & GEM_MIF_FRAME_TA0)
1922			return (1);
1923	}
1924
1925	device_printf(sc->sc_dev, "mii_write timeout\n");
1926	return (0);
1927}
1928
1929void
1930gem_mii_statchg(dev)
1931	device_t dev;
1932{
1933	struct gem_softc *sc = device_get_softc(dev);
1934#ifdef GEM_DEBUG
1935	int instance;
1936#endif
1937	u_int32_t v;
1938
1939#ifdef GEM_DEBUG
1940	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
1941	if (sc->sc_debug)
1942		printf("gem_mii_statchg: status change: phy = %d\n",
1943			sc->sc_phys[instance]);
1944#endif
1945
1946	/* Set tx full duplex options */
1947	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
1948	DELAY(10000); /* reg must be cleared and a delay observed before changing. */
1949	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
1950		GEM_MAC_TX_ENABLE;
1951	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
1952		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
1953	}
1954	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, v);
1955
1956	/* XIF Configuration */
1957	v = GEM_MAC_XIF_LINK_LED;
1958	v |= GEM_MAC_XIF_TX_MII_ENA;
1959
1960	/* If an external transceiver is connected, enable its MII drivers */
1961	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
1962	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
1963		/* External MII needs echo disable if half duplex. */
1964		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1965			/* turn on full duplex LED */
1966			v |= GEM_MAC_XIF_FDPLX_LED;
1967		else
1968	 		/* half duplex -- disable echo */
1969	 		v |= GEM_MAC_XIF_ECHO_DISABL;
1970
1971		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
1972			v |= GEM_MAC_XIF_GMII_MODE;
1973		else
1974			v &= ~GEM_MAC_XIF_GMII_MODE;
1975	} else {
1976		/* Internal MII needs buf enable */
1977		v |= GEM_MAC_XIF_MII_BUF_ENA;
1978	}
1979	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
1980}
1981
1982int
1983gem_mediachange(ifp)
1984	struct ifnet *ifp;
1985{
1986	struct gem_softc *sc = ifp->if_softc;
1987	int error;
1988
1989	/* XXX Add support for serial media. */
1990
1991	GEM_LOCK(sc);
1992	error = mii_mediachg(sc->sc_mii);
1993	GEM_UNLOCK(sc);
1994	return (error);
1995}
1996
1997void
1998gem_mediastatus(ifp, ifmr)
1999	struct ifnet *ifp;
2000	struct ifmediareq *ifmr;
2001{
2002	struct gem_softc *sc = ifp->if_softc;
2003
2004	GEM_LOCK(sc);
2005	if ((ifp->if_flags & IFF_UP) == 0) {
2006		GEM_UNLOCK(sc);
2007		return;
2008	}
2009
2010	mii_pollstat(sc->sc_mii);
2011	ifmr->ifm_active = sc->sc_mii->mii_media_active;
2012	ifmr->ifm_status = sc->sc_mii->mii_media_status;
2013	GEM_UNLOCK(sc);
2014}
2015
2016/*
2017 * Process an ioctl request.
2018 */
2019static int
2020gem_ioctl(ifp, cmd, data)
2021	struct ifnet *ifp;
2022	u_long cmd;
2023	caddr_t data;
2024{
2025	struct gem_softc *sc = ifp->if_softc;
2026	struct ifreq *ifr = (struct ifreq *)data;
2027	int error = 0;
2028
2029	switch (cmd) {
2030	case SIOCSIFFLAGS:
2031		GEM_LOCK(sc);
2032		if (ifp->if_flags & IFF_UP) {
2033			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
2034				gem_setladrf(sc);
2035			else
2036				gem_init_locked(sc);
2037		} else {
2038			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2039				gem_stop(ifp, 0);
2040		}
2041		if ((ifp->if_flags & IFF_LINK0) != 0)
2042			sc->sc_csum_features |= CSUM_UDP;
2043		else
2044			sc->sc_csum_features &= ~CSUM_UDP;
2045		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2046			ifp->if_hwassist = sc->sc_csum_features;
2047		sc->sc_ifflags = ifp->if_flags;
2048		GEM_UNLOCK(sc);
2049		break;
2050	case SIOCADDMULTI:
2051	case SIOCDELMULTI:
2052		GEM_LOCK(sc);
2053		gem_setladrf(sc);
2054		GEM_UNLOCK(sc);
2055		break;
2056	case SIOCGIFMEDIA:
2057	case SIOCSIFMEDIA:
2058		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
2059		break;
2060	case SIOCSIFCAP:
2061		GEM_LOCK(sc);
2062		ifp->if_capenable = ifr->ifr_reqcap;
2063		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2064			ifp->if_hwassist = sc->sc_csum_features;
2065		else
2066			ifp->if_hwassist = 0;
2067		GEM_UNLOCK(sc);
2068		break;
2069	default:
2070		error = ether_ioctl(ifp, cmd, data);
2071		break;
2072	}
2073
2074	return (error);
2075}
2076
2077/*
2078 * Set up the logical address filter.
2079 */
2080static void
2081gem_setladrf(sc)
2082	struct gem_softc *sc;
2083{
2084	struct ifnet *ifp = sc->sc_ifp;
2085	struct ifmultiaddr *inm;
2086	u_int32_t crc;
2087	u_int32_t hash[16];
2088	u_int32_t v;
2089	int i;
2090
2091	GEM_LOCK_ASSERT(sc, MA_OWNED);
2092
2093	/* Get current RX configuration */
2094	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
2095
2096	/*
2097	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2098	 * and hash filter.  Depending on the case, the right bit will be
2099	 * enabled.
2100	 */
2101	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
2102	    GEM_MAC_RX_PROMISC_GRP);
2103
2104	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2105		/* Turn on promiscuous mode */
2106		v |= GEM_MAC_RX_PROMISCUOUS;
2107		goto chipit;
2108	}
2109	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2110		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
2111		ifp->if_flags |= IFF_ALLMULTI;
2112		v |= GEM_MAC_RX_PROMISC_GRP;
2113		goto chipit;
2114	}
2115
2116	/*
2117	 * Set up multicast address filter by passing all multicast addresses
2118	 * through a crc generator, and then using the high order 8 bits as an
2119	 * index into the 256 bit logical address filter.  The high order 4
2120	 * bits select the word, while the other 4 bits select the bit within
2121	 * the word (where bit 0 is the MSB).
2122	 */
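	/*
	 * For instance, an address whose CRC has 0x2a in its top 8 bits
	 * maps to hash[0x2a >> 4] = hash[2], bit (15 - (0x2a & 15)) = 5.
	 */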
2123
2124	/* Clear hash table */
2125	memset(hash, 0, sizeof(hash));
2126
2127	IF_ADDR_LOCK(ifp);
2128	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
2129		if (inm->ifma_addr->sa_family != AF_LINK)
2130			continue;
2131		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2132		    inm->ifma_addr), ETHER_ADDR_LEN);
2133
2134		/* Just want the 8 most significant bits. */
2135		crc >>= 24;
2136
2137		/* Set the corresponding bit in the filter. */
2138		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2139	}
2140	IF_ADDR_UNLOCK(ifp);
2141
2142	v |= GEM_MAC_RX_HASH_FILTER;
2143	ifp->if_flags &= ~IFF_ALLMULTI;
2144
2145	/* Now load the hash table into the chip (if we are using it) */
2146	for (i = 0; i < 16; i++) {
2147		bus_write_4(sc->sc_res[0],
2148		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
2149		    hash[i]);
2150	}
2151
2152chipit:
2153	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
2154}
2155