if_hme.c revision 178470
191396Stmm/*-
291396Stmm * Copyright (c) 1999 The NetBSD Foundation, Inc.
3108834Stmm * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
491396Stmm * All rights reserved.
591396Stmm *
691396Stmm * This code is derived from software contributed to The NetBSD Foundation
791396Stmm * by Paul Kranenburg.
891396Stmm *
991396Stmm * Redistribution and use in source and binary forms, with or without
1091396Stmm * modification, are permitted provided that the following conditions
1191396Stmm * are met:
1291396Stmm * 1. Redistributions of source code must retain the above copyright
1391396Stmm *    notice, this list of conditions and the following disclaimer.
1491396Stmm * 2. Redistributions in binary form must reproduce the above copyright
1591396Stmm *    notice, this list of conditions and the following disclaimer in the
1691396Stmm *    documentation and/or other materials provided with the distribution.
1791396Stmm * 3. All advertising materials mentioning features or use of this software
1891396Stmm *    must display the following acknowledgement:
1991396Stmm *        This product includes software developed by the NetBSD
2091396Stmm *        Foundation, Inc. and its contributors.
2191396Stmm * 4. Neither the name of The NetBSD Foundation nor the names of its
2291396Stmm *    contributors may be used to endorse or promote products derived
2391396Stmm *    from this software without specific prior written permission.
2491396Stmm *
2591396Stmm * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
2691396Stmm * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
2791396Stmm * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2891396Stmm * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2991396Stmm * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
3091396Stmm * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
3191396Stmm * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
3291396Stmm * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3391396Stmm * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3491396Stmm * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3591396Stmm * POSSIBILITY OF SUCH DAMAGE.
3691396Stmm *
37178470Smarius *	from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
3891396Stmm */
3991396Stmm
40119418Sobrien#include <sys/cdefs.h>
41119418Sobrien__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 178470 2008-04-24 23:12:03Z marius $");
42119418Sobrien
4391396Stmm/*
4491396Stmm * HME Ethernet module driver.
4591396Stmm *
4691396Stmm * The HME is e.g. part of the PCIO PCI multi function device.
4791396Stmm * It supports TX gathering and TX and RX checksum offloading.
4891396Stmm * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
4991396Stmm * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes
5091396Stmm * are skipped to make sure the header after the ethernet header is aligned on a
5191396Stmm * natural boundary, so this ensures minimal wastage in the most common case.
5291396Stmm *
5391396Stmm * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
5491396Stmm * maximum packet size (this is not verified). Buffers starting on odd
5591396Stmm * boundaries must be mapped so that the burst can start on a natural boundary.
5691396Stmm *
57133149Syongari * STP2002QFP-UG says that Ethernet hardware supports TCP checksum offloading.
58133149Syongari * In reality, we can do the same technique for UDP datagram too. However,
59133149Syongari * the hardware doesn't compensate the checksum for UDP datagram which can yield
60133149Syongari * to 0x0. As a safe guard, UDP checksum offload is disabled by default. It
61133149Syongari * can be reactivated by setting special link option link0 with ifconfig(8).
6291396Stmm */
63133149Syongari#define HME_CSUM_FEATURES	(CSUM_TCP)
64178470Smarius#if 0
6591396Stmm#define HMEDEBUG
66178470Smarius#endif
6791396Stmm#define	KTR_HME		KTR_CT2		/* XXX */
6891396Stmm
6991396Stmm#include <sys/param.h>
7091396Stmm#include <sys/systm.h>
7191396Stmm#include <sys/bus.h>
7295533Smike#include <sys/endian.h>
7391396Stmm#include <sys/kernel.h>
74130026Sphk#include <sys/module.h>
7591396Stmm#include <sys/ktr.h>
7691396Stmm#include <sys/mbuf.h>
7791396Stmm#include <sys/malloc.h>
7891396Stmm#include <sys/socket.h>
7991396Stmm#include <sys/sockio.h>
8091396Stmm
81100980Sfenner#include <net/bpf.h>
8291396Stmm#include <net/ethernet.h>
8391396Stmm#include <net/if.h>
8491396Stmm#include <net/if_arp.h>
8591396Stmm#include <net/if_dl.h>
8691396Stmm#include <net/if_media.h>
87147256Sbrooks#include <net/if_types.h>
88129006Sjoerg#include <net/if_vlan_var.h>
8991396Stmm
90133149Syongari#include <netinet/in.h>
91133149Syongari#include <netinet/in_systm.h>
92133149Syongari#include <netinet/ip.h>
93133149Syongari#include <netinet/tcp.h>
94133149Syongari#include <netinet/udp.h>
95133149Syongari
9691396Stmm#include <dev/mii/mii.h>
9791396Stmm#include <dev/mii/miivar.h>
9891396Stmm
9991396Stmm#include <machine/bus.h>
10091396Stmm
101119351Smarcel#include <dev/hme/if_hmereg.h>
102119351Smarcel#include <dev/hme/if_hmevar.h>
10391396Stmm
104178470SmariusCTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
105178470SmariusCTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
106178470Smarius
10791396Stmmstatic void	hme_start(struct ifnet *);
108137982Syongaristatic void	hme_start_locked(struct ifnet *);
10991396Stmmstatic void	hme_stop(struct hme_softc *);
11091396Stmmstatic int	hme_ioctl(struct ifnet *, u_long, caddr_t);
11191396Stmmstatic void	hme_tick(void *);
112164932Smariusstatic int	hme_watchdog(struct hme_softc *);
11391396Stmmstatic void	hme_init(void *);
114147256Sbrooksstatic void	hme_init_locked(struct hme_softc *);
11591396Stmmstatic int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
11691396Stmmstatic int	hme_meminit(struct hme_softc *);
11791396Stmmstatic int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
11891396Stmm    u_int32_t, u_int32_t);
11991396Stmmstatic void	hme_mifinit(struct hme_softc *);
12091396Stmmstatic void	hme_setladrf(struct hme_softc *, int);
12191396Stmm
12291396Stmmstatic int	hme_mediachange(struct ifnet *);
123164864Smariusstatic int	hme_mediachange_locked(struct hme_softc *);
12491396Stmmstatic void	hme_mediastatus(struct ifnet *, struct ifmediareq *);
12591396Stmm
126151639Syongaristatic int	hme_load_txmbuf(struct hme_softc *, struct mbuf **);
127133149Syongaristatic void	hme_read(struct hme_softc *, int, int, u_int32_t);
12891396Stmmstatic void	hme_eint(struct hme_softc *, u_int);
12991396Stmmstatic void	hme_rint(struct hme_softc *);
13091396Stmmstatic void	hme_tint(struct hme_softc *);
131133149Syongaristatic void	hme_rxcksum(struct mbuf *, u_int32_t);
13291396Stmm
13391396Stmmstatic void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
13491396Stmm
13591396Stmmdevclass_t hme_devclass;
13691396Stmm
13791396Stmmstatic int hme_nerr;
13891396Stmm
13991396StmmDRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
140108960SobrienMODULE_DEPEND(hme, miibus, 1, 1, 1);
14191396Stmm
/*
 * Register space accessors.  The SEB, ERX, ETX, MAC and MIF register
 * groups each have their own bus space tag/handle in the softc
 * (sc_<spc>t / sc_<spc>h), set up by the bus front-end; these macros
 * paste the group name into the member names.
 */
#define	HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs))
#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs), (v))
#define	HME_SPC_BARRIER(spc, sc, offs, l, f) \
	bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs), (l), (f))

#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define	HME_SEB_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define	HME_ERX_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define	HME_ETX_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define	HME_MAC_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
#define	HME_MIF_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))

/*
 * Rate-limited error reporting: print at most HME_MAXERR messages
 * (counted globally across all hme instances via hme_nerr), then print
 * one final "not reporting any more" notice and go silent.
 */
#define	HME_MAXERR	5
#define	HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\
		    "any more\n");					\
	}								\
} while(0)

/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
185129006Sjoerg
/*
 * Bus-independent attach: allocate the ifnet, create the DMA tags and
 * control/descriptor memory, create per-buffer DMA maps, probe the PHYs
 * and attach the Ethernet interface.  Returns 0 on success or an errno;
 * on failure all resources acquired so far are released via the
 * fail_* unwind chain at the bottom.
 *
 * The bus front-end must have filled in the softc fields listed in the
 * comment below before calling this.
 */
int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (splitted for SBus compatability):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 */

	/* The tick callout runs with the softc lock held. */
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

	/* Make sure the chip is stopped. */
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);

	/* Parent DMA tag; the other tags below are derived from it. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	/*
	 * Create control, RX and TX mbuf DMA tags.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;	/* one 2048-byte region each for the TX and RX rings */
	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	/* RX mbuf tag: single segment, aligned for the configured burst. */
	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	/* TX mbuf tag: up to HME_NTXSEGS segments per packet. */
	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate the control DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/*
	 * Load the control DMA buffer.  hme_cdma_callback() deposits the
	 * bus address in rb_dmabase; 0 is used as the "not loaded" marker.
	 */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	/* Spare map used by hme_add_rxbuf() to load before swapping. */
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `PHY number'
	 * mapping. We'll use this mapping to enable the MII
	 * drivers of the external transceiver according to
	 * the currently selected media.
	 */
	sc->sc_phys[0] = sc->sc_phys[1] = -1;
	LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
		    child->mii_phy != HME_PHYAD_INTERNAL) ||
		    child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	return (0);

	/* Unwind in reverse order of acquisition. */
fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}
39091396Stmm
/*
 * Bus-independent detach: stop the hardware, drain the tick callout,
 * detach from the network stack and release all DMA resources created
 * by hme_config(), in reverse order of acquisition.
 */
void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
	/* Wait for a possibly in-flight hme_tick() before tearing down. */
	callout_drain(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	/* Finish any outstanding DMA before unloading the control buffer. */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
423108976Stmm
/*
 * Suspend hook for the bus front-end: just quiesce the chip.
 * hme_resume() restarts it if the interface was up.
 */
void
hme_suspend(struct hme_softc *sc)
{

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
}
432108976Stmm
/*
 * Resume hook for the bus front-end: reinitialize the chip, but only
 * if the interface was administratively up when we suspended.
 */
void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init_locked(sc);
	HME_UNLOCK(sc);
}
443108976Stmm
44491396Stmmstatic void
44591396Stmmhme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
44691396Stmm{
44791396Stmm	struct hme_softc *sc = (struct hme_softc *)xsc;
44891396Stmm
44991396Stmm	if (error != 0)
45091396Stmm		return;
451178470Smarius	KASSERT(nsegs == 1,
452178470Smarius	    ("%s: too many DMA segments (%d)", __func__, nsegs));
45391396Stmm	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
45491396Stmm}
45591396Stmm
/*
 * Periodic (1 Hz) housekeeping, run as a callout with the softc lock
 * held: harvest the MAC collision counters into if_collisions, tick the
 * MII state machine and run the transmit watchdog.  Re-arms itself
 * unless hme_watchdog() reset the chip (EJUSTRETURN), in which case
 * hme_init_locked() has restarted the callout already.
 */
static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
		HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
		HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
		HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
		HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	mii_tick(sc->sc_mii);

	if (hme_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}
48991396Stmm
/*
 * Stop the chip: cancel the tick callout, mark the interface down, mask
 * all interrupts and reset the transmitter and receiver, polling up to
 * 20 * 20us for the reset bits to self-clear.  Called with the softc
 * lock held (callers take HME_LOCK).
 */
static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);
	sc->sc_wdog_timer = 0;
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~HME_LINK;

	/* Mask all interrupts */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);
	/* Force the reset write out before polling for completion. */
	HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}
51891396Stmm
/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
	    ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
	    &sc->sc_rb.rb_rxdesc[ix])));
}
53599954Stmm
/*
 * Attach a fresh receive buffer to RX descriptor ri, or — if keepold is
 * set and a buffer is already present — just hand the existing buffer
 * back to the hardware.  A new mbuf cluster is loaded into the spare
 * DMA map first and the maps are swapped only after the load succeeds,
 * so the old buffer is never lost on failure.  Finishes by writing the
 * buffer address and the OWN flag into the descriptor (address first,
 * ownership last).  Returns 0 or ENOBUFS.
 */
static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap, nsegs;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = imax(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer suitably aligned. The 2 byte offset is removed
	 * when the mbuf is handed up. XXX: this ensures at least 16 byte
	 * alignment of the header adjacent to the ethernet header, which
	 * should be sufficient in all cases. Nevertheless, this second-guesses
	 * ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (unmap) {
		/* Tear down the mapping of the buffer being replaced. */
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	/* Swap the loaded spare map into the descriptor slot. */
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	    segs[0].ds_addr);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}
59891396Stmm
/*
 * Carve the control DMA buffer into the TX and RX descriptor rings
 * (each ring starts on a 2048-byte boundary as the hardware requires),
 * release any mbufs left over from a previous run, rebuild the TX free
 * queue and repopulate every RX descriptor.  Returns 0 or the error
 * from hme_add_rxbuf().  Called from hme_init_locked().
 */
static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	/* p/dma walk the KVA and bus-address views of the buffer in step. */
	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/*
	 * We have reserved descriptor space until the next 2048 byte
	 * boundary.
	 */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
	}

	/* Move all TX queue entries to the free queue, dropping stale mbufs. */
	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			m_freem(td->htx_m);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	/* Make the initialized rings visible to the hardware. */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}
68491396Stmm
68591396Stmmstatic int
68691396Stmmhme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
68791396Stmm    u_int32_t clr, u_int32_t set)
68891396Stmm{
68991396Stmm	int i = 0;
69091396Stmm
69191396Stmm	val &= ~clr;
69291396Stmm	val |= set;
69391396Stmm	HME_MAC_WRITE_4(sc, reg, val);
694178470Smarius	HME_MAC_BARRIER(sc, reg, 4,
695178470Smarius	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
69691396Stmm	if (clr == 0 && set == 0)
69791396Stmm		return (1);	/* just write, no bits to wait for */
69891396Stmm	do {
69991396Stmm		DELAY(100);
70091396Stmm		i++;
70191396Stmm		val = HME_MAC_READ_4(sc, reg);
70291396Stmm		if (i > 40) {
70391396Stmm			/* After 3.5ms, we should have been done. */
70491396Stmm			device_printf(sc->sc_dev, "timeout while writing to "
70591396Stmm			    "MAC configuration register\n");
70691396Stmm			return (0);
70791396Stmm		}
70891396Stmm	} while ((val & clr) != 0 && (val & set) != set);
70991396Stmm	return (1);
71091396Stmm}
71191396Stmm
/*
 * if_init entry point: acquire the softc lock and perform the full
 * chip initialization (descriptor rings, MAC setup) via
 * hme_init_locked().
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc;

	sc = xsc;
	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}
725137982Syongari
/*
 * Locked body of hme_init(): stop the chip, rebuild the RX/TX rings,
 * and reprogram every functional unit (TX MAC, RX MAC, SEB, ETX, ERX,
 * XIF) following the init sequence from the chip documentation, then
 * mark the interface running and restart the tick callout.  Returns
 * early (interface left down) if ring buffers cannot be allocated.
 */
726137982Syongaristatic void
727147256Sbrookshme_init_locked(struct hme_softc *sc)
728137982Syongari{
729147256Sbrooks	struct ifnet *ifp = sc->sc_ifp;
73091396Stmm	u_int8_t *ea;
731133149Syongari	u_int32_t n, v;
73291396Stmm
733137982Syongari	HME_LOCK_ASSERT(sc, MA_OWNED);
73491396Stmm	/*
73591396Stmm	 * Initialization sequence. The numbered steps below correspond
73691396Stmm	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
73791396Stmm	 * Channel Engine manual (part of the PCIO manual).
73891396Stmm	 * See also the STP2002-STQ document from Sun Microsystems.
73991396Stmm	 */
74091396Stmm
74191396Stmm	/* step 1 & 2. Reset the Ethernet Channel */
74291396Stmm	hme_stop(sc);
74391396Stmm
74491396Stmm	/* Re-initialize the MIF */
74591396Stmm	hme_mifinit(sc);
74691396Stmm
74791396Stmm#if 0
74891396Stmm	/* Mask all MIF interrupts, just in case */
74991396Stmm	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
75091396Stmm#endif
75191396Stmm
75291396Stmm	/* step 3. Setup data structures in host memory */
75391396Stmm	if (hme_meminit(sc) != 0) {
75491396Stmm		device_printf(sc->sc_dev, "out of buffers; init aborted.");
75591396Stmm		return;
75691396Stmm	}
75791396Stmm
75891396Stmm	/* step 4. TX MAC registers & counters */
75991396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
76091396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
76191396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
76291396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
763129006Sjoerg	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
76491396Stmm
76591396Stmm	/* Load station MAC address */
766178470Smarius	ea = IF_LLADDR(ifp);
76791396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
76891396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
76991396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
77091396Stmm
77191396Stmm	/*
77291396Stmm	 * Init seed for backoff
77391396Stmm	 * (source suggested by manual: low 10 bits of MAC address)
77491396Stmm	 */
	/*
	 * NOTE(review): the mask 0x3fff keeps 14 bits, not the 10 bits
	 * the comment above claims; presumably the chip ignores the
	 * excess upper bits of the seed register -- confirm against the
	 * STP2002-STQ register description.
	 */
77591396Stmm	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
77691396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
77791396Stmm
77891396Stmm	/* Note: Accepting power-on default for other MAC registers here.. */
77991396Stmm
78091396Stmm	/* step 5. RX MAC registers & counters */
78191396Stmm	hme_setladrf(sc, 0);
78291396Stmm
78391396Stmm	/* step 6 & 7. Program Descriptor Ring Base Addresses */
78491396Stmm	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
78591396Stmm	/* Transmit Descriptor ring size: in increments of 16 */
78691396Stmm	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
78791396Stmm
78891396Stmm	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
789129006Sjoerg	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
79091396Stmm
79191396Stmm	/* step 8. Global Configuration & Interrupt Mask */
79291396Stmm	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
79391396Stmm	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
79491396Stmm		HME_SEB_STAT_HOSTTOTX |
79591396Stmm		HME_SEB_STAT_RXTOHOST |
79691396Stmm		HME_SEB_STAT_TXALL |
79791396Stmm		HME_SEB_STAT_TXPERR |
79891396Stmm		HME_SEB_STAT_RCNTEXP |
79991396Stmm		HME_SEB_STAT_ALL_ERRORS ));
80091396Stmm
	/* Translate the configured SBus/PCI burst size into SEB config bits. */
80191396Stmm	switch (sc->sc_burst) {
80291396Stmm	default:
80391396Stmm		v = 0;
80491396Stmm		break;
80591396Stmm	case 16:
80691396Stmm		v = HME_SEB_CFG_BURST16;
80791396Stmm		break;
80891396Stmm	case 32:
80991396Stmm		v = HME_SEB_CFG_BURST32;
81091396Stmm		break;
81191396Stmm	case 64:
81291396Stmm		v = HME_SEB_CFG_BURST64;
81391396Stmm		break;
81491396Stmm	}
815133149Syongari	/*
816133149Syongari	 * Blindly setting 64bit transfers may hang PCI cards(Cheerio?).
817133149Syongari	 * Allowing 64bit transfers breaks TX checksum offload as well.
818133149Syongari	 * Don't know this comes from hardware bug or driver's DMAing
819133149Syongari	 * scheme.
820133149Syongari	 *
821178470Smarius	 * if (sc->sc_flags & HME_PCI == 0)
822178470Smarius	 *	v |= HME_SEB_CFG_64BIT;
823133149Syongari	 */
82491396Stmm	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
82591396Stmm
82691396Stmm	/* step 9. ETX Configuration: use mostly default values */
82791396Stmm
82891396Stmm	/* Enable DMA */
82991396Stmm	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
83091396Stmm	v |= HME_ETX_CFG_DMAENABLE;
83191396Stmm	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
83291396Stmm
83391396Stmm	/* step 10. ERX Configuration */
83491396Stmm	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
83591396Stmm
83691396Stmm	/* Encode Receive Descriptor ring size: four possible values */
83791396Stmm	v &= ~HME_ERX_CFG_RINGSIZEMSK;
83891396Stmm	switch (HME_NRXDESC) {
83991396Stmm	case 32:
84091396Stmm		v |= HME_ERX_CFG_RINGSIZE32;
84191396Stmm		break;
84291396Stmm	case 64:
84391396Stmm		v |= HME_ERX_CFG_RINGSIZE64;
84491396Stmm		break;
84591396Stmm	case 128:
84691396Stmm		v |= HME_ERX_CFG_RINGSIZE128;
84791396Stmm		break;
84891396Stmm	case 256:
84991396Stmm		v |= HME_ERX_CFG_RINGSIZE256;
85091396Stmm		break;
85191396Stmm	default:
85291396Stmm		printf("hme: invalid Receive Descriptor ring size\n");
85391396Stmm		break;
85491396Stmm	}
85591396Stmm
856108834Stmm	/* Enable DMA, fix RX first byte offset. */
85791396Stmm	v &= ~HME_ERX_CFG_FBO_MASK;
858108834Stmm	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
859133149Syongari	/* RX TCP/UDP checksum offset */
	/*
	 * Tell the chip where the L4 payload starts (in 16-bit words) so
	 * its RX checksum covers TCP/UDP data; assumes a plain IPv4
	 * header with no VLAN tag between Ethernet and IP.
	 */
860133149Syongari	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
861133149Syongari	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
862133149Syongari	v |= n;
86391396Stmm	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
86491396Stmm	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
86591396Stmm
86691396Stmm	/* step 11. XIF Configuration */
86791396Stmm	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
86891396Stmm	v |= HME_MAC_XIF_OE;
86991396Stmm	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
87091396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
87191396Stmm
87291396Stmm	/* step 12. RX_MAC Configuration Register */
87391396Stmm	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
87491396Stmm	v |= HME_MAC_RXCFG_ENABLE;
87591396Stmm	v &= ~(HME_MAC_RXCFG_DCRCS);
87691396Stmm	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
87791396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
87891396Stmm
87991396Stmm	/* step 13. TX_MAC Configuration Register */
88091396Stmm	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
88191396Stmm	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
88291396Stmm	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
88391396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
88491396Stmm
88591396Stmm	/* step 14. Issue Transmit Pending command */
88691396Stmm
88791396Stmm#ifdef HMEDEBUG
88891396Stmm	/* Debug: double-check. */
88991396Stmm	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
89091396Stmm	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
89191396Stmm	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
89291396Stmm	    HME_ERX_READ_4(sc, HME_ERXI_RING),
89391396Stmm	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
89491396Stmm	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
89591396Stmm	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
89691396Stmm	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
89791396Stmm	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
89891396Stmm	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
89991396Stmm	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
90091396Stmm	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
90191396Stmm#endif
90291396Stmm
903178470Smarius	ifp->if_drv_flags |= IFF_DRV_RUNNING;
904178470Smarius	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
905178470Smarius
906129847Smarius	/* Set the current media. */
907164864Smarius	hme_mediachange_locked(sc);
908129847Smarius
90991396Stmm	/* Start the one second timer. */
910164932Smarius	sc->sc_wdog_timer = 0;
91191396Stmm	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
91291396Stmm}
91391396Stmm
91491396Stmm/*
915178470Smarius * Routine to DMA map an mbuf chain, set up the descriptor rings
916178470Smarius * accordingly and start the transmission.
917178470Smarius * Returns 0 on success, -1 if there were not enough free descriptors
918178470Smarius * to map the packet, or an errno otherwise.
919151639Syongari *
920178470Smarius * XXX: this relies on the fact that segments returned by
921178470Smarius * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
922178470Smarius * boundary on (i.e. potentially before ds_addr) to the first
923178470Smarius * boundary beyond the end.  This is usually a safe assumption to
924178470Smarius * make, but is not documented.
92591396Stmm */
92691396Stmmstatic int
927151639Syongarihme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
92891396Stmm{
928178470Smarius	bus_dma_segment_t segs[HME_NTXSEGS];
929151639Syongari	struct hme_txdesc *htx;
930178470Smarius	struct ip *ip;
931178470Smarius	struct mbuf *m;
932151639Syongari	caddr_t txd;
933178470Smarius	int error, i, nsegs, pci, ri, si;
934178470Smarius	uint32_t cflags, flags;
93591396Stmm
	/* Need a free TX descriptor slot before doing any work. */
936151639Syongari	if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
937178470Smarius		return (ENOBUFS);
938178470Smarius
	/*
	 * TX checksum offload: the descriptor carries the checksum start
	 * and stuff offsets, so the IP header must be readable (pullup)
	 * and the mbuf writable (dup if not).  On failure *m0 is freed
	 * and NULLed so the caller does not retry with a stale chain.
	 */
939178470Smarius	cflags = 0;
940178470Smarius	if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
941178470Smarius		if (M_WRITABLE(*m0) == 0) {
942178470Smarius			m = m_dup(*m0, M_DONTWAIT);
943178470Smarius			m_freem(*m0);
944178470Smarius			*m0 = m;
945178470Smarius			if (m == NULL)
946178470Smarius				return (ENOBUFS);
947178470Smarius		}
948178470Smarius		i = sizeof(struct ether_header);
949178470Smarius		m = m_pullup(*m0, i + sizeof(struct ip));
950178470Smarius		if (m == NULL) {
951178470Smarius			*m0 = NULL;
952178470Smarius			return (ENOBUFS);
953178470Smarius		}
954178470Smarius		ip = (struct ip *)(mtod(m, caddr_t) + i);
955178470Smarius		i += (ip->ip_hl << 2);
956178470Smarius		cflags = i << HME_XD_TXCKSUM_SSHIFT |
957178470Smarius		    ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
958178470Smarius		    HME_XD_TXCKSUM;
959178470Smarius		*m0 = m;
960178470Smarius	}
961178470Smarius
	/*
	 * DMA-map the chain; on EFBIG compact it to at most HME_NTXSEGS
	 * segments with m_collapse() and try once more.
	 */
962151639Syongari	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
963178470Smarius	    *m0, segs, &nsegs, 0);
964151639Syongari	if (error == EFBIG) {
965178470Smarius		m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);
966161234Syongari		if (m == NULL) {
967161234Syongari			m_freem(*m0);
968161234Syongari			*m0 = NULL;
969151639Syongari			return (ENOMEM);
970151639Syongari		}
971161234Syongari		*m0 = m;
972151639Syongari		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
973178470Smarius		    *m0, segs, &nsegs, 0);
974151639Syongari		if (error != 0) {
975161234Syongari			m_freem(*m0);
976161234Syongari			*m0 = NULL;
977151639Syongari			return (error);
978151639Syongari		}
979151639Syongari	} else if (error != 0)
980151639Syongari		return (error);
981178470Smarius	/* If nsegs is wrong then the stack is corrupt. */
982178470Smarius	KASSERT(nsegs <= HME_NTXSEGS,
983178470Smarius	    ("%s: too many DMA segments (%d)", __func__, nsegs));
984178470Smarius	if (nsegs == 0) {
985161234Syongari		m_freem(*m0);
986161234Syongari		*m0 = NULL;
987151639Syongari		return (EIO);
988108834Stmm	}
	/* Not enough ring entries free; caller may retry later (-> OACTIVE). */
989178470Smarius	if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
990151639Syongari		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
991178470Smarius		/* Retry with m_collapse(9)? */
992178470Smarius		return (ENOBUFS);
993151639Syongari	}
994151639Syongari	bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
99591396Stmm
	/*
	 * Fill one descriptor per DMA segment.  The first descriptor is
	 * written WITHOUT the OWN bit and only handed to the chip at the
	 * very end, so the hardware never sees a partially built chain.
	 */
996151639Syongari	si = ri = sc->sc_rb.rb_tdhead;
997151639Syongari	txd = sc->sc_rb.rb_txd;
998178470Smarius	pci = sc->sc_flags & HME_PCI;
999151639Syongari	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
1000151639Syongari	    HME_XD_GETFLAGS(pci, txd, ri));
1001178470Smarius	for (i = 0; i < nsegs; i++) {
1002151639Syongari		/* Fill the ring entry. */
1003178470Smarius		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
1004151639Syongari		if (i == 0)
1005151639Syongari			flags |= HME_XD_SOP | cflags;
1006151639Syongari		else
1007151639Syongari			flags |= HME_XD_OWN | cflags;
100891396Stmm		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
100991396Stmm		    ri, si, flags);
1010178470Smarius		HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
1011151639Syongari		HME_XD_SETFLAGS(pci, txd, ri, flags);
1012151639Syongari		sc->sc_rb.rb_td_nbusy++;
1013151639Syongari		htx->htx_lastdesc = ri;
1014151639Syongari		ri = (ri + 1) % HME_NTXDESC;
1015151639Syongari	}
1016151639Syongari	sc->sc_rb.rb_tdhead = ri;
101791396Stmm
1018151639Syongari	/* set EOP on the last descriptor */
1019151639Syongari	ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
1020151639Syongari	flags = HME_XD_GETFLAGS(pci, txd, ri);
1021151639Syongari	flags |= HME_XD_EOP;
1022151639Syongari	CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
1023151639Syongari	    flags);
1024151639Syongari	HME_XD_SETFLAGS(pci, txd, ri, flags);
1025151639Syongari
1026151639Syongari	/* Turn the first descriptor ownership to the hme */
1027151639Syongari	flags = HME_XD_GETFLAGS(pci, txd, si);
1028151639Syongari	flags |= HME_XD_OWN;
1029151639Syongari	CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
1030151639Syongari	    ri, flags);
1031151639Syongari	HME_XD_SETFLAGS(pci, txd, si, flags);
1032151639Syongari
	/* Track the mbuf until hme_tint() reclaims the descriptors. */
1033151639Syongari	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
1034151639Syongari	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
1035161234Syongari	htx->htx_m = *m0;
1036151639Syongari
103791396Stmm	/* start the transmission. */
103891396Stmm	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
1039151639Syongari
104091396Stmm	return (0);
104191396Stmm}
104391396Stmm
104491396Stmm/*
104591396Stmm * Pass a packet to the higher levels.
104691396Stmm */
104791396Stmmstatic void
1048133149Syongarihme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
104991396Stmm{
1050147256Sbrooks	struct ifnet *ifp = sc->sc_ifp;
105191396Stmm	struct mbuf *m;
105291396Stmm
	/* Sanity-check the hardware-reported length; drop runts/giants. */
105391396Stmm	if (len <= sizeof(struct ether_header) ||
1054129006Sjoerg	    len > HME_MAX_FRAMESIZE) {
105591396Stmm#ifdef HMEDEBUG
105691396Stmm		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
105791396Stmm		    len);
105891396Stmm#endif
105999954Stmm		ifp->if_ierrors++;
1060109649Stmm		hme_discard_rxbuf(sc, ix);
106199954Stmm		return;
106291396Stmm	}
106391396Stmm
106491396Stmm	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1065108834Stmm	CTR1(KTR_HME, "hme_read: len %d", len);
106691396Stmm
106791396Stmm	if (hme_add_rxbuf(sc, ix, 0) != 0) {
106891396Stmm		/*
106991396Stmm		 * hme_add_rxbuf will leave the old buffer in the ring until
107091396Stmm		 * it is sure that a new buffer can be mapped. If it can not,
107191396Stmm		 * drop the packet, but leave the interface up.
107291396Stmm		 */
107399954Stmm		ifp->if_iqdrops++;
1074109649Stmm		hme_discard_rxbuf(sc, ix);
107599954Stmm		return;
107691396Stmm	}
107791396Stmm
107891396Stmm	ifp->if_ipackets++;
107991396Stmm
108091396Stmm	m->m_pkthdr.rcvif = ifp;
	/* Strip the DMA alignment pad the chip wrote before the frame. */
1081108834Stmm	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
1082108834Stmm	m_adj(m, HME_RXOFFS);
1083133149Syongari	/* RX TCP/UDP checksum */
1084133149Syongari	if (ifp->if_capenable & IFCAP_RXCSUM)
1085133149Syongari		hme_rxcksum(m, flags);
108691396Stmm	/* Pass the packet up. */
	/* Drop the softc lock across if_input() to avoid lock recursion. */
1087137982Syongari	HME_UNLOCK(sc);
1088106937Ssam	(*ifp->if_input)(ifp, m);
1089137982Syongari	HME_LOCK(sc);
109091396Stmm}
109191396Stmm
/*
 * if_start entry point: take the softc lock and run the locked
 * transmit-queue drain routine.
 */
109291396Stmmstatic void
109391396Stmmhme_start(struct ifnet *ifp)
109491396Stmm{
1095137982Syongari	struct hme_softc *sc = ifp->if_softc;
1096137982Syongari
1097137982Syongari	HME_LOCK(sc);
1098137982Syongari	hme_start_locked(ifp);
1099137982Syongari	HME_UNLOCK(sc);
1100137982Syongari}
1101137982Syongari
/*
 * Locked transmit: dequeue packets from the interface send queue and
 * hand them to hme_load_txmbuf() until the queue is empty or the TX
 * descriptor ring fills up.  Requires the softc lock, a RUNNING
 * interface, and an established link.
 */
1102137982Syongaristatic void
1103137982Syongarihme_start_locked(struct ifnet *ifp)
1104137982Syongari{
110591396Stmm	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
110691396Stmm	struct mbuf *m;
110791396Stmm	int error, enq = 0;
110891396Stmm
1109148887Srwatson	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1110178470Smarius	    IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
111191396Stmm		return;
111291396Stmm
1113151639Syongari	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1114151639Syongari	    sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
1115132986Smlaier		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
111691396Stmm		if (m == NULL)
111791396Stmm			break;
111891396Stmm
1119151639Syongari		error = hme_load_txmbuf(sc, &m);
1120151639Syongari		if (error != 0) {
			/* m == NULL means the mbuf was consumed/freed. */
1121151639Syongari			if (m == NULL)
1122151639Syongari				break;
			/* Ring full: requeue the packet and stall the queue. */
1123148887Srwatson			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1124132986Smlaier			IFQ_DRV_PREPEND(&ifp->if_snd, m);
112591962Stmm			break;
1126100980Sfenner		}
1127151639Syongari		enq++;
1128151639Syongari		BPF_MTAP(ifp, m);
112991396Stmm	}
113091396Stmm
	/* Flush descriptor writes to memory and arm the watchdog. */
1131151639Syongari	if (enq > 0) {
1132109649Stmm		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1133178470Smarius		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1134164932Smarius		sc->sc_wdog_timer = 5;
1135109649Stmm	}
113691396Stmm}
113791396Stmm
113891396Stmm/*
113991396Stmm * Transmit interrupt.
114091396Stmm */
114191396Stmmstatic void
114291396Stmmhme_tint(struct hme_softc *sc)
114391396Stmm{
1144151639Syongari	caddr_t txd;
1145147256Sbrooks	struct ifnet *ifp = sc->sc_ifp;
1146108834Stmm	struct hme_txdesc *htx;
114791396Stmm	unsigned int ri, txflags;
114891396Stmm
	/*
	 * Walk the TX ring from the reclaim tail, freeing descriptors the
	 * chip has released (OWN bit clear) and their mbufs once a full
	 * packet (EOP) has gone out.
	 */
1149151639Syongari	txd = sc->sc_rb.rb_txd;
1150108834Stmm	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1151109649Stmm	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
115291396Stmm	/* Fetch current position in the transmit ring */
115391396Stmm	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
115491396Stmm		if (sc->sc_rb.rb_td_nbusy <= 0) {
115591396Stmm			CTR0(KTR_HME, "hme_tint: not busy!");
115691396Stmm			break;
115791396Stmm		}
115891396Stmm
1159178470Smarius		txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
116091396Stmm		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
116191396Stmm
116291396Stmm		if ((txflags & HME_XD_OWN) != 0)
116391396Stmm			break;
116491396Stmm
1165108834Stmm		CTR0(KTR_HME, "hme_tint: not owned");
116691396Stmm		--sc->sc_rb.rb_td_nbusy;
1167148887Srwatson		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
116891396Stmm
116991396Stmm		/* Complete packet transmitted? */
117091396Stmm		if ((txflags & HME_XD_EOP) == 0)
117191396Stmm			continue;
117291396Stmm
1173108834Stmm		KASSERT(htx->htx_lastdesc == ri,
1174178470Smarius		    ("%s: ring indices skewed: %d != %d!",
1175178470Smarius		    __func__, htx->htx_lastdesc, ri));
1176108834Stmm		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1177108834Stmm		    BUS_DMASYNC_POSTWRITE);
1178108834Stmm		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1179108834Stmm
118091396Stmm		ifp->if_opackets++;
1181108834Stmm		m_freem(htx->htx_m);
1182108834Stmm		htx->htx_m = NULL;
1183108834Stmm		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1184108834Stmm		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1185108834Stmm		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
118691396Stmm	}
	/* Re-arm (or cancel) the watchdog depending on outstanding work. */
1187164932Smarius	sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
118891396Stmm
118991396Stmm	/* Update ring */
119091396Stmm	sc->sc_rb.rb_tdtail = ri;
119191396Stmm
	/* Descriptors were freed; try to queue more packets. */
1192178470Smarius	hme_start_locked(ifp);
119391396Stmm}
119491396Stmm
119591396Stmm/*
1196178470Smarius * RX TCP/UDP checksum
1197133149Syongari */
/*
 * Finish hardware RX checksumming in software: the chip delivers a raw
 * 16-bit sum (in `flags') computed from a fixed start offset, so this
 * only applies to unfragmented IPv4 TCP/UDP packets without extra
 * encapsulation, and the sum must be fixed up for IP options.  On
 * success the mbuf is marked CSUM_DATA_VALID with the adjusted sum in
 * csum_data for the stack to finish pseudo-header validation.
 */
1198133149Syongaristatic void
1199133149Syongarihme_rxcksum(struct mbuf *m, u_int32_t flags)
1200133149Syongari{
1201133149Syongari	struct ether_header *eh;
1202133149Syongari	struct ip *ip;
1203133149Syongari	struct udphdr *uh;
1204133149Syongari	int32_t hlen, len, pktlen;
1205133149Syongari	u_int16_t cksum, *opts;
1206133149Syongari	u_int32_t temp32;
1207133149Syongari
	/* Bail out silently unless this is plain IPv4 over Ethernet. */
1208133149Syongari	pktlen = m->m_pkthdr.len;
1209133149Syongari	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
1210133149Syongari		return;
1211133149Syongari	eh = mtod(m, struct ether_header *);
1212133149Syongari	if (eh->ether_type != htons(ETHERTYPE_IP))
1213133149Syongari		return;
1214133149Syongari	ip = (struct ip *)(eh + 1);
1215133149Syongari	if (ip->ip_v != IPVERSION)
1216133149Syongari		return;
1217133149Syongari
1218133149Syongari	hlen = ip->ip_hl << 2;
1219133149Syongari	pktlen -= sizeof(struct ether_header);
1220133149Syongari	if (hlen < sizeof(struct ip))
1221133149Syongari		return;
1222133149Syongari	if (ntohs(ip->ip_len) < hlen)
1223133149Syongari		return;
	/* Reject trailing garbage/truncation: IP length must match frame. */
1224133149Syongari	if (ntohs(ip->ip_len) != pktlen)
1225133149Syongari		return;
1226133149Syongari	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1227133149Syongari		return;	/* can't handle fragmented packet */
1228133149Syongari
1229133149Syongari	switch (ip->ip_p) {
1230133149Syongari	case IPPROTO_TCP:
1231133149Syongari		if (pktlen < (hlen + sizeof(struct tcphdr)))
1232133149Syongari			return;
1233133149Syongari		break;
1234133149Syongari	case IPPROTO_UDP:
1235133149Syongari		if (pktlen < (hlen + sizeof(struct udphdr)))
1236133149Syongari			return;
1237133149Syongari		uh = (struct udphdr *)((caddr_t)ip + hlen);
1238133149Syongari		if (uh->uh_sum == 0)
1239133149Syongari			return; /* no checksum */
1240133149Syongari		break;
1241133149Syongari	default:
1242133149Syongari		return;
1243133149Syongari	}
1244133149Syongari
1245156945Syongari	cksum = ~(flags & HME_XD_RXCKSUM);
1246133149Syongari	/* checksum fixup for IP options */
1247133149Syongari	len = hlen - sizeof(struct ip);
1248133149Syongari	if (len > 0) {
		/* Subtract option words in ones-complement arithmetic. */
1249133149Syongari		opts = (u_int16_t *)(ip + 1);
1250133149Syongari		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
1251133149Syongari			temp32 = cksum - *opts;
1252133149Syongari			temp32 = (temp32 >> 16) + (temp32 & 65535);
1253133149Syongari			cksum = temp32 & 65535;
1254133149Syongari		}
1255133149Syongari	}
1256133149Syongari	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1257133149Syongari	m->m_pkthdr.csum_data = cksum;
1258133149Syongari}
1259133149Syongari
1260133149Syongari/*
126191396Stmm * Receive interrupt.
126291396Stmm */
126391396Stmmstatic void
126491396Stmmhme_rint(struct hme_softc *sc)
126591396Stmm{
126691396Stmm	caddr_t xdr = sc->sc_rb.rb_rxd;
1267147256Sbrooks	struct ifnet *ifp = sc->sc_ifp;
126891396Stmm	unsigned int ri, len;
1269109649Stmm	int progress = 0;
127091396Stmm	u_int32_t flags;
127191396Stmm
127291396Stmm	/*
127391396Stmm	 * Process all buffers with valid data.
127491396Stmm	 */
	/* Make the chip's descriptor writes visible before reading them. */
1275109649Stmm	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
127691396Stmm	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1277178470Smarius		flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
127891396Stmm		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		/* Stop at the first descriptor still owned by the chip. */
127991396Stmm		if ((flags & HME_XD_OWN) != 0)
128091396Stmm			break;
128191396Stmm
1282109649Stmm		progress++;
128391396Stmm		if ((flags & HME_XD_OFL) != 0) {
128491396Stmm			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
128591396Stmm			    "flags=0x%x\n", ri, flags);
128699954Stmm			ifp->if_ierrors++;
1287109649Stmm			hme_discard_rxbuf(sc, ri);
128891396Stmm		} else {
128991396Stmm			len = HME_XD_DECODE_RSIZE(flags);
1290133149Syongari			hme_read(sc, ri, len, flags);
129191396Stmm		}
129291396Stmm	}
	/* Hand the recycled descriptors back to the chip. */
1293109649Stmm	if (progress) {
1294109649Stmm		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1295178470Smarius		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1296109649Stmm	}
129791396Stmm	sc->sc_rb.rb_rdtail = ri;
129891396Stmm}
129991396Stmm
/*
 * Error-interrupt handler: log MIF status changes, and reinitialize
 * the whole chip on fatal errors that freeze the DMA engines.
 */
130091396Stmmstatic void
130191396Stmmhme_eint(struct hme_softc *sc, u_int status)
130291396Stmm{
130391396Stmm
130491396Stmm	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1305164864Smarius		device_printf(sc->sc_dev, "XXXlink status changed: "
1306164864Smarius		    "cfg=%#x, stat=%#x, sm=%#x\n",
1307164864Smarius		    HME_MIF_READ_4(sc, HME_MIFI_CFG),
1308164864Smarius		    HME_MIF_READ_4(sc, HME_MIFI_STAT),
1309164864Smarius		    HME_MIF_READ_4(sc, HME_MIFI_SM));
131091396Stmm		return;
131191396Stmm	}
131291396Stmm
1313151639Syongari	/* check for fatal errors that needs reset to unfreeze DMA engine */
1314151639Syongari	if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
1315151639Syongari		HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1316151639Syongari		hme_init_locked(sc);
1317151639Syongari	}
131891396Stmm}
131991396Stmm
/*
 * Main interrupt handler: read the status register once and dispatch
 * to the error, RX and TX service routines under the softc lock.
 */
132091396Stmmvoid
132191396Stmmhme_intr(void *v)
132291396Stmm{
132391396Stmm	struct hme_softc *sc = (struct hme_softc *)v;
132491396Stmm	u_int32_t status;
132591396Stmm
1326137982Syongari	HME_LOCK(sc);
	/* Reading the status register also acknowledges the interrupt. */
132791396Stmm	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
132891396Stmm	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
132991396Stmm
133091396Stmm	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
133191396Stmm		hme_eint(sc, status);
133291396Stmm
1333178470Smarius	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1334178470Smarius		hme_rint(sc);
1335178470Smarius
133691396Stmm	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
133791396Stmm		hme_tint(sc);
1338137982Syongari	HME_UNLOCK(sc);
133991396Stmm}
134091396Stmm
/*
 * Per-tick watchdog: decrement the timer armed by the TX path and, if
 * it expires, count an output error and reset/restart the chip.
 * Returns 0 when nothing happened, EJUSTRETURN after a reset (so the
 * caller knows the tick state was re-initialized).
 */
1341164932Smariusstatic int
1342164932Smariushme_watchdog(struct hme_softc *sc)
134391396Stmm{
1344178470Smarius	struct ifnet *ifp = sc->sc_ifp;
134591396Stmm
1346164932Smarius	HME_LOCK_ASSERT(sc, MA_OWNED);
1347178470Smarius
1348137982Syongari#ifdef HMEDEBUG
1349178470Smarius	CTR1(KTR_HME, "hme_watchdog: status %x",
1350178470Smarius	    (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
135191396Stmm#endif
1352164932Smarius
	/* Timer not armed, or armed and not yet expired: nothing to do. */
1353164932Smarius	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1354164932Smarius		return (0);
1355164932Smarius
1356178470Smarius	if ((sc->sc_flags & HME_LINK) != 0)
1357178470Smarius		device_printf(sc->sc_dev, "device timeout\n");
1358178470Smarius	else if (bootverbose)
1359178470Smarius		device_printf(sc->sc_dev, "device timeout (no link)\n");
1360178470Smarius	++ifp->if_oerrors;
1361148944Sjhb
1362148944Sjhb	hme_init_locked(sc);
1363178470Smarius	hme_start_locked(ifp);
1364164932Smarius	return (EJUSTRETURN);
136591396Stmm}
136691396Stmm
136791396Stmm/*
136891396Stmm * Initialize the MII Management Interface
136991396Stmm */
137091396Stmmstatic void
137191396Stmmhme_mifinit(struct hme_softc *sc)
137291396Stmm{
137391396Stmm	u_int32_t v;
137491396Stmm
1375164864Smarius	/*
1376164864Smarius	 * Configure the MIF in frame mode, polling disabled, internal PHY
1377164864Smarius	 * selected.
1378164864Smarius	 */
1379164864Smarius	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
1380164864Smarius
1381164864Smarius	/*
1382164864Smarius	 * If the currently selected media uses the external transceiver,
1383164864Smarius	 * enable its MII drivers (which basically isolates the internal
1384164864Smarius	 * one and vice versa). In case the current media hasn't been set,
1385164864Smarius	 * yet, we default to the internal transceiver.
1386164864Smarius	 */
1387164864Smarius	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
1388164864Smarius	if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
1389164864Smarius	    sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
1390164864Smarius	    HME_PHYAD_EXTERNAL)
1391164864Smarius		v |= HME_MAC_XIF_MIIENABLE;
1392164864Smarius	else
1393164864Smarius		v &= ~HME_MAC_XIF_MIIENABLE;
1394164864Smarius	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
139591396Stmm}
139691396Stmm
139791396Stmm/*
139891396Stmm * MII interface
139991396Stmm */
/*
 * miibus read method: issue an MII frame-mode read of register `reg'
 * on PHY `phy' and busy-wait for completion.  Returns the 16-bit
 * register value, or 0 on timeout or for PHY addresses other than the
 * two the chip supports.
 */
140091396Stmmint
140191396Stmmhme_mii_readreg(device_t dev, int phy, int reg)
140291396Stmm{
1403164864Smarius	struct hme_softc *sc;
140491396Stmm	int n;
140591396Stmm	u_int32_t v;
140691396Stmm
1407164864Smarius	/* We can at most have two PHYs. */
1408164864Smarius	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1409164864Smarius		return (0);
1410164864Smarius
1411164864Smarius	sc = device_get_softc(dev);
141291396Stmm	/* Select the desired PHY in the MIF configuration register */
141391396Stmm	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
141491396Stmm	if (phy == HME_PHYAD_EXTERNAL)
141591396Stmm		v |= HME_MIF_CFG_PHY;
1416164864Smarius	else
1417164864Smarius		v &= ~HME_MIF_CFG_PHY;
141891396Stmm	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
141991396Stmm
142091396Stmm	/* Construct the frame command */
142191396Stmm	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
142291396Stmm	    HME_MIF_FO_TAMSB |
142391396Stmm	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
142491396Stmm	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
142591396Stmm	    (reg << HME_MIF_FO_REGAD_SHIFT);
142691396Stmm
142791396Stmm	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1428178470Smarius	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1429178470Smarius	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for frame completion (TALSB set), up to ~100us. */
143091396Stmm	for (n = 0; n < 100; n++) {
143191396Stmm		DELAY(1);
143291396Stmm		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1433164864Smarius		if (v & HME_MIF_FO_TALSB)
143491396Stmm			return (v & HME_MIF_FO_DATA);
143591396Stmm	}
143691396Stmm
143791396Stmm	device_printf(sc->sc_dev, "mii_read timeout\n");
143891396Stmm	return (0);
143991396Stmm}
144091396Stmm
/*
 * miibus write method: issue an MII frame-mode write of `val' to
 * register `reg' on PHY `phy' and busy-wait for completion.  Returns 1
 * on success, 0 on timeout or unsupported PHY address.
 */
144191396Stmmint
144291396Stmmhme_mii_writereg(device_t dev, int phy, int reg, int val)
144391396Stmm{
1444164864Smarius	struct hme_softc *sc;
144591396Stmm	int n;
144691396Stmm	u_int32_t v;
144791396Stmm
1448164864Smarius	/* We can at most have two PHYs. */
1449164864Smarius	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1450164864Smarius		return (0);
1451164864Smarius
1452164864Smarius	sc = device_get_softc(dev);
145391396Stmm	/* Select the desired PHY in the MIF configuration register */
145491396Stmm	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
145591396Stmm	if (phy == HME_PHYAD_EXTERNAL)
145691396Stmm		v |= HME_MIF_CFG_PHY;
1457164864Smarius	else
1458164864Smarius		v &= ~HME_MIF_CFG_PHY;
145991396Stmm	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
146091396Stmm
146191396Stmm	/* Construct the frame command */
146291396Stmm	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
146391396Stmm	    HME_MIF_FO_TAMSB				|
146491396Stmm	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
146591396Stmm	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
146691396Stmm	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
146791396Stmm	    (val & HME_MIF_FO_DATA);
146891396Stmm
146991396Stmm	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1470178470Smarius	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1471178470Smarius	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for frame completion (TALSB set), up to ~100us. */
147291396Stmm	for (n = 0; n < 100; n++) {
147391396Stmm		DELAY(1);
147491396Stmm		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1475148944Sjhb		if (v & HME_MIF_FO_TALSB)
147691396Stmm			return (1);
147791396Stmm	}
147891396Stmm
147991396Stmm	device_printf(sc->sc_dev, "mii_write timeout\n");
148091396Stmm	return (0);
148191396Stmm}
148291396Stmm
/*
 * miibus status-change method: track link state in sc_flags, then
 * reprogram the duplex setting.  Both MACs are disabled before the
 * TXCFG duplex bit is changed and only re-enabled afterwards (and only
 * if the interface is running with link) -- the chip requires the MACs
 * to be idle while their configuration changes.
 */
148391396Stmmvoid
148491396Stmmhme_mii_statchg(device_t dev)
148591396Stmm{
1486164864Smarius	struct hme_softc *sc;
1487178470Smarius	uint32_t rxcfg, txcfg;
148891396Stmm
1489164864Smarius	sc = device_get_softc(dev);
1490164864Smarius
149191396Stmm#ifdef HMEDEBUG
1492178470Smarius	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1493164864Smarius		device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
149491396Stmm#endif
149591396Stmm
1496178470Smarius	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1497178470Smarius	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1498178470Smarius		sc->sc_flags |= HME_LINK;
1499178470Smarius	else
1500178470Smarius		sc->sc_flags &= ~HME_LINK;
1501178470Smarius
1502178470Smarius	txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1503178470Smarius	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
1504178470Smarius	    HME_MAC_TXCFG_ENABLE, 0))
1505178470Smarius		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
1506178470Smarius	rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1507178470Smarius	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
1508178470Smarius	    HME_MAC_RXCFG_ENABLE, 0))
1509178470Smarius		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1510178470Smarius
1511178470Smarius	/* Set the MAC Full Duplex bit appropriately. */
151291396Stmm	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1513178470Smarius		txcfg |= HME_MAC_TXCFG_FULLDPLX;
151491396Stmm	else
1515178470Smarius		txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
1516178470Smarius	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
1517178470Smarius
1518178470Smarius	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1519178470Smarius	    (sc->sc_flags & HME_LINK) != 0) {
1520178470Smarius		if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
1521178470Smarius		    HME_MAC_TXCFG_ENABLE))
1522178470Smarius			device_printf(sc->sc_dev, "cannot enable TX MAC\n");
1523178470Smarius		if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
1524178470Smarius		    HME_MAC_RXCFG_ENABLE))
1525178470Smarius			device_printf(sc->sc_dev, "cannot enable RX MAC\n");
1526178470Smarius	}
152791396Stmm}
152891396Stmm
152991396Stmmstatic int
153091396Stmmhme_mediachange(struct ifnet *ifp)
153191396Stmm{
153291396Stmm	struct hme_softc *sc = ifp->if_softc;
1533148944Sjhb	int error;
153491396Stmm
1535148944Sjhb	HME_LOCK(sc);
1536164864Smarius	error = hme_mediachange_locked(sc);
1537148944Sjhb	HME_UNLOCK(sc);
1538148944Sjhb	return (error);
153991396Stmm}
154091396Stmm
1541164864Smariusstatic int
1542164864Smariushme_mediachange_locked(struct hme_softc *sc)
1543164864Smarius{
1544164864Smarius	struct mii_softc *child;
1545164864Smarius
1546164864Smarius	HME_LOCK_ASSERT(sc, MA_OWNED);
1547178470Smarius
1548164864Smarius#ifdef HMEDEBUG
1549178470Smarius	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1550164864Smarius		device_printf(sc->sc_dev, "hme_mediachange_locked");
1551164864Smarius#endif
1552164864Smarius
1553164864Smarius	hme_mifinit(sc);
1554164864Smarius
1555164864Smarius	/*
1556164864Smarius	 * If both PHYs are present reset them. This is required for
1557164864Smarius	 * unisolating the previously isolated PHY when switching PHYs.
1558164864Smarius	 * As the above hme_mifinit() call will set the MII drivers in
1559164864Smarius	 * the XIF configuration register accoring to the currently
1560164864Smarius	 * selected media, there should be no window during which the
1561164864Smarius	 * data paths of both transceivers are open at the same time,
1562164864Smarius	 * even if the PHY device drivers use MIIF_NOISOLATE.
1563164864Smarius	 */
1564164864Smarius	if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
1565164864Smarius		LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
1566164864Smarius			mii_phy_reset(child);
1567164864Smarius	return (mii_mediachg(sc->sc_mii));
1568164864Smarius}
1569164864Smarius
157091396Stmmstatic void
157191396Stmmhme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
157291396Stmm{
157391396Stmm	struct hme_softc *sc = ifp->if_softc;
157491396Stmm
1575137982Syongari	HME_LOCK(sc);
1576137982Syongari	if ((ifp->if_flags & IFF_UP) == 0) {
1577137982Syongari		HME_UNLOCK(sc);
157891396Stmm		return;
1579137982Syongari	}
158091396Stmm
158191396Stmm	mii_pollstat(sc->sc_mii);
158291396Stmm	ifmr->ifm_active = sc->sc_mii->mii_media_active;
158391396Stmm	ifmr->ifm_status = sc->sc_mii->mii_media_status;
1584137982Syongari	HME_UNLOCK(sc);
158591396Stmm}
158691396Stmm
158791396Stmm/*
158891396Stmm * Process an ioctl request.
158991396Stmm */
159091396Stmmstatic int
159191396Stmmhme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
159291396Stmm{
159391396Stmm	struct hme_softc *sc = ifp->if_softc;
159491396Stmm	struct ifreq *ifr = (struct ifreq *)data;
1595148944Sjhb	int error = 0;
159691396Stmm
159791396Stmm	switch (cmd) {
159891396Stmm	case SIOCSIFFLAGS:
1599148944Sjhb		HME_LOCK(sc);
		/*
		 * Going up: if already running and only the ALLMULTI/PROMISC
		 * bits changed, just reload the address filter; any other
		 * change (including not yet running) triggers a full
		 * (re)initialization.  Going down: stop the chip.
		 */
1600178470Smarius		if ((ifp->if_flags & IFF_UP) != 0) {
1601178470Smarius			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1602178470Smarius			    ((ifp->if_flags ^ sc->sc_ifflags) &
1603178470Smarius			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1604178470Smarius				hme_setladrf(sc, 1);
1605178470Smarius			else
1606178470Smarius				hme_init_locked(sc);
1607178470Smarius		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
160891396Stmm			hme_stop(sc);
		/*
		 * IFF_LINK0 selects whether UDP checksums are included in
		 * the hardware-assist feature set; apply it immediately if
		 * TX checksum offloading is enabled.
		 */
1609133149Syongari		if ((ifp->if_flags & IFF_LINK0) != 0)
1610133149Syongari			sc->sc_csum_features |= CSUM_UDP;
1611133149Syongari		else
1612133149Syongari			sc->sc_csum_features &= ~CSUM_UDP;
1613133149Syongari		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1614133149Syongari			ifp->if_hwassist = sc->sc_csum_features;
		/* Remember the flags for the next change comparison. */
1615178470Smarius		sc->sc_ifflags = ifp->if_flags;
1616148944Sjhb		HME_UNLOCK(sc);
161791396Stmm		break;
161891396Stmm
161991396Stmm	case SIOCADDMULTI:
162091396Stmm	case SIOCDELMULTI:
		/* Multicast list changed: reload the hash filter. */
1621148944Sjhb		HME_LOCK(sc);
162291396Stmm		hme_setladrf(sc, 1);
1623148944Sjhb		HME_UNLOCK(sc);
162491396Stmm		error = 0;
162591396Stmm		break;
162691396Stmm	case SIOCGIFMEDIA:
162791396Stmm	case SIOCSIFMEDIA:
		/* Delegate media get/set to the MII layer. */
162891396Stmm		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
162991396Stmm		break;
1630133149Syongari	case SIOCSIFCAP:
		/* Toggle TX checksum offloading per the requested caps. */
1631148944Sjhb		HME_LOCK(sc);
1632133149Syongari		ifp->if_capenable = ifr->ifr_reqcap;
1633133149Syongari		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1634133149Syongari			ifp->if_hwassist = sc->sc_csum_features;
1635133149Syongari		else
1636133149Syongari			ifp->if_hwassist = 0;
1637148944Sjhb		HME_UNLOCK(sc);
1638133149Syongari		break;
163991396Stmm	default:
		/* Everything else (addresses, MTU, ...) is generic. */
1640106937Ssam		error = ether_ioctl(ifp, cmd, data);
164191396Stmm		break;
164291396Stmm	}
164391396Stmm
164491396Stmm	return (error);
164591396Stmm}
164691396Stmm
164791396Stmm/*
164891396Stmm * Set up the logical address filter.
164991396Stmm */
	/*
	 * Program the RX MAC's address filter from the interface state:
	 * promiscuous, all-multicast, or a 64-bit multicast hash filter.
	 * 'reenable' selects whether the RX MAC is left enabled afterwards.
	 * Called with the softc lock held.
	 */
165091396Stmmstatic void
165191396Stmmhme_setladrf(struct hme_softc *sc, int reenable)
165291396Stmm{
1653147256Sbrooks	struct ifnet *ifp = sc->sc_ifp;
165491396Stmm	struct ifmultiaddr *inm;
165591396Stmm	u_int32_t crc;
165691396Stmm	u_int32_t hash[4];
165791396Stmm	u_int32_t macc;
165891396Stmm
1659137982Syongari	HME_LOCK_ASSERT(sc, MA_OWNED);
1660178470Smarius	/* Clear the hash table. */
166191396Stmm	hash[3] = hash[2] = hash[1] = hash[0] = 0;
166291396Stmm
1663178470Smarius	/* Get the current RX configuration. */
166491396Stmm	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
166591396Stmm
166691396Stmm	/*
1667178470Smarius	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
1668178470Smarius	 * and hash filter.  Depending on the case, the right bit will be
1669178470Smarius	 * enabled.
1670178470Smarius	 */
1671178470Smarius	macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
1672178470Smarius
1673178470Smarius	/*
167491396Stmm	 * Disable the receiver while changing it's state as the documentation
167591396Stmm	 * mandates.
167691396Stmm	 * We then must wait until the bit clears in the register. This should
167791396Stmm	 * take at most 3.5ms.
167891396Stmm	 */
1679178470Smarius	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1680178470Smarius	    HME_MAC_RXCFG_ENABLE, 0))
1681178470Smarius		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
168291396Stmm	/* Disable the hash filter before writing to the filter registers. */
168391396Stmm	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
168491396Stmm	    HME_MAC_RXCFG_HENABLE, 0))
1685178470Smarius		device_printf(sc->sc_dev, "cannot disable hash filter\n");
168691396Stmm
1687178470Smarius	/* Make the RX MAC really SIMPLEX. */
1688146513Syongari	macc |= HME_MAC_RXCFG_ME;
168991396Stmm	if (reenable)
169091396Stmm		macc |= HME_MAC_RXCFG_ENABLE;
169191396Stmm	else
169291396Stmm		macc &= ~HME_MAC_RXCFG_ENABLE;
169391396Stmm
	/* Promiscuous and all-multicast modes bypass the hash entirely. */
169491396Stmm	if ((ifp->if_flags & IFF_PROMISC) != 0) {
169591396Stmm		macc |= HME_MAC_RXCFG_PMISC;
169691396Stmm		goto chipit;
169791396Stmm	}
1698178470Smarius	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1699178470Smarius		macc |= HME_MAC_RXCFG_PGRP;
1700178470Smarius		goto chipit;
1701178470Smarius	}
170291396Stmm
170391396Stmm	macc |= HME_MAC_RXCFG_HENABLE;
170491396Stmm
170591396Stmm	/*
170691396Stmm	 * Set up multicast address filter by passing all multicast addresses
170791396Stmm	 * through a crc generator, and then using the high order 6 bits as an
170891396Stmm	 * index into the 64 bit logical address filter.  The high order bit
170991396Stmm	 * selects the word, while the rest of the bits select the bit within
171091396Stmm	 * the word.
171191396Stmm	 */
171291396Stmm
1713178470Smarius	IF_ADDR_LOCK(ifp);
1714178470Smarius	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		/* Only link-layer (Ethernet) multicast entries matter. */
171591396Stmm		if (inm->ifma_addr->sa_family != AF_LINK)
171691396Stmm			continue;
1717130290Smarius		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1718130290Smarius		    inm->ifma_addr), ETHER_ADDR_LEN);
171991396Stmm
172091396Stmm		/* Just want the 6 most significant bits. */
172191396Stmm		crc >>= 26;
172291396Stmm
		/* Top 2 bits pick the 16-bit word, low 4 bits the bit. */
172391396Stmm		/* Set the corresponding bit in the filter. */
172491396Stmm		hash[crc >> 4] |= 1 << (crc & 0xf);
172591396Stmm	}
1726178470Smarius	IF_ADDR_UNLOCK(ifp);
172791396Stmm
172891396Stmmchipit:
172991396Stmm	/* Now load the hash table into the chip */
173091396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
173191396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
173291396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
173391396Stmm	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	/* Re-set the enable/hash/SIMPLEX bits that were computed above. */
1734178470Smarius	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1735146513Syongari	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
1736178470Smarius	    HME_MAC_RXCFG_ME)))
1737178470Smarius		device_printf(sc->sc_dev, "cannot configure RX MAC\n");
173891396Stmm}
1739