191396Stmm/*- 291396Stmm * Copyright (c) 1999 The NetBSD Foundation, Inc. 3108834Stmm * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>. 491396Stmm * All rights reserved. 591396Stmm * 691396Stmm * This code is derived from software contributed to The NetBSD Foundation 791396Stmm * by Paul Kranenburg. 891396Stmm * 991396Stmm * Redistribution and use in source and binary forms, with or without 1091396Stmm * modification, are permitted provided that the following conditions 1191396Stmm * are met: 1291396Stmm * 1. Redistributions of source code must retain the above copyright 1391396Stmm * notice, this list of conditions and the following disclaimer. 1491396Stmm * 2. Redistributions in binary form must reproduce the above copyright 1591396Stmm * notice, this list of conditions and the following disclaimer in the 1691396Stmm * documentation and/or other materials provided with the distribution. 1791396Stmm * 3. All advertising materials mentioning features or use of this software 1891396Stmm * must display the following acknowledgement: 1991396Stmm * This product includes software developed by the NetBSD 2091396Stmm * Foundation, Inc. and its contributors. 2191396Stmm * 4. Neither the name of The NetBSD Foundation nor the names of its 2291396Stmm * contributors may be used to endorse or promote products derived 2391396Stmm * from this software without specific prior written permission. 2491396Stmm * 2591396Stmm * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 2691396Stmm * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 2791396Stmm * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 2891396Stmm * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 2991396Stmm * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 3091396Stmm * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 3191396Stmm * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 3291396Stmm * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 3391396Stmm * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 3491396Stmm * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 3591396Stmm * POSSIBILITY OF SUCH DAMAGE. 3691396Stmm * 37178470Smarius * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp 3891396Stmm */ 3991396Stmm 40119418Sobrien#include <sys/cdefs.h> 41119418Sobrien__FBSDID("$FreeBSD$"); 42119418Sobrien 4391396Stmm/* 4491396Stmm * HME Ethernet module driver. 4591396Stmm * 4691396Stmm * The HME is e.g. part of the PCIO PCI multi function device. 4791396Stmm * It supports TX gathering and TX and RX checksum offloading. 4891396Stmm * RX buffers must be aligned at a programmable offset modulo 16. We choose 2 4991396Stmm * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes 5091396Stmm * are skipped to make sure the header after the ethernet header is aligned on a 5191396Stmm * natural boundary, so this ensures minimal wastage in the most common case. 5291396Stmm * 5391396Stmm * Also, apparently, the buffers must extend to a DMA burst boundary beyond the 5491396Stmm * maximum packet size (this is not verified). Buffers starting on odd 5591396Stmm * boundaries must be mapped so that the burst can start on a natural boundary. 5691396Stmm * 57133149Syongari * STP2002QFP-UG says that Ethernet hardware supports TCP checksum offloading. 58133149Syongari * In reality, we can do the same technique for UDP datagram too. However, 59133149Syongari * the hardware doesn't compensate the checksum for UDP datagram which can yield 60133149Syongari * to 0x0. 
As a safe guard, UDP checksum offload is disabled by default. It 61133149Syongari * can be reactivated by setting special link option link0 with ifconfig(8). 6291396Stmm */ 63133149Syongari#define HME_CSUM_FEATURES (CSUM_TCP) 64178470Smarius#if 0 6591396Stmm#define HMEDEBUG 66178470Smarius#endif 67210334Sattilio#define KTR_HME KTR_SPARE2 /* XXX */ 6891396Stmm 6991396Stmm#include <sys/param.h> 7091396Stmm#include <sys/systm.h> 7191396Stmm#include <sys/bus.h> 7295533Smike#include <sys/endian.h> 7391396Stmm#include <sys/kernel.h> 74130026Sphk#include <sys/module.h> 7591396Stmm#include <sys/ktr.h> 7691396Stmm#include <sys/mbuf.h> 7791396Stmm#include <sys/malloc.h> 7891396Stmm#include <sys/socket.h> 7991396Stmm#include <sys/sockio.h> 8091396Stmm 81100980Sfenner#include <net/bpf.h> 8291396Stmm#include <net/ethernet.h> 8391396Stmm#include <net/if.h> 8491396Stmm#include <net/if_arp.h> 8591396Stmm#include <net/if_dl.h> 8691396Stmm#include <net/if_media.h> 87147256Sbrooks#include <net/if_types.h> 88129006Sjoerg#include <net/if_vlan_var.h> 8991396Stmm 90133149Syongari#include <netinet/in.h> 91133149Syongari#include <netinet/in_systm.h> 92133149Syongari#include <netinet/ip.h> 93133149Syongari#include <netinet/tcp.h> 94133149Syongari#include <netinet/udp.h> 95133149Syongari 9691396Stmm#include <dev/mii/mii.h> 9791396Stmm#include <dev/mii/miivar.h> 9891396Stmm 9991396Stmm#include <machine/bus.h> 10091396Stmm 101119351Smarcel#include <dev/hme/if_hmereg.h> 102119351Smarcel#include <dev/hme/if_hmevar.h> 10391396Stmm 104178470SmariusCTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256); 105178470SmariusCTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256); 106178470Smarius 10791396Stmmstatic void hme_start(struct ifnet *); 108137982Syongaristatic void hme_start_locked(struct ifnet *); 10991396Stmmstatic void hme_stop(struct hme_softc *); 11091396Stmmstatic int hme_ioctl(struct ifnet *, u_long, caddr_t); 11191396Stmmstatic void hme_tick(void 
*); 112164932Smariusstatic int hme_watchdog(struct hme_softc *); 11391396Stmmstatic void hme_init(void *); 114147256Sbrooksstatic void hme_init_locked(struct hme_softc *); 11591396Stmmstatic int hme_add_rxbuf(struct hme_softc *, unsigned int, int); 11691396Stmmstatic int hme_meminit(struct hme_softc *); 11791396Stmmstatic int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t, 11891396Stmm u_int32_t, u_int32_t); 11991396Stmmstatic void hme_mifinit(struct hme_softc *); 12091396Stmmstatic void hme_setladrf(struct hme_softc *, int); 12191396Stmm 12291396Stmmstatic int hme_mediachange(struct ifnet *); 123164864Smariusstatic int hme_mediachange_locked(struct hme_softc *); 12491396Stmmstatic void hme_mediastatus(struct ifnet *, struct ifmediareq *); 12591396Stmm 126151639Syongaristatic int hme_load_txmbuf(struct hme_softc *, struct mbuf **); 127133149Syongaristatic void hme_read(struct hme_softc *, int, int, u_int32_t); 12891396Stmmstatic void hme_eint(struct hme_softc *, u_int); 12991396Stmmstatic void hme_rint(struct hme_softc *); 13091396Stmmstatic void hme_tint(struct hme_softc *); 131133149Syongaristatic void hme_rxcksum(struct mbuf *, u_int32_t); 13291396Stmm 13391396Stmmstatic void hme_cdma_callback(void *, bus_dma_segment_t *, int, int); 13491396Stmm 13591396Stmmdevclass_t hme_devclass; 13691396Stmm 13791396Stmmstatic int hme_nerr; 13891396Stmm 13991396StmmDRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0); 140108960SobrienMODULE_DEPEND(hme, miibus, 1, 1, 1); 14191396Stmm 14291396Stmm#define HME_SPC_READ_4(spc, sc, offs) \ 14391396Stmm bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ 144133599Smarius (offs)) 14591396Stmm#define HME_SPC_WRITE_4(spc, sc, offs, v) \ 14691396Stmm bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ 147133599Smarius (offs), (v)) 148178470Smarius#define HME_SPC_BARRIER(spc, sc, offs, l, f) \ 149178470Smarius bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ 
150178470Smarius (offs), (l), (f)) 15191396Stmm 15291396Stmm#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs)) 15391396Stmm#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v)) 154178470Smarius#define HME_SEB_BARRIER(sc, offs, l, f) \ 155178470Smarius HME_SPC_BARRIER(seb, (sc), (offs), (l), (f)) 15691396Stmm#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs)) 15791396Stmm#define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v)) 158178470Smarius#define HME_ERX_BARRIER(sc, offs, l, f) \ 159178470Smarius HME_SPC_BARRIER(erx, (sc), (offs), (l), (f)) 16091396Stmm#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs)) 16191396Stmm#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v)) 162178470Smarius#define HME_ETX_BARRIER(sc, offs, l, f) \ 163178470Smarius HME_SPC_BARRIER(etx, (sc), (offs), (l), (f)) 16491396Stmm#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs)) 16591396Stmm#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v)) 166178470Smarius#define HME_MAC_BARRIER(sc, offs, l, f) \ 167178470Smarius HME_SPC_BARRIER(mac, (sc), (offs), (l), (f)) 16891396Stmm#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs)) 16991396Stmm#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v)) 170178470Smarius#define HME_MIF_BARRIER(sc, offs, l, f) \ 171178470Smarius HME_SPC_BARRIER(mif, (sc), (offs), (l), (f)) 17291396Stmm 17391396Stmm#define HME_MAXERR 5 17491396Stmm#define HME_WHINE(dev, ...) do { \ 17591396Stmm if (hme_nerr++ < HME_MAXERR) \ 17691396Stmm device_printf(dev, __VA_ARGS__); \ 17791396Stmm if (hme_nerr == HME_MAXERR) { \ 178158973Ssimon device_printf(dev, "too many errors; not reporting " \ 179158973Ssimon "any more\n"); \ 18091396Stmm } \ 18191396Stmm} while(0) 18291396Stmm 183129006Sjoerg/* Support oversized VLAN frames. 
 */
#define	HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)

/*
 * Attach-time configuration shared by the SBus and PCI front-ends:
 * allocate the ifnet, create the parent/control/RX/TX DMA tags and maps,
 * allocate and load the descriptor-ring memory, attach the external and
 * internal PHYs and register the interface with the network stack.
 * Returns 0 on success or an errno value on failure; on failure all
 * resources acquired so far are released via the goto-cleanup chain.
 */
int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (splitted for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 */

	/* Tie the per-second tick callout to the softc mutex. */
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

	/* Make sure the chip is stopped. */
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);

	/* Parent DMA tag; the control, RX and TX tags derive from it. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	/*
	 * Create control, RX and TX mbuf DMA tags.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;
	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate the control DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the control DMA buffer. */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);

	hme_mifinit(sc);

	/*
	 * DP83840A used with HME chips don't advertise their media
	 * capabilities themselves properly so force writing the ANAR
	 * according to the BMSR in mii_phy_setmedia().
	 */
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
	    hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
	    hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	/* Attach succeeds if at least one of the two PHYs was found. */
	if (error != 0 && i != 0) {
		error = ENXIO;
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `PHY number'
	 * mapping. We'll use this mapping to enable the MII
	 * drivers of the external transceiver according to
	 * the currently selected media.
	 */
	sc->sc_phys[0] = sc->sc_phys[1] = -1;
	LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
		    child->mii_phy != HME_PHYAD_INTERNAL) ||
		    child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}

/*
 * Detach the driver: stop the chip, tear down the ifnet/miibus and
 * release the DMA maps, tags and descriptor memory acquired in
 * hme_config().
 */
void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
	/* Wait for an in-flight tick to finish before freeing anything. */
	callout_drain(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

/* Suspend support: just park the hardware. */
void
hme_suspend(struct hme_softc *sc)
{

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
}

/* Resume support: reprogram the chip if the interface was up. */
void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init_locked(sc);
	HME_UNLOCK(sc);
}

/*
 * bus_dmamap_load() callback for the control buffer: record the bus
 * address of the single segment for the descriptor rings.  On error the
 * recorded address stays 0, which the caller checks for.
 */
static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

/*
 * Once-a-second timer (runs with the softc lock held, courtesy of
 * callout_init_mtx): harvest the MAC collision counters into the ifnet
 * statistics, drive the MII state machine and run the TX watchdog.
 */
static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	mii_tick(sc->sc_mii);

	/* EJUSTRETURN means the watchdog reset the chip; don't rearm. */
	if (hme_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

/*
 * Stop the hardware: cancel the tick callout, mask all interrupts and
 * reset the transmitter and receiver, polling until the reset
 * self-clears (up to 20 * 20us).
 */
static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);
	sc->sc_wdog_timer = 0;
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~HME_LINK;

	/* Mask all interrupts */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);
	HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
	    ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
	    &sc->sc_rb.rb_rxdesc[ix])));
}

/*
 * Attach a fresh mbuf cluster to RX descriptor 'ri'; with 'keepold' set
 * and a buffer already present, the existing buffer is simply handed
 * back to the hardware.  Returns 0 on success or ENOBUFS if no mbuf or
 * DMA resources are available.
 */
static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap, nsegs;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = imax(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer suitably aligned. The 2 byte offset is removed
	 * when the mbuf is handed up. XXX: this ensures at least 16 byte
	 * alignment of the header adjacent to the ethernet header, which
	 * should be sufficient in all cases. Nevertheless, this second-guesses
	 * ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	/* Load into the spare map so the old mapping survives a failure. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	/* Swap the freshly-loaded spare map with the descriptor's map. */
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	    segs[0].ds_addr);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

/*
 * Lay out the TX and RX descriptor rings in the control DMA buffer
 * (each ring aligned to a 2048 byte boundary), reset the TX queue
 * bookkeeping, free any mbufs left over from a previous run and
 * (re)load all RX buffers.  Returns 0 on success or an errno if an RX
 * buffer could not be allocated.
 */
static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/*
	 * We have reserved descriptor space until the next 2048 byte
	 * boundary.
	 */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			/* Release an mbuf left over from the previous run. */
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			m_freem(td->htx_m);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

/*
 * Read-modify-write a MAC register: write 'val' with 'clr' bits cleared
 * and 'set' bits set, then poll until the hardware reflects the change.
 * Returns 1 on success, 0 on timeout.
 */
static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	HME_MAC_BARRIER(sc, reg, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
/* Unlocked wrapper for hme_init_locked(); used as the if_init callback. */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}

/*
 * Reset and (re)program the chip.  Follows the numbered initialization
 * sequence from the Ethernet Channel Engine manual (see the comment
 * below).  A no-op if the interface is already marked running.
 * Must be called with the driver lock held.
 */
static void
hme_init_locked(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	u_int8_t *ea;
	u_int32_t n, v;

	HME_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

	/* Load station MAC address */
	ea = IF_LLADDR(ifp);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	/*
	 * NOTE(review): the mask 0x3fff keeps 14 bits, not the 10 the
	 * comment above mentions — verify against the RANDSEED register
	 * width in the chip documentation.
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
		HME_SEB_STAT_HOSTTOTX |
		HME_SEB_STAT_RXTOHOST |
		HME_SEB_STAT_TXALL |
		HME_SEB_STAT_TXPERR |
		HME_SEB_STAT_RCNTEXP |
		HME_SEB_STAT_ALL_ERRORS ));

	/* Encode the configured DMA burst size for the SEB config register. */
	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	/*
	 * Blindly setting 64bit transfers may hang PCI cards(Cheerio?).
	 * Allowing 64bit transfers breaks TX checksum offload as well.
	 * Don't know this comes from hardware bug or driver's DMAing
	 * scheme.
	 *
	 * if (sc->sc_flags & HME_PCI == 0)
	 *	v |= HME_SEB_CFG_64BIT;
	 */
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	/* RX TCP/UDP checksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
	v |= n;
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Set the current media. */
	hme_mediachange_locked(sc);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors
 * to map the packet, or an errno otherwise.
 *
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
 * boundary on (i.e. potentially before ds_addr) to the first
 * boundary beyond the end. This is usually a safe assumption to
 * make, but is not documented.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[HME_NTXSEGS];
	struct hme_txdesc *htx;
	struct ip *ip;
	struct mbuf *m;
	caddr_t txd;
	int error, i, nsegs, pci, ri, si;
	uint32_t cflags, flags;

	/* Grab a free TX descriptor bookkeeping entry up front. */
	if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (ENOBUFS);

	/*
	 * If TX checksum offload was requested, parse the headers to
	 * compute the checksum start/stuff offsets for the descriptor.
	 */
	cflags = 0;
	if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		/* m_pullup() below may rewrite the chain; need it writable. */
		if (M_WRITABLE(*m0) == 0) {
			m = m_dup(*m0, M_NOWAIT);
			m_freem(*m0);
			*m0 = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		i = sizeof(struct ether_header);
		m = m_pullup(*m0, i + sizeof(struct ip));
		if (m == NULL) {
			*m0 = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + i);
		i += (ip->ip_hl << 2);
		/* i is now the offset of the transport header. */
		cflags = i << HME_XD_TXCKSUM_SSHIFT |
		    ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
		    HME_XD_TXCKSUM;
		*m0 = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
	    *m0, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
		    *m0, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= HME_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m0);
		*m0 = NULL;
		return (EIO);
	}
	/* Make sure the whole chain fits in the ring before committing. */
	if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
		/* Retry with m_collapse(9)? */
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);

	si = ri = sc->sc_rb.rb_tdhead;
	txd = sc->sc_rb.rb_txd;
	pci = sc->sc_flags & HME_PCI;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
	    HME_XD_GETFLAGS(pci, txd, ri));
	/*
	 * All descriptors but the first get HME_XD_OWN immediately; the
	 * first descriptor (si) is handed to the chip only after the whole
	 * chain is set up, so the hardware never sees a partial packet.
	 */
	for (i = 0; i < nsegs; i++) {
		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (i == 0)
			flags |= HME_XD_SOP | cflags;
		else
			flags |= HME_XD_OWN | cflags;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, ri, flags);
		sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = ri;
		ri = (ri + 1) % HME_NTXDESC;
	}
	sc->sc_rb.rb_tdhead = ri;

	/* set EOP on the last descriptor */
	ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
	flags = HME_XD_GETFLAGS(pci, txd, ri);
	flags |= HME_XD_EOP;
	CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
	    flags);
	HME_XD_SETFLAGS(pci, txd, ri, flags);

	/* Turn the first descriptor ownership to the hme */
	flags = HME_XD_GETFLAGS(pci, txd, si);
	flags |= HME_XD_OWN;
	CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
	    ri, flags);
	HME_XD_SETFLAGS(pci, txd, si, flags);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
	htx->htx_m = *m0;

	/* start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);

	return (0);
}

/*
 * Pass a packet to the higher levels.
 */
/*
 * Unload the received packet in RX ring slot 'ix', replace its buffer
 * and hand the mbuf to the network stack.  Bad-length and unmappable
 * packets are counted and dropped without stalling the interface.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > HME_MAX_FRAMESIZE) {
#ifdef HMEDEBUG
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
#endif
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_iqdrops++;
		hme_discard_rxbuf(sc, ix);
		return;
	}

	ifp->if_ipackets++;

	m->m_pkthdr.rcvif = ifp;
	/* Trim the HME_RXOFFS alignment padding off the front. */
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* RX TCP/UDP checksum */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		hme_rxcksum(m, flags);
	/* Pass the packet up.  The driver lock is dropped around the upcall. */
	HME_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	HME_LOCK(sc);
}

/* Unlocked wrapper for hme_start_locked(); used as the if_start callback. */
static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	hme_start_locked(ifp);
	HME_UNLOCK(sc);
}

/*
 * Dequeue packets from the interface send queue and hand them to
 * hme_load_txmbuf() until the queue is empty or the TX ring fills up.
 * Must be called with the driver lock held.
 */
static void
hme_start_locked(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error, enq = 0;

	/* Nothing to do unless running, not blocked, and link is up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
		return;

	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_txmbuf(sc, &m);
		if (error != 0) {
			/* m == NULL means the mbuf was consumed/freed. */
			if (m == NULL)
				break;
			/* Out of ring space: requeue and stall the queue. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		enq++;
		BPF_MTAP(ifp, m);
	}

	if (enq > 0) {
		/* Flush the descriptor updates and arm the watchdog. */
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		sc->sc_wdog_timer = 5;
	}
}

/*
 * Transmit interrupt.
 */
/*
 * Reclaim completed TX descriptors: walk the ring from rb_tdtail until a
 * descriptor still owned by the chip is found, unloading and freeing the
 * mbuf of each fully transmitted packet (EOP reached).
 */
static void
hme_tint(struct hme_softc *sc)
{
	caddr_t txd;
	struct ifnet *ifp = sc->sc_ifp;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	txd = sc->sc_rb.rb_txd;
	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		/* Still owned by the chip: stop reclaiming here. */
		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("%s: ring indices skewed: %d != %d!",
		    __func__, htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);

		ifp->if_opackets++;
		m_freem(htx->htx_m);
		htx->htx_m = NULL;
		/* Move the bookkeeping entry back to the free queue. */
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Keep the watchdog armed only while descriptors remain in flight. */
	sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	/* Freed descriptors may allow more packets to be queued. */
	hme_start_locked(ifp);
}

/*
 * RX TCP/UDP checksum
 */
/*
 * Validate the hardware-computed RX checksum in the descriptor flags and,
 * if the frame is a non-fragmented IPv4 TCP or UDP packet, record it in
 * the mbuf packet header.  Only CSUM_DATA_VALID is set (the pseudo-header
 * is not included), so the stack finishes the verification.
 */
static void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;

	/* Sanity-check the headers before trusting the hardware sum. */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

	/* One's complement of the 16-bit sum delivered in the RX flags. */
	cksum = ~(flags & HME_XD_RXCKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * Subtract each 16-bit IP option word from the sum with
		 * end-around carry, since the hardware sum started right
		 * after the fixed IP header.
		 */
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

/*
 * Receive interrupt.
 */
/*
 * Process all RX descriptors that the chip has handed back, passing good
 * frames to hme_read() and dropping overflowed ones.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = sc->sc_ifp;
	unsigned int ri, len;
	int progress = 0;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		/* Still owned by the chip: nothing more to process. */
		if ((flags & HME_XD_OWN) != 0)
			break;

		progress++;
		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			ifp->if_ierrors++;
			hme_discard_rxbuf(sc, ri);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len, flags);
		}
	}
	if (progress) {
		/* Give the recycled descriptors back to the chip. */
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sc->sc_rb.rb_rdtail = ri;
}

/*
 * Error interrupt handler: log MIF interrupts and reinitialize the chip
 * after fatal errors to unfreeze the DMA engines.
 */
static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed: "
		    "cfg=%#x, stat=%#x, sm=%#x\n",
		    HME_MIF_READ_4(sc, HME_MIFI_CFG),
		    HME_MIF_READ_4(sc, HME_MIFI_STAT),
		    HME_MIF_READ_4(sc, HME_MIFI_SM));
		return;
	}

	/* check for fatal errors that needs reset to unfreeze DMA engine */
	if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
		HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
		/*
		 * Clear IFF_DRV_RUNNING first so hme_init_locked() does not
		 * return early on the already-running check.
		 */
		sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		hme_init_locked(sc);
	}
}

/*
 * Interrupt service routine: dispatch to the error, RX and TX handlers
 * according to the status register, under the driver lock.
 */
void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	HME_LOCK(sc);
	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);
	HME_UNLOCK(sc);
}

/*
 * Per-tick watchdog: returns 0 while the timer is idle or still counting
 * down, and EJUSTRETURN after it expired and the chip was reinitialized.
 * Must be called with the driver lock held.
 */
static int
hme_watchdog(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);

#ifdef HMEDEBUG
	CTR1(KTR_HME, "hme_watchdog: status %x",
	    (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return (0);

	if ((sc->sc_flags & HME_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	++ifp->if_oerrors;

	/* Force a full reinit (clear RUNNING so init does not early-return). */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	hme_init_locked(sc);
	hme_start_locked(ifp);
	return (EJUSTRETURN);
}

/*
 * Initialize the MII Management Interface
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/*
	 * Configure the MIF in frame mode, polling disabled, internal PHY
	 * selected.
	 */
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);

	/*
	 * If the currently selected media uses the external transceiver,
	 * enable its MII drivers (which basically isolates the internal
	 * one and vice versa). In case the current media hasn't been set,
	 * yet, we default to the internal transceiver.
	 */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
	    sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
	    HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
/*
 * Read PHY register 'reg' on PHY 'phy' via an MIF frame command.
 * Returns the register value, or 0 if the frame did not complete
 * within ~100us.
 */
int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc;
	int n;
	u_int32_t v;

	sc = device_get_softc(dev);
	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	else
		v &= ~HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for frame completion (TALSB set by the hardware). */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

/*
 * Write 'val' to PHY register 'reg' on PHY 'phy' via an MIF frame
 * command.  Returns 1 on completion, 0 on timeout.
 */
int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc;
	int n;
	u_int32_t v;

	sc = device_get_softc(dev);
	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	else
		v &= ~HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	/* Poll for frame completion (TALSB set by the hardware). */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

/*
 * MII status-change callback: track link state, then disable both MACs,
 * update the duplex setting and re-enable them only when the interface
 * is running and the link is up.
 */
void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc;
	uint32_t rxcfg, txcfg;

	sc = device_get_softc(dev);

#ifdef HMEDEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= HME_LINK;
	else
		sc->sc_flags &= ~HME_LINK;

	/* Disable the MACs before touching the duplex configuration. */
	txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
	    HME_MAC_TXCFG_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
	    HME_MAC_RXCFG_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");

	/* Set the MAC Full Duplex bit appropriately. */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= HME_MAC_TXCFG_FULLDPLX;
	else
		txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & HME_LINK) != 0) {
		if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
		    HME_MAC_TXCFG_ENABLE))
			device_printf(sc->sc_dev, "cannot enable TX MAC\n");
		if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
		    HME_MAC_RXCFG_ENABLE))
			device_printf(sc->sc_dev, "cannot enable RX MAC\n");
	}
}

/* Unlocked wrapper for hme_mediachange_locked(); ifmedia change callback. */
static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	int error;

	HME_LOCK(sc);
	error = hme_mediachange_locked(sc);
	HME_UNLOCK(sc);
	return (error);
}

/*
 * Apply the currently selected media: reprogram the MIF/XIF, reset both
 * PHYs when two are present (see the comment below) and let the MII
 * layer program the new media.  Must be called with the driver lock held.
 */
static int
hme_mediachange_locked(struct hme_softc *sc)
{
	struct mii_softc *child;

	HME_LOCK_ASSERT(sc, MA_OWNED);

#ifdef HMEDEBUG
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "hme_mediachange_locked");
#endif

	hme_mifinit(sc);

	/*
	 * If both PHYs are present reset them. This is required for
	 * unisolating the previously isolated PHY when switching PHYs.
	 * As the above hme_mifinit() call will set the MII drivers in
	 * the XIF configuration register according to the currently
	 * selected media, there should be no window during which the
	 * data paths of both transceivers are open at the same time,
	 * even if the PHY device drivers use MIIF_NOISOLATE.
	 */
	if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
		LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
			PHY_RESET(child);
	return (mii_mediachg(sc->sc_mii));
}

/* ifmedia status callback: report the current media via the MII layer. */
static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		HME_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	HME_UNLOCK(sc);
}

/*
 * Process an ioctl request.
159891396Stmm */ 159991396Stmmstatic int 160091396Stmmhme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 160191396Stmm{ 160291396Stmm struct hme_softc *sc = ifp->if_softc; 160391396Stmm struct ifreq *ifr = (struct ifreq *)data; 1604148944Sjhb int error = 0; 160591396Stmm 160691396Stmm switch (cmd) { 160791396Stmm case SIOCSIFFLAGS: 1608148944Sjhb HME_LOCK(sc); 1609178470Smarius if ((ifp->if_flags & IFF_UP) != 0) { 1610178470Smarius if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1611178470Smarius ((ifp->if_flags ^ sc->sc_ifflags) & 1612178470Smarius (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1613178470Smarius hme_setladrf(sc, 1); 1614178470Smarius else 1615178470Smarius hme_init_locked(sc); 1616178470Smarius } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 161791396Stmm hme_stop(sc); 1618133149Syongari if ((ifp->if_flags & IFF_LINK0) != 0) 1619133149Syongari sc->sc_csum_features |= CSUM_UDP; 1620133149Syongari else 1621133149Syongari sc->sc_csum_features &= ~CSUM_UDP; 1622133149Syongari if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1623133149Syongari ifp->if_hwassist = sc->sc_csum_features; 1624178470Smarius sc->sc_ifflags = ifp->if_flags; 1625148944Sjhb HME_UNLOCK(sc); 162691396Stmm break; 162791396Stmm 162891396Stmm case SIOCADDMULTI: 162991396Stmm case SIOCDELMULTI: 1630148944Sjhb HME_LOCK(sc); 163191396Stmm hme_setladrf(sc, 1); 1632148944Sjhb HME_UNLOCK(sc); 163391396Stmm error = 0; 163491396Stmm break; 163591396Stmm case SIOCGIFMEDIA: 163691396Stmm case SIOCSIFMEDIA: 163791396Stmm error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 163891396Stmm break; 1639133149Syongari case SIOCSIFCAP: 1640148944Sjhb HME_LOCK(sc); 1641133149Syongari ifp->if_capenable = ifr->ifr_reqcap; 1642133149Syongari if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1643133149Syongari ifp->if_hwassist = sc->sc_csum_features; 1644133149Syongari else 1645133149Syongari ifp->if_hwassist = 0; 1646148944Sjhb HME_UNLOCK(sc); 1647133149Syongari break; 164891396Stmm default: 
1649106937Ssam error = ether_ioctl(ifp, cmd, data); 165091396Stmm break; 165191396Stmm } 165291396Stmm 165391396Stmm return (error); 165491396Stmm} 165591396Stmm 165691396Stmm/* 165791396Stmm * Set up the logical address filter. 165891396Stmm */ 165991396Stmmstatic void 166091396Stmmhme_setladrf(struct hme_softc *sc, int reenable) 166191396Stmm{ 1662147256Sbrooks struct ifnet *ifp = sc->sc_ifp; 166391396Stmm struct ifmultiaddr *inm; 166491396Stmm u_int32_t crc; 166591396Stmm u_int32_t hash[4]; 166691396Stmm u_int32_t macc; 166791396Stmm 1668137982Syongari HME_LOCK_ASSERT(sc, MA_OWNED); 1669178470Smarius /* Clear the hash table. */ 167091396Stmm hash[3] = hash[2] = hash[1] = hash[0] = 0; 167191396Stmm 1672178470Smarius /* Get the current RX configuration. */ 167391396Stmm macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); 167491396Stmm 167591396Stmm /* 1676178470Smarius * Turn off promiscuous mode, promiscuous group mode (all multicast), 1677178470Smarius * and hash filter. Depending on the case, the right bit will be 1678178470Smarius * enabled. 1679178470Smarius */ 1680178470Smarius macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC); 1681178470Smarius 1682178470Smarius /* 168391396Stmm * Disable the receiver while changing it's state as the documentation 168491396Stmm * mandates. 168591396Stmm * We then must wait until the bit clears in the register. This should 168691396Stmm * take at most 3.5ms. 168791396Stmm */ 1688178470Smarius if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 1689178470Smarius HME_MAC_RXCFG_ENABLE, 0)) 1690178470Smarius device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 169191396Stmm /* Disable the hash filter before writing to the filter registers. */ 169291396Stmm if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 169391396Stmm HME_MAC_RXCFG_HENABLE, 0)) 1694178470Smarius device_printf(sc->sc_dev, "cannot disable hash filter\n"); 169591396Stmm 1696178470Smarius /* Make the RX MAC really SIMPLEX. 
*/ 1697146513Syongari macc |= HME_MAC_RXCFG_ME; 169891396Stmm if (reenable) 169991396Stmm macc |= HME_MAC_RXCFG_ENABLE; 170091396Stmm else 170191396Stmm macc &= ~HME_MAC_RXCFG_ENABLE; 170291396Stmm 170391396Stmm if ((ifp->if_flags & IFF_PROMISC) != 0) { 170491396Stmm macc |= HME_MAC_RXCFG_PMISC; 170591396Stmm goto chipit; 170691396Stmm } 1707178470Smarius if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1708178470Smarius macc |= HME_MAC_RXCFG_PGRP; 1709178470Smarius goto chipit; 1710178470Smarius } 171191396Stmm 171291396Stmm macc |= HME_MAC_RXCFG_HENABLE; 171391396Stmm 171491396Stmm /* 171591396Stmm * Set up multicast address filter by passing all multicast addresses 171691396Stmm * through a crc generator, and then using the high order 6 bits as an 171791396Stmm * index into the 64 bit logical address filter. The high order bit 171891396Stmm * selects the word, while the rest of the bits select the bit within 171991396Stmm * the word. 172091396Stmm */ 172191396Stmm 1722195049Srwatson if_maddr_rlock(ifp); 1723178470Smarius TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { 172491396Stmm if (inm->ifma_addr->sa_family != AF_LINK) 172591396Stmm continue; 1726130290Smarius crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1727130290Smarius inm->ifma_addr), ETHER_ADDR_LEN); 172891396Stmm 172991396Stmm /* Just want the 6 most significant bits. */ 173091396Stmm crc >>= 26; 173191396Stmm 173291396Stmm /* Set the corresponding bit in the filter. 
*/ 173391396Stmm hash[crc >> 4] |= 1 << (crc & 0xf); 173491396Stmm } 1735195049Srwatson if_maddr_runlock(ifp); 173691396Stmm 173791396Stmmchipit: 173891396Stmm /* Now load the hash table into the chip */ 173991396Stmm HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); 174091396Stmm HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); 174191396Stmm HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); 174291396Stmm HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); 1743178470Smarius if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, 1744146513Syongari macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE | 1745178470Smarius HME_MAC_RXCFG_ME))) 1746178470Smarius device_printf(sc->sc_dev, "cannot configure RX MAC\n"); 174791396Stmm} 1748